diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..0d4be26
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+/framework-tests
+/example-tests
diff --git a/.openshift-tests-extension/openshift:framework:default.json b/.openshift-tests-extension/openshift:framework:default.json
new file mode 100644
index 0000000..00ba1df
--- /dev/null
+++ b/.openshift-tests-extension/openshift:framework:default.json
@@ -0,0 +1,230 @@
+[
+ {
+ "name": "[sig-testing] openshift-tests-extension should be run from the main directory",
+ "otherNames": null,
+ "labels": {
+ "framework": {}
+ },
+ "tags": null,
+ "resources": {
+ "isolation": {
+ "mode": "",
+ "conflict": null
+ },
+ "memory": "",
+ "duration": "",
+ "timeout": ""
+ },
+ "source": "openshift:framework:default",
+ "lifecycle": "blocking"
+ },
+ {
+ "name": "[sig-testing] openshift-tests-extension should run `make` command successfully",
+ "otherNames": null,
+ "labels": {
+ "framework": {}
+ },
+ "tags": null,
+ "resources": {
+ "isolation": {
+ "mode": "",
+ "conflict": null
+ },
+ "memory": "",
+ "duration": "",
+ "timeout": ""
+ },
+ "source": "openshift:framework:default",
+ "lifecycle": "blocking"
+ },
+ {
+ "name": "[sig-testing] openshift-tests-extension should have the example-tests binary",
+ "otherNames": null,
+ "labels": {
+ "framework": {}
+ },
+ "tags": null,
+ "resources": {
+ "isolation": {
+ "mode": "",
+ "conflict": null
+ },
+ "memory": "",
+ "duration": "",
+ "timeout": ""
+ },
+ "source": "openshift:framework:default",
+ "lifecycle": "blocking"
+ },
+ {
+ "name": "[sig-testing] example-tests info should have build metadata",
+ "otherNames": null,
+ "labels": {
+ "framework": {}
+ },
+ "tags": null,
+ "resources": {
+ "isolation": {
+ "mode": "",
+ "conflict": null
+ },
+ "memory": "",
+ "duration": "",
+ "timeout": ""
+ },
+ "source": "openshift:framework:default",
+ "lifecycle": "blocking"
+ },
+ {
+ "name": "[sig-testing] example-tests info should have the expected suites",
+ "otherNames": null,
+ "labels": {
+ "framework": {}
+ },
+ "tags": null,
+ "resources": {
+ "isolation": {
+ "mode": "",
+ "conflict": null
+ },
+ "memory": "",
+ "duration": "",
+ "timeout": ""
+ },
+ "source": "openshift:framework:default",
+ "lifecycle": "blocking"
+ },
+ {
+ "name": "[sig-testing] example-tests info should have the correct component information",
+ "otherNames": null,
+ "labels": {
+ "framework": {}
+ },
+ "tags": null,
+ "resources": {
+ "isolation": {
+ "mode": "",
+ "conflict": null
+ },
+ "memory": "",
+ "duration": "",
+ "timeout": ""
+ },
+ "source": "openshift:framework:default",
+ "lifecycle": "blocking"
+ },
+ {
+ "name": "[sig-testing] example-tests list should list all specs",
+ "otherNames": null,
+ "labels": {
+ "framework": {}
+ },
+ "tags": null,
+ "resources": {
+ "isolation": {
+ "mode": "",
+ "conflict": null
+ },
+ "memory": "",
+ "duration": "",
+ "timeout": ""
+ },
+ "source": "openshift:framework:default",
+ "lifecycle": "blocking"
+ },
+ {
+ "name": "[sig-testing] example-tests list should populate fields",
+ "otherNames": null,
+ "labels": {
+ "framework": {}
+ },
+ "tags": null,
+ "resources": {
+ "isolation": {
+ "mode": "",
+ "conflict": null
+ },
+ "memory": "",
+ "duration": "",
+ "timeout": ""
+ },
+ "source": "openshift:framework:default",
+ "lifecycle": "blocking"
+ },
+ {
+ "name": "[sig-testing] example-tests list should filter specs by suite",
+ "otherNames": null,
+ "labels": {
+ "framework": {}
+ },
+ "tags": null,
+ "resources": {
+ "isolation": {
+ "mode": "",
+ "conflict": null
+ },
+ "memory": "",
+ "duration": "",
+ "timeout": ""
+ },
+ "source": "openshift:framework:default",
+ "lifecycle": "blocking"
+ },
+ {
+ "name": "[sig-testing] example-tests run-suite should contain a test that passed",
+ "otherNames": null,
+ "labels": {
+ "framework": {}
+ },
+ "tags": null,
+ "resources": {
+ "isolation": {
+ "mode": "",
+ "conflict": null
+ },
+ "memory": "",
+ "duration": "",
+ "timeout": ""
+ },
+ "source": "openshift:framework:default",
+ "lifecycle": "blocking"
+ },
+ {
+ "name": "[sig-testing] example-tests run-suite should have the correct error message for the failed test",
+ "otherNames": null,
+ "labels": {
+ "framework": {}
+ },
+ "tags": null,
+ "resources": {
+ "isolation": {
+ "mode": "",
+ "conflict": null
+ },
+ "memory": "",
+ "duration": "",
+ "timeout": ""
+ },
+ "source": "openshift:framework:default",
+ "lifecycle": "blocking"
+ },
+ {
+ "name": "[sig-testing] example-tests run-suite fast suite should not have a slow test",
+ "otherNames": null,
+ "labels": {
+ "framework": {}
+ },
+ "tags": null,
+ "resources": {
+ "isolation": {
+ "mode": "",
+ "conflict": null
+ },
+ "memory": "",
+ "duration": "",
+ "timeout": ""
+ },
+ "source": "openshift:framework:default",
+ "lifecycle": "blocking"
+ }
+]
\ No newline at end of file
diff --git a/.openshift-tests-extension/openshift:payload:example-tests.json b/.openshift-tests-extension/openshift:payload:example-tests.json
new file mode 100644
index 0000000..07584a4
--- /dev/null
+++ b/.openshift-tests-extension/openshift:payload:example-tests.json
@@ -0,0 +1,106 @@
+[
+ {
+ "name": "[sig-testing] openshift-tests-extension ordered should run beforeAll once",
+ "otherNames": null,
+ "labels": {},
+ "tags": null,
+ "resources": {
+ "isolation": {
+ "mode": "",
+ "conflict": null
+ },
+ "memory": "",
+ "duration": "",
+ "timeout": ""
+ },
+ "source": "openshift:payload:example-tests",
+ "lifecycle": "blocking"
+ },
+ {
+ "name": "[sig-testing] openshift-tests-extension setup should support beforeEach",
+ "otherNames": null,
+ "labels": {},
+ "tags": null,
+ "resources": {
+ "isolation": {
+ "mode": "",
+ "conflict": null
+ },
+ "memory": "",
+ "duration": "",
+ "timeout": ""
+ },
+ "source": "openshift:payload:example-tests",
+ "lifecycle": "blocking"
+ },
+ {
+ "name": "[sig-testing] openshift-tests-extension should support passing tests",
+ "otherNames": null,
+ "labels": {},
+ "tags": null,
+ "resources": {
+ "isolation": {
+ "mode": "",
+ "conflict": null
+ },
+ "memory": "",
+ "duration": "",
+ "timeout": ""
+ },
+ "source": "openshift:payload:example-tests",
+ "lifecycle": "blocking"
+ },
+ {
+ "name": "[sig-testing] openshift-tests-extension should support failing tests",
+ "otherNames": null,
+ "labels": {},
+ "tags": null,
+ "resources": {
+ "isolation": {
+ "mode": "",
+ "conflict": null
+ },
+ "memory": "",
+ "duration": "",
+ "timeout": ""
+ },
+ "source": "openshift:payload:example-tests",
+ "lifecycle": "blocking"
+ },
+ {
+ "name": "[sig-testing] openshift-tests-extension should support panicking tests",
+ "otherNames": null,
+ "labels": {},
+ "tags": null,
+ "resources": {
+ "isolation": {
+ "mode": "",
+ "conflict": null
+ },
+ "memory": "",
+ "duration": "",
+ "timeout": ""
+ },
+ "source": "openshift:payload:example-tests",
+ "lifecycle": "blocking"
+ },
+ {
+ "name": "[sig-testing] openshift-tests-extension should support slow tests",
+ "otherNames": null,
+ "labels": {
+ "SLOW": {}
+ },
+ "tags": null,
+ "resources": {
+ "isolation": {
+ "mode": "",
+ "conflict": null
+ },
+ "memory": "",
+ "duration": "",
+ "timeout": ""
+ },
+ "source": "openshift:payload:example-tests",
+ "lifecycle": "blocking"
+ }
+]
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..6d734ff
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,13 @@
+FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.22-openshift-4.17 AS builder
+WORKDIR /go/src/github.com/openshift-eng/openshift-tests-extension
+COPY . .
+RUN make && \
+ mkdir -p /tmp/build && \
+ cp /go/src/github.com/openshift-eng/openshift-tests-extension/example-tests /tmp/build/example-tests && \
+ gzip /tmp/build/example-tests
+
+FROM registry.ci.openshift.org/ocp/4.17:tools
+COPY --from=builder /tmp/build/example-tests.gz /usr/bin/example-tests.gz
+LABEL io.k8s.display-name="OpenShift Tests Extension" \
+ io.openshift.release.operator=true \
+ io.openshift.tags="openshift,tests,e2e,e2e-extension"
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..065dd6f
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,37 @@
+GO_PKG_NAME := github.com/openshift-eng/openshift-tests-extension
+
+GIT_COMMIT := $(shell git rev-parse --short HEAD)
+BUILD_DATE := $(shell date -u +'%Y-%m-%dT%H:%M:%SZ')
+GIT_TREE_STATE := $(shell if git diff --quiet; then echo clean; else echo dirty; fi)
+
+LDFLAGS := -X '$(GO_PKG_NAME)/pkg/version.CommitFromGit=$(GIT_COMMIT)' \
+ -X '$(GO_PKG_NAME)/pkg/version.BuildDate=$(BUILD_DATE)' \
+ -X '$(GO_PKG_NAME)/pkg/version.GitTreeState=$(GIT_TREE_STATE)'
+
+.PHONY: all verify build test lint clean unit integration example-tests framework-tests
+
+all: unit build integration
+
+verify: lint
+
+build: example-tests framework-tests
+
+example-tests:
+ go build -ldflags "$(LDFLAGS)" ./cmd/example-tests/...
+
+framework-tests:
+ go build -ldflags "$(LDFLAGS)" ./cmd/framework-tests/...
+
+test: unit integration
+
+unit:
+ go test ./...
+
+integration: build
+ ./framework-tests run-suite framework
+
+lint:
+ ./hack/go-lint.sh run ./...
+
+clean:
+ rm -f example-tests framework-tests
diff --git a/cmd/example-tests/main.go b/cmd/example-tests/main.go
new file mode 100644
index 0000000..a6aa7db
--- /dev/null
+++ b/cmd/example-tests/main.go
@@ -0,0 +1,92 @@
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/spf13/cobra"
+
+ "github.com/openshift-eng/openshift-tests-extension/pkg/cmd"
+ e "github.com/openshift-eng/openshift-tests-extension/pkg/extension"
+ g "github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo"
+
+ // If using ginkgo, import your tests here
+ _ "github.com/openshift-eng/openshift-tests-extension/test/example"
+)
+
+func main() {
+ // Extension registry
+ registry := e.NewRegistry()
+
+ // You can declare multiple extensions, but most people will probably only need to create one.
+ ext := e.NewExtension("openshift", "payload", "example-tests")
+
+ // Add suites to the extension. Specifying parents will cause the tests from this suite
+ // to be included when a parent is invoked.
+ ext.AddSuite(
+ e.Suite{
+ Name: "example/tests",
+ Parents: []string{"openshift/conformance/parallel"},
+ })
+
+ // The tests that a suite is composed of can be filtered by CEL expressions. By
+ // default, the qualifiers only apply to tests from this extension.
+ ext.AddSuite(e.Suite{
+ Name: "example/fast",
+ Qualifiers: []string{
+ `!labels.exists(l, l=="SLOW")`,
+ },
+ })
+
+ // Global suites' qualifiers will apply to all tests available, even
+ // those outside of this extension (when invoked by origin).
+ ext.AddGlobalSuite(e.Suite{
+ Name: "example/slow",
+ Qualifiers: []string{
+ `labels.exists(l, l=="SLOW")`,
+ },
+ })
+
+ // If using Ginkgo, build test specs automatically
+ specs, err := g.BuildExtensionTestSpecsFromOpenShiftGinkgoSuite()
+ if err != nil {
+ panic(fmt.Sprintf("couldn't build extension test specs from ginkgo: %+v", err.Error()))
+ }
+
+ // You can also manually build a test specs list from other testing tooling,
+ // for example:
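+ //
+ // A hypothetical sketch using the extensiontests types from this repo; the
+ // test name and canned result are placeholders:
+ //
+ // specs = append(specs, &extensiontests.ExtensionTestSpec{
+ // Name: "[sig-testing] my manually defined test",
+ // Lifecycle: extensiontests.LifecycleBlocking,
+ // Run: func() *extensiontests.ExtensionTestResult {
+ // return &extensiontests.ExtensionTestResult{
+ // Name: "[sig-testing] my manually defined test",
+ // Result: extensiontests.ResultPassed,
+ // }
+ // },
+ // })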
+
+ // Modify specs:
+ // Add label to all specs
+ // specs = specs.AddLabel("SLOW")
+
+ // Specs can be globally filtered...
+ // specs = specs.MustFilter([]string{`name.contains("filter")`})
+
+ // Or walked...
+ // specs = specs.Walk(func(spec *extensiontests.ExtensionTestSpec) {
+ // if strings.Contains(spec.Name, "scale up") {
+ // spec.Labels.Insert("SLOW")
+ // }
+ //
+ // Test renames
+ // if spec.Name == "[sig-testing] openshift-tests-extension has a test with a typo" {
+ // spec.OtherNames = sets.New[string](`[sig-testing] openshift-tests-extension has a test with a tpyo`)
+ // }
+ // })
+
+ ext.AddSpecs(specs)
+ registry.Register(ext)
+
+ root := &cobra.Command{
+ Long: "OpenShift Tests Extension Example",
+ }
+
+ root.AddCommand(cmd.DefaultExtensionCommands(registry)...)
+
+ if err := root.Execute(); err != nil {
+ os.Exit(1)
+ }
+}
diff --git a/cmd/framework-tests/main.go b/cmd/framework-tests/main.go
new file mode 100644
index 0000000..67bbdc3
--- /dev/null
+++ b/cmd/framework-tests/main.go
@@ -0,0 +1,46 @@
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/spf13/cobra"
+
+ "github.com/openshift-eng/openshift-tests-extension/pkg/cmd"
+ e "github.com/openshift-eng/openshift-tests-extension/pkg/extension"
+ g "github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo"
+
+ _ "github.com/openshift-eng/openshift-tests-extension/test/framework"
+)
+
+// framework-tests is how we test ourselves: we drink our own champagne. It executes
+// example-tests, checks that the various commands work, and verifies that the run
+// produces both passed and failed tests.
+func main() {
+ registry := e.NewRegistry()
+ ext := e.NewExtension("openshift", "framework", "default")
+ ext.AddSuite(e.Suite{
+ Name: "framework",
+ })
+
+ // If using Ginkgo, build test specs automatically
+ specs, err := g.BuildExtensionTestSpecsFromOpenShiftGinkgoSuite()
+ if err != nil {
+ panic(fmt.Sprintf("couldn't build extension test specs from ginkgo: %+v", err.Error()))
+ }
+
+ ext.AddSpecs(specs)
+ registry.Register(ext)
+
+ root := &cobra.Command{
+ Long: "OpenShift Tests Extension Framework Testing",
+ }
+
+ root.AddCommand(cmd.DefaultExtensionCommands(registry)...)
+
+ if err := root.Execute(); err != nil {
+ os.Exit(1)
+ }
+}
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..f516669
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,37 @@
+module github.com/openshift-eng/openshift-tests-extension
+
+go 1.22.4
+
+require (
+ github.com/google/cel-go v0.17.8
+ github.com/onsi/ginkgo/v2 v2.20.2
+ github.com/onsi/gomega v1.33.1
+ github.com/pkg/errors v0.9.1
+ github.com/spf13/cobra v1.8.1
+ github.com/spf13/pflag v1.0.5
+ github.com/stretchr/testify v1.9.0
+ k8s.io/apimachinery v0.31.1
+)
+
+require (
+ github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
+ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
+ github.com/go-logr/logr v1.4.2 // indirect
+ github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
+ github.com/google/go-cmp v0.6.0 // indirect
+ github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 // indirect
+ github.com/inconshreveable/mousetrap v1.1.0 // indirect
+ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
+ github.com/stoewer/go-strcase v1.2.0 // indirect
+ golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect
+ golang.org/x/net v0.28.0 // indirect
+ golang.org/x/sys v0.24.0 // indirect
+ golang.org/x/text v0.17.0 // indirect
+ golang.org/x/tools v0.24.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 // indirect
+ google.golang.org/protobuf v1.34.2 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+)
+
+replace github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..fa13e5c
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,61 @@
+github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
+github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
+github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto=
+github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 h1:5iH8iuqE5apketRbSFBy+X1V0o+l+8NF1avt4HWl7cA=
+github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
+github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
+github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72 h1:NcYD3EiHblfhKo/OBnVeGZGgksTzZjz3U3r82t4tsWg=
+github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
+github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU=
+golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
+golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
+golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
+golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg=
+golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
+golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24=
+golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
+google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f h1:jTm13A2itBi3La6yTGqn8bVSrc3ZZ1r8ENHlIXBfnRA=
+google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f/go.mod h1:CLGoBuH1VHxAUXVPP8FfPwPEVJB6lz3URE5mY2SuayE=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 h1:N9BgCIAUvn/M+p4NJccWPWb3BWh88+zyL0ll9HgbEeM=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
+google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U=
+k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
diff --git a/hack/go-lint.sh b/hack/go-lint.sh
new file mode 100755
index 0000000..6e9cc97
--- /dev/null
+++ b/hack/go-lint.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+# If OPENSHIFT_CI is true, run golangci-lint directly. If we're on a developer's
+# local machine, run golangci-lint from a container so we're ensuring
+# a consistent environment.
+
+set -ex
+
+if [ "$OPENSHIFT_CI" = "true" ];
+then
+ golangci-lint "${@}"
+else
+ DOCKER=${DOCKER:-podman}
+
+ if ! which "$DOCKER" > /dev/null 2>&1;
+ then
+ echo "$DOCKER not found, please install."
+ exit 1
+ fi
+
+ # Check if running on Linux
+ VOLUME_OPTION=""
+ if [[ "$(uname -s)" == "Linux" ]]; then
+ VOLUME_OPTION=":z"
+ fi
+
+ $DOCKER run --rm \
+ --volume "${PWD}:/go/src/github.com/openshift-eng/openshift-tests-extension${VOLUME_OPTION}" \
+ --workdir /go/src/github.com/openshift-eng/openshift-tests-extension \
+ docker.io/golangci/golangci-lint:v1.54.2 \
+ golangci-lint "${@}"
+fi
diff --git a/pkg/cmd/cmd.go b/pkg/cmd/cmd.go
new file mode 100644
index 0000000..5db1741
--- /dev/null
+++ b/pkg/cmd/cmd.go
@@ -0,0 +1,21 @@
+package cmd
+
+import (
+ "github.com/spf13/cobra"
+
+ "github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdinfo"
+ "github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdlist"
+ "github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdrun"
+ "github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdupdate"
+ "github.com/openshift-eng/openshift-tests-extension/pkg/extension"
+)
+
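+// DefaultExtensionCommands returns the standard set of commands (run-suite,
+// run-test, list, info, update) that an extension binary is expected to expose.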
+func DefaultExtensionCommands(registry *extension.Registry) []*cobra.Command {
+ return []*cobra.Command{
+ cmdrun.NewRunSuiteCommand(registry),
+ cmdrun.NewRunTestCommand(registry),
+ cmdlist.NewListCommand(registry),
+ cmdinfo.NewInfoCommand(registry),
+ cmdupdate.NewUpdateCommand(registry),
+ }
+}
diff --git a/pkg/cmd/cmdinfo/info.go b/pkg/cmd/cmdinfo/info.go
new file mode 100644
index 0000000..1d42378
--- /dev/null
+++ b/pkg/cmd/cmdinfo/info.go
@@ -0,0 +1,38 @@
+package cmdinfo
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+
+ "github.com/spf13/cobra"
+
+ "github.com/openshift-eng/openshift-tests-extension/pkg/extension"
+ "github.com/openshift-eng/openshift-tests-extension/pkg/flags"
+)
+
+func NewInfoCommand(registry *extension.Registry) *cobra.Command {
+ componentFlags := flags.NewComponentFlags()
+
+ cmd := &cobra.Command{
+ Use: "info",
+ Short: "Display extension metadata",
+ SilenceUsage: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ extension := registry.Get(componentFlags.Component)
+ if extension == nil {
+ return fmt.Errorf("couldn't find the component %q", componentFlags.Component)
+ }
+
+ info, err := json.MarshalIndent(extension, "", " ")
+ if err != nil {
+ return err
+ }
+
+ fmt.Fprintf(os.Stdout, "%s\n", string(info))
+ return nil
+ },
+ }
+ componentFlags.BindFlags(cmd.Flags())
+ return cmd
+}
diff --git a/pkg/cmd/cmdlist/list.go b/pkg/cmd/cmdlist/list.go
new file mode 100644
index 0000000..69f0266
--- /dev/null
+++ b/pkg/cmd/cmdlist/list.go
@@ -0,0 +1,124 @@
+package cmdlist
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/spf13/cobra"
+
+ "github.com/openshift-eng/openshift-tests-extension/pkg/extension"
+ "github.com/openshift-eng/openshift-tests-extension/pkg/flags"
+)
+
+func NewListCommand(registry *extension.Registry) *cobra.Command {
+ opts := struct {
+ componentFlags *flags.ComponentFlags
+ suiteFlags *flags.SuiteFlags
+ outputFlags *flags.OutputFlags
+ }{
+ suiteFlags: flags.NewSuiteFlags(),
+ componentFlags: flags.NewComponentFlags(),
+ outputFlags: flags.NewOutputFlags(),
+ }
+
+ // Tests
+ listTestsCmd := &cobra.Command{
+ Use: "tests",
+ Short: "List available tests",
+ SilenceUsage: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ ext := registry.Get(opts.componentFlags.Component)
+ if ext == nil {
+ return fmt.Errorf("component not found: %s", opts.componentFlags.Component)
+ }
+
+ // Find suite, if specified
+ var foundSuite *extension.Suite
+ var err error
+ if opts.suiteFlags.Suite != "" {
+ foundSuite, err = ext.GetSuite(opts.suiteFlags.Suite)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Filter for suite
+ specs := ext.GetSpecs()
+ if foundSuite != nil {
+ specs, err = specs.Filter(foundSuite.Qualifiers)
+ if err != nil {
+ return err
+ }
+ }
+
+ data, err := opts.outputFlags.Marshal(specs)
+ if err != nil {
+ return err
+ }
+ fmt.Fprintf(os.Stdout, "%s\n", string(data))
+ return nil
+ },
+ }
+ opts.suiteFlags.BindFlags(listTestsCmd.Flags())
+ opts.componentFlags.BindFlags(listTestsCmd.Flags())
+ opts.outputFlags.BindFlags(listTestsCmd.Flags())
+
+ // Suites
+ listSuitesCommand := &cobra.Command{
+ Use: "suites",
+ Short: "List available suites",
+ SilenceUsage: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ ext := registry.Get(opts.componentFlags.Component)
+ if ext == nil {
+ return fmt.Errorf("component not found: %s", opts.componentFlags.Component)
+ }
+
+ suites := ext.Suites
+
+ data, err := opts.outputFlags.Marshal(suites)
+ if err != nil {
+ return err
+ }
+ fmt.Fprintf(os.Stdout, "%s\n", string(data))
+ return nil
+ },
+ }
+ opts.componentFlags.BindFlags(listSuitesCommand.Flags())
+ opts.outputFlags.BindFlags(listSuitesCommand.Flags())
+
+ // Components
+ listComponentsCmd := &cobra.Command{
+ Use: "components",
+ Short: "List available components",
+ SilenceUsage: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ var components []*extension.Component
+ registry.Walk(func(e *extension.Extension) {
+ components = append(components, &e.Component)
+ })
+
+ data, err := opts.outputFlags.Marshal(components)
+ if err != nil {
+ return err
+ }
+ fmt.Fprintf(os.Stdout, "%s\n", string(data))
+ return nil
+ },
+ }
+ opts.outputFlags.BindFlags(listComponentsCmd.Flags())
+
+ var listCmd = &cobra.Command{
+ Use: "list [subcommand]",
+ Short: "List items",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return listTestsCmd.RunE(cmd, args)
+ },
+ }
+ opts.suiteFlags.BindFlags(listCmd.Flags())
+ opts.componentFlags.BindFlags(listCmd.Flags())
+ opts.outputFlags.BindFlags(listCmd.Flags())
+ listCmd.AddCommand(listTestsCmd, listComponentsCmd, listSuitesCommand)
+
+ return listCmd
+}
diff --git a/pkg/cmd/cmdrun/runsuite.go b/pkg/cmd/cmdrun/runsuite.go
new file mode 100644
index 0000000..c79b3ff
--- /dev/null
+++ b/pkg/cmd/cmdrun/runsuite.go
@@ -0,0 +1,63 @@
+package cmdrun
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/pkg/errors"
+ "github.com/spf13/cobra"
+
+ "github.com/openshift-eng/openshift-tests-extension/pkg/extension"
+ "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests"
+ "github.com/openshift-eng/openshift-tests-extension/pkg/flags"
+)
+
+func NewRunSuiteCommand(registry *extension.Registry) *cobra.Command {
+ opts := struct {
+ componentFlags *flags.ComponentFlags
+ outputFlags *flags.OutputFlags
+ concurrencyFlags *flags.ConcurrencyFlags
+ }{
+ componentFlags: flags.NewComponentFlags(),
+ outputFlags: flags.NewOutputFlags(),
+ concurrencyFlags: flags.NewConcurrencyFlags(),
+ }
+
+ cmd := &cobra.Command{
+ Use: "run-suite NAME",
+ Short: "Run a group of tests by suite",
+ SilenceUsage: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ ext := registry.Get(opts.componentFlags.Component)
+ if ext == nil {
+ return fmt.Errorf("component not found: %s", opts.componentFlags.Component)
+ }
+ if len(args) != 1 {
+ return fmt.Errorf("must specify one suite name")
+ }
+
+ w, err := extensiontests.NewResultWriter(os.Stdout, extensiontests.ResultFormat(opts.outputFlags.Output))
+ if err != nil {
+ return err
+ }
+ defer w.Flush()
+
+ suite, err := ext.GetSuite(args[0])
+ if err != nil {
+ return errors.Wrapf(err, "couldn't find suite: %s", args[0])
+ }
+
+ specs, err := ext.GetSpecs().Filter(suite.Qualifiers)
+ if err != nil {
+ return errors.Wrap(err, "couldn't filter specs")
+ }
+
+ return specs.Run(w, opts.concurrencyFlags.MaxConcurrency)
+ },
+ }
+ opts.componentFlags.BindFlags(cmd.Flags())
+ opts.outputFlags.BindFlags(cmd.Flags())
+ opts.concurrencyFlags.BindFlags(cmd.Flags())
+
+ return cmd
+}
diff --git a/pkg/cmd/cmdrun/runtest.go b/pkg/cmd/cmdrun/runtest.go
new file mode 100644
index 0000000..ea4b62c
--- /dev/null
+++ b/pkg/cmd/cmdrun/runtest.go
@@ -0,0 +1,81 @@
+package cmdrun
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+
+ "github.com/spf13/cobra"
+
+ "github.com/openshift-eng/openshift-tests-extension/pkg/extension"
+ "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests"
+ "github.com/openshift-eng/openshift-tests-extension/pkg/flags"
+)
+
+func NewRunTestCommand(registry *extension.Registry) *cobra.Command {
+ opts := struct {
+ componentFlags *flags.ComponentFlags
+ concurrencyFlags *flags.ConcurrencyFlags
+ nameFlags *flags.NamesFlags
+ outputFlags *flags.OutputFlags
+ }{
+ componentFlags: flags.NewComponentFlags(),
+ nameFlags: flags.NewNamesFlags(),
+ outputFlags: flags.NewOutputFlags(),
+ concurrencyFlags: flags.NewConcurrencyFlags(),
+ }
+
+ cmd := &cobra.Command{
+ Use: "run-test [-n NAME...] [NAME]",
+ Short: "Runs tests by name",
+ SilenceUsage: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ ext := registry.Get(opts.componentFlags.Component)
+ if ext == nil {
+ return fmt.Errorf("component not found: %s", opts.componentFlags.Component)
+ }
+ if len(args) > 1 {
+ return fmt.Errorf("use --names to specify more than one test")
+ }
+ opts.nameFlags.Names = append(opts.nameFlags.Names, args...)
+
+ // allow reading test names from a stdin pipe
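+ // e.g. (assuming jq is installed):
+ // example-tests list | jq -r '.[].name' | example-tests run-test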
+ info, err := os.Stdin.Stat()
+ if err != nil {
+ return err
+ }
+ if info.Mode()&os.ModeCharDevice == 0 { // Check if input is from a pipe
+ scanner := bufio.NewScanner(os.Stdin)
+ for scanner.Scan() {
+ opts.nameFlags.Names = append(opts.nameFlags.Names, scanner.Text())
+ }
+ if err := scanner.Err(); err != nil {
+ return fmt.Errorf("error reading from stdin: %v", err)
+ }
+ }
+
+ if len(opts.nameFlags.Names) == 0 {
+ return fmt.Errorf("must specify at least one test")
+ }
+
+ specs, err := ext.FindSpecsByName(opts.nameFlags.Names...)
+ if err != nil {
+ return err
+ }
+
+ w, err := extensiontests.NewResultWriter(os.Stdout, extensiontests.ResultFormat(opts.outputFlags.Output))
+ if err != nil {
+ return err
+ }
+ defer w.Flush()
+
+ return specs.Run(w, opts.concurrencyFlags.MaxConcurrency)
+ },
+ }
+ opts.componentFlags.BindFlags(cmd.Flags())
+ opts.nameFlags.BindFlags(cmd.Flags())
+ opts.outputFlags.BindFlags(cmd.Flags())
+ opts.concurrencyFlags.BindFlags(cmd.Flags())
+
+ return cmd
+}
diff --git a/pkg/cmd/cmdupdate/update.go b/pkg/cmd/cmdupdate/update.go
new file mode 100644
index 0000000..843b0a0
--- /dev/null
+++ b/pkg/cmd/cmdupdate/update.go
@@ -0,0 +1,100 @@
+package cmdupdate
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/spf13/cobra"
+
+ "github.com/openshift-eng/openshift-tests-extension/pkg/extension"
+ "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests"
+ "github.com/openshift-eng/openshift-tests-extension/pkg/flags"
+)
+
+const metadataDirectory = ".openshift-tests-extension"
+
+// NewUpdateCommand adds an "update" command used to generate and verify the metadata we keep track of. This should
+// be a black box to end users, i.e. we can add more criteria later they'll consume when revendoring. For now,
+// we prevent a test to be renamed without updating other names, or a test to be deleted.
+func NewUpdateCommand(registry *extension.Registry) *cobra.Command {
+ componentFlags := flags.NewComponentFlags()
+
+ cmd := &cobra.Command{
+ Use: "update",
+ Short: "Update test metadata",
+ SilenceUsage: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ ext := registry.Get(componentFlags.Component)
+ if ext == nil {
+ return fmt.Errorf("couldn't find the component %q", componentFlags.Component)
+ }
+
+ // Create the metadata directory if it doesn't exist
+ if err := os.MkdirAll(metadataDirectory, 0755); err != nil {
+ return fmt.Errorf("failed to create directory %s: %w", metadataDirectory, err)
+ }
+
+ // Read existing specs
+ metadataPath := filepath.Join(metadataDirectory, fmt.Sprintf("%s.json", ext.Component.Identifier()))
+ var oldSpecs extensiontests.ExtensionTestSpecs
+ source, err := os.Open(metadataPath)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return fmt.Errorf("failed to open file: %s: %+w", metadataPath, err)
+ }
+ } else {
+ if err := json.NewDecoder(source).Decode(&oldSpecs); err != nil {
+ return fmt.Errorf("failed to decode file: %s: %+w", metadataPath, err)
+ }
+
+ missing, err := ext.FindRemovedTestsWithoutRename(oldSpecs)
+ if err != nil && len(missing) > 0 {
+ fmt.Fprintf(os.Stderr, "Missing Tests:\n")
+ for _, name := range missing {
+ fmt.Fprintf(os.Stdout, " * %s\n", name)
+ }
+ fmt.Fprintf(os.Stderr, "\n")
+
+ return fmt.Errorf("missing tests, if you've renamed tests you must add their names to OtherNames, " +
+ "or mark them obsolete")
+ }
+ }
+
+ // no missing tests, write the results
+ newSpecs := ext.GetSpecs()
+ data, err := json.MarshalIndent(newSpecs, "", " ")
+ if err != nil {
+ return fmt.Errorf("failed to marshal specs to JSON: %w", err)
+ }
+
+ // Write the JSON data to the file
+ if err := os.WriteFile(metadataPath, data, 0644); err != nil {
+ return fmt.Errorf("failed to write file %s: %w", metadataPath, err)
+ }
+
+ fmt.Printf("successfully updated metadata\n")
+ return nil
+ },
+ }
+ componentFlags.BindFlags(cmd.Flags())
+ return cmd
+}
+
+// findMissingNames returns the old names that are not present in the new names.
+func findMissingNames(allOldNames, allNewNames []string) []string {
+ nameExists := make(map[string]bool)
+ for _, newName := range allNewNames {
+ nameExists[newName] = true
+ }
+
+ var missingNames []string
+ for _, oldName := range allOldNames {
+ if !nameExists[oldName] {
+ missingNames = append(missingNames, oldName)
+ }
+ }
+
+ return missingNames
+}
diff --git a/pkg/extension/extension.go b/pkg/extension/extension.go
new file mode 100644
index 0000000..41cb0b4
--- /dev/null
+++ b/pkg/extension/extension.go
@@ -0,0 +1,148 @@
+package extension
+
+import (
+ "fmt"
+ "strings"
+
+ et "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests"
+ "github.com/openshift-eng/openshift-tests-extension/pkg/version"
+)
+
+func NewExtension(product, kind, name string) *Extension {
+ return &Extension{
+ APIVersion: CurrentExtensionAPIVersion,
+ Source: Source{
+ Commit: version.CommitFromGit,
+ BuildDate: version.BuildDate,
+ GitTreeState: version.GitTreeState,
+ },
+ Component: Component{
+ Product: product,
+ Kind: kind,
+ Name: name,
+ },
+ }
+}
+
+func (e *Extension) GetSuite(name string) (*Suite, error) {
+ var suite *Suite
+
+ for _, s := range e.Suites {
+ if s.Name == name {
+ suite = &s
+ break
+ }
+ }
+
+ if suite == nil {
+ return nil, fmt.Errorf("no such suite: %s", name)
+ }
+
+ return suite, nil
+}
+
+func (e *Extension) GetSpecs() et.ExtensionTestSpecs {
+ return e.specs
+}
+
+func (e *Extension) AddSpecs(specs et.ExtensionTestSpecs) {
+ specs.Walk(func(spec *et.ExtensionTestSpec) {
+ spec.Source = e.Component.Identifier()
+ })
+
+ e.specs = append(e.specs, specs...)
+}
+
+// IgnoreObsoleteTests allows removal of a test.
+func (e *Extension) IgnoreObsoleteTests(testNames ...string) {
+ e.obsoleteTests = append(e.obsoleteTests, testNames...)
+}
+
+// FindRemovedTestsWithoutRename compares the extension's current specs against oldSpecs; any test from oldSpecs
+// that is missing, after accounting for other names, is returned. Can be used to detect test renames or removals.
+func (e *Extension) FindRemovedTestsWithoutRename(oldSpecs et.ExtensionTestSpecs) ([]string, error) {
+ currentSpecs := e.GetSpecs()
+ // It's neat we can do it with CEL but can it handle it when we've got 10K tests in there?
+ potentiallyMissing, err := oldSpecs.Filter([]string{fmt.Sprintf(`!(name in %s)`, strSliceToCEL(currentSpecs.Names()))})
+ if err != nil {
+ return nil, err
+ }
+
+ actuallyMissing, err := potentiallyMissing.Filter([]string{fmt.Sprintf(`!(%s.exists(d, name == d))`,
+ strSliceToCEL(currentSpecs.OtherNames()))})
+ if err != nil {
+ return nil, err
+ }
+
+ var unpermittedMissingTests []string
+ for _, spec := range actuallyMissing {
+ missing := true
+ for _, allowed := range e.obsoleteTests {
+ if spec.Name == allowed {
+ missing = false
+ }
+ }
+ if missing {
+ unpermittedMissingTests = append(unpermittedMissingTests, spec.Name)
+ }
+ }
+
+ if len(unpermittedMissingTests) > 0 {
+ return unpermittedMissingTests, fmt.Errorf("some tests were not found")
+ }
+
+ return nil, nil
+}
+
+// AddGlobalSuite adds a suite whose qualifiers will apply to all tests,
+// not just this extension's, allowing a developer to compose a suite of
+// tests from many sources.
+func (e *Extension) AddGlobalSuite(suite Suite) *Extension {
+ if e.Suites == nil {
+ e.Suites = []Suite{suite}
+ } else {
+ e.Suites = append(e.Suites, suite)
+ }
+
+ return e
+}
+
+// AddSuite adds a suite whose qualifiers will only apply to tests from
+// this extension.
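+// For example, for component "openshift:payload:example-tests", a qualifier
+// `name.contains("foo")` becomes:
+// (source == "openshift:payload:example-tests") && (name.contains("foo"))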
+func (e *Extension) AddSuite(suite Suite) *Extension {
+ expr := fmt.Sprintf("source == %q", e.Component.Identifier())
+ for i := range suite.Qualifiers {
+ suite.Qualifiers[i] = fmt.Sprintf("(%s) && (%s)", expr, suite.Qualifiers[i])
+ }
+ e.AddGlobalSuite(suite)
+ return e
+}
+
+func (e *Extension) FindSpecsByName(names ...string) (et.ExtensionTestSpecs, error) {
+ var specs et.ExtensionTestSpecs
+ var notFound []string
+
+ for _, name := range names {
+ found := false
+ for i := range e.specs {
+ if e.specs[i].Name == name {
+ specs = append(specs, e.specs[i])
+ found = true
+ break
+ }
+ }
+ if !found {
+ notFound = append(notFound, name)
+ }
+ }
+
+ if len(notFound) > 0 {
+ return nil, fmt.Errorf("no such tests: %s", strings.Join(notFound, ", "))
+ }
+
+ return specs, nil
+}
+
+func (e *Component) Identifier() string {
+ return fmt.Sprintf("%s:%s:%s", e.Product, e.Kind, e.Name)
+}
diff --git a/pkg/extension/extension_test.go b/pkg/extension/extension_test.go
new file mode 100644
index 0000000..d2e5573
--- /dev/null
+++ b/pkg/extension/extension_test.go
@@ -0,0 +1,91 @@
+package extension
+
+import (
+ "reflect"
+ "testing"
+
+ "k8s.io/apimachinery/pkg/util/sets"
+
+ et "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests"
+)
+
+func TestExtension_FindRemovedTestsWithoutRename(t *testing.T) {
+ tests := []struct {
+ name string
+ old et.ExtensionTestSpecs
+ new et.ExtensionTestSpecs
+ obsoleteTests []string
+ want []string
+ wantErr bool
+ }{
+ {
+ name: "allows a test to be renamed",
+ old: et.ExtensionTestSpecs{
+ {
+ Name: "this test has a tpyo",
+ },
+ },
+ new: et.ExtensionTestSpecs{
+ {
+ Name: "this test doesn't have a typo",
+ OtherNames: sets.New[string]("this test has a tpyo"),
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "fails when a test is removed",
+ old: et.ExtensionTestSpecs{
+ {
+ Name: "this test was deleted",
+ },
+ },
+ new: et.ExtensionTestSpecs{},
+ want: []string{"this test was deleted"},
+ wantErr: true,
+ },
+ {
+ name: "succeeds when a test is removed and it's marked obsolete",
+ old: et.ExtensionTestSpecs{
+ {
+ Name: "this test was deleted",
+ },
+ },
+ new: et.ExtensionTestSpecs{},
+ obsoleteTests: []string{"this test was deleted"},
+ wantErr: false,
+ },
+ {
+ name: "fails when a test is renamed without other names",
+ old: et.ExtensionTestSpecs{
+ {
+ Name: "this test has a tpyo",
+ },
+ },
+ new: et.ExtensionTestSpecs{
+ {
+ Name: "this test doesn't have a typo",
+ },
+ },
+ want: []string{"this test has a tpyo"},
+ wantErr: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ext := NewExtension("openshift", "testing", "dummy")
+ ext.AddSpecs(tt.new)
+ ext.IgnoreObsoleteTests(tt.obsoleteTests...)
+
+ got, err := ext.FindRemovedTestsWithoutRename(tt.old)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("FindRemovedTestsWithoutRename() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("FindRemovedTestsWithoutRename() got = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/pkg/extension/extensiontests/result.go b/pkg/extension/extensiontests/result.go
new file mode 100644
index 0000000..b0ae1c4
--- /dev/null
+++ b/pkg/extension/extensiontests/result.go
@@ -0,0 +1,7 @@
+package extensiontests
+
+func (results ExtensionTestResults) Walk(walkFn func(*ExtensionTestResult)) {
+ for i := range results {
+ walkFn(results[i])
+ }
+}
diff --git a/pkg/extension/extensiontests/result_writer.go b/pkg/extension/extensiontests/result_writer.go
new file mode 100644
index 0000000..104e266
--- /dev/null
+++ b/pkg/extension/extensiontests/result_writer.go
@@ -0,0 +1,61 @@
+package extensiontests
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+)
+
+type ResultFormat string
+
+var (
+ JSON ResultFormat = "json"
+ JSONL ResultFormat = "jsonl"
+)
+
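+// ResultWriter emits test results in the requested format: JSONL results are
+// streamed as they arrive, while JSON results are buffered and written as a
+// single indented array on Flush.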
+type ResultWriter struct {
+ out io.Writer
+ format ResultFormat
+ results ExtensionTestResults
+}
+
+func NewResultWriter(out io.Writer, format ResultFormat) (*ResultWriter, error) {
+ switch format {
+ case JSON, JSONL:
+ // do nothing
+ default:
+ return nil, fmt.Errorf("unsupported result format: %s", format)
+ }
+
+ return &ResultWriter{
+ out: out,
+ format: format,
+ }, nil
+}
+
+func (w *ResultWriter) Write(result *ExtensionTestResult) {
+ switch w.format {
+ case JSONL:
+ // JSONL gets written to out as we get the items
+ data, err := json.Marshal(result)
+ if err != nil {
+ panic(err)
+ }
+ fmt.Fprintf(w.out, "%s\n", string(data))
+ case JSON:
+ w.results = append(w.results, result)
+ }
+}
+
+func (w *ResultWriter) Flush() {
+ switch w.format {
+ case JSONL:
+ // we already wrote it out
+ case JSON:
+ data, err := json.MarshalIndent(w.results, "", " ")
+ if err != nil {
+ panic(err)
+ }
+ fmt.Fprintf(w.out, "%s\n", string(data))
+ }
+}
diff --git a/pkg/extension/extensiontests/spec.go b/pkg/extension/extensiontests/spec.go
new file mode 100644
index 0000000..ee5a744
--- /dev/null
+++ b/pkg/extension/extensiontests/spec.go
@@ -0,0 +1,215 @@
+package extensiontests
+
+import (
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/google/cel-go/cel"
+ "github.com/google/cel-go/checker/decls"
+ "github.com/google/cel-go/common/types"
+
+ t "github.com/openshift-eng/openshift-tests-extension/pkg/time"
+)
+
+func (specs ExtensionTestSpecs) Walk(walkFn func(*ExtensionTestSpec)) ExtensionTestSpecs {
+ for i := range specs {
+ walkFn(specs[i])
+ }
+
+ return specs
+}
+
+func (specs ExtensionTestSpecs) OtherNames() []string {
+ var names []string
+ for _, spec := range specs {
+ for other := range spec.OtherNames {
+ names = append(names, other)
+ }
+ }
+ return names
+}
+
+func (specs ExtensionTestSpecs) Names() []string {
+ var names []string
+ for _, spec := range specs {
+ names = append(names, spec.Name)
+ }
+ return names
+}
+
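+// Run executes all specs using a pool of up to maxConcurrent workers, writing
+// each result to w as it completes. It returns an error if any test failed.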
+func (specs ExtensionTestSpecs) Run(w *ResultWriter, maxConcurrent int) error {
+ queue := make(chan *ExtensionTestSpec)
+ failures := atomic.Int64{}
+
+ // Feed the queue
+ go func() {
+ specs.Walk(func(spec *ExtensionTestSpec) {
+ queue <- spec
+ })
+ close(queue)
+ }()
+
+ // Start consumers
+ var wg sync.WaitGroup
+ for i := 0; i < maxConcurrent; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for spec := range queue {
+ res := runSpec(spec)
+ if res.Result == ResultFailed {
+ failures.Add(1)
+ }
+ w.Write(res)
+ }
+ }()
+ }
+
+ // Wait for all consumers to finish
+ wg.Wait()
+
+ failCount := failures.Load()
+ if failCount > 0 {
+ return fmt.Errorf("%d tests failed", failCount)
+ }
+ return nil
+}
+
+func (specs ExtensionTestSpecs) MustFilter(celExprs []string) ExtensionTestSpecs {
+ specs, err := specs.Filter(celExprs)
+ if err != nil {
+ panic(fmt.Sprintf("filter did not succeed: %s", err.Error()))
+ }
+
+ return specs
+}
+
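+// Filter returns the specs matching at least one of the given CEL expressions;
+// the expressions are OR'd together. An empty expression list returns all specs.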
+func (specs ExtensionTestSpecs) Filter(celExprs []string) (ExtensionTestSpecs, error) {
+ var filteredSpecs ExtensionTestSpecs
+
+ // Empty filters returns all
+ if len(celExprs) == 0 {
+ return specs, nil
+ }
+
+ env, err := cel.NewEnv(
+ cel.Declarations(
+ decls.NewVar("source", decls.String),
+ decls.NewVar("name", decls.String),
+ decls.NewVar("other_names", decls.NewListType(decls.String)),
+ decls.NewVar("labels", decls.NewListType(decls.String)),
+ decls.NewVar("tags", decls.NewMapType(decls.String, decls.String)),
+ ),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("failed to create CEL environment: %w", err)
+ }
+
+ // OR all expressions together
+ for _, spec := range specs {
+ include := false
+ for _, celExpr := range celExprs {
+ // Parse CEL expression
+ ast, iss := env.Parse(celExpr)
+ if iss.Err() != nil {
+ return nil, fmt.Errorf("error parsing CEL expression '%s': %v", celExpr, iss.Err())
+ }
+
+ // Check the AST
+ checked, iss := env.Check(ast)
+ if iss.Err() != nil {
+ return nil, fmt.Errorf("error checking CEL expression '%s': %v", celExpr, iss.Err())
+ }
+
+ // Create a CEL program from the checked AST
+ prg, err := env.Program(checked)
+ if err != nil {
+ return nil, fmt.Errorf("error creating CEL program: %v", err)
+ }
+
+ out, _, err := prg.Eval(map[string]interface{}{
+ "name": spec.Name,
+ "source": spec.Source,
+ "other_names": spec.OtherNames,
+ "labels": spec.Labels.UnsortedList(),
+ "tags": spec.Tags,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("error evaluating CEL expression: %v", err)
+ }
+
+ // If any CEL expression evaluates to true, include the TestSpec
+ if out == types.True {
+ include = true
+ break
+ }
+ }
+ if include {
+ filteredSpecs = append(filteredSpecs, spec)
+ }
+ }
+
+ return filteredSpecs, nil
+}
+
+func (specs ExtensionTestSpecs) AddLabel(labels ...string) ExtensionTestSpecs {
+ for i := range specs {
+ specs[i].Labels.Insert(labels...)
+ }
+
+ return specs
+}
+
+func (specs ExtensionTestSpecs) RemoveLabel(labels ...string) ExtensionTestSpecs {
+ for i := range specs {
+ specs[i].Labels.Delete(labels...)
+ }
+
+ return specs
+}
+
+func (specs ExtensionTestSpecs) SetTag(key, value string) ExtensionTestSpecs {
+ for i := range specs {
+ specs[i].Tags[key] = value
+ }
+
+ return specs
+}
+
+func (specs ExtensionTestSpecs) UnsetTag(key string) ExtensionTestSpecs {
+ for i := range specs {
+ delete(specs[i].Tags, key)
+ }
+
+ return specs
+}
+
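+// runSpec executes a single spec and fills in the lifecycle and timing fields
+// when the runner didn't populate them itself.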
+func runSpec(spec *ExtensionTestSpec) *ExtensionTestResult {
+ startTime := time.Now().UTC()
+ res := spec.Run()
+ duration := time.Since(startTime)
+ endTime := startTime.Add(duration).UTC()
+ if res == nil {
+ // this shouldn't happen
+ panic(fmt.Sprintf("test produced no result: %s", spec.Name))
+ }
+
+ res.Lifecycle = spec.Lifecycle
+
+ // If the runner doesn't populate this info, we should set it
+ if res.StartTime == nil {
+ dbTime := t.DBTime(startTime)
+ res.StartTime = &dbTime
+ }
+ if res.EndTime == nil {
+ dbTime := t.DBTime(endTime)
+ res.EndTime = &dbTime
+ }
+ if res.Duration == 0 {
+ res.Duration = duration.Milliseconds()
+ }
+
+ return res
+}
diff --git a/pkg/extension/extensiontests/spec_test.go b/pkg/extension/extensiontests/spec_test.go
new file mode 100644
index 0000000..5eedfc6
--- /dev/null
+++ b/pkg/extension/extensiontests/spec_test.go
@@ -0,0 +1,126 @@
+package extensiontests
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "k8s.io/apimachinery/pkg/util/sets"
+)
+
+func TestExtensionTestSpecs_Walk(t *testing.T) {
+ specs := ExtensionTestSpecs{
+ {Name: "test1"},
+ {Name: "test2"},
+ }
+
+ var walkedNames []string
+ specs.Walk(func(spec *ExtensionTestSpec) {
+ walkedNames = append(walkedNames, spec.Name)
+ })
+
+ assert.Equal(t, []string{"test1", "test2"}, walkedNames)
+}
+
+func TestExtensionTestSpecs_MustFilter(t *testing.T) {
+ specs := ExtensionTestSpecs{
+ {Name: "test1"},
+ }
+
+ defer func() {
+ if r := recover(); r != nil {
+ assert.Contains(t, r.(string), "filter did not succeed")
+ }
+ }()
+
+ // CEL expression that should fail
+ specs.MustFilter([]string{"invalid_expr"})
+ t.Errorf("Expected panic, but code continued")
+}
+
+func TestExtensionTestSpecs_Filter(t *testing.T) {
+ tests := []struct {
+ name string
+ specs ExtensionTestSpecs
+ celExprs []string
+ want ExtensionTestSpecs
+ wantErr bool
+ }{
+ {
+ name: "simple filter on name",
+ specs: ExtensionTestSpecs{
+ {
+ Name: "test1",
+ },
+ {
+ Name: "test2",
+ },
+ },
+ celExprs: []string{`name == "test1"`},
+ want: ExtensionTestSpecs{
+ {
+ Name: "test1",
+ },
+ },
+ },
+ {
+ name: "filter on tags",
+ specs: ExtensionTestSpecs{
+ {Name: "test1", Tags: map[string]string{"env": "prod"}},
+ {Name: "test2", Tags: map[string]string{"env": "dev"}},
+ },
+ celExprs: []string{"tags['env'] == 'prod'"},
+ want: ExtensionTestSpecs{
+ {Name: "test1", Tags: map[string]string{"env": "prod"}},
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := tt.specs.Filter(tt.celExprs)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("Filter() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("Filter() got = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func TestExtensionTestSpecs_AddLabel(t *testing.T) {
+ specs := ExtensionTestSpecs{
+ {Name: "test1", Labels: sets.New[string]()},
+ }
+
+ specs = specs.AddLabel("critical")
+ assert.True(t, specs[0].Labels.Has("critical"))
+}
+
+func TestExtensionTestSpecs_RemoveLabel(t *testing.T) {
+ specs := ExtensionTestSpecs{
+ {Name: "test1", Labels: sets.New[string]("to_remove")},
+ }
+ specs = specs.RemoveLabel("to_remove")
+ assert.False(t, specs[0].Labels.Has("to_remove"))
+}
+
+func TestExtensionTestSpecs_SetTag(t *testing.T) {
+ specs := ExtensionTestSpecs{
+ {Name: "test1", Tags: make(map[string]string)},
+ }
+
+ specs = specs.SetTag("priority", "high")
+ assert.Equal(t, "high", specs[0].Tags["priority"])
+}
+
+func TestExtensionTestSpecs_UnsetTag(t *testing.T) {
+ specs := ExtensionTestSpecs{
+ {Name: "test1", Tags: map[string]string{"priority": "high"}},
+ }
+
+ specs = specs.UnsetTag("priority")
+ _, exists := specs[0].Tags["priority"]
+ assert.False(t, exists)
+}
diff --git a/pkg/extension/extensiontests/types.go b/pkg/extension/extensiontests/types.go
new file mode 100644
index 0000000..e169750
--- /dev/null
+++ b/pkg/extension/extensiontests/types.go
@@ -0,0 +1,75 @@
+package extensiontests
+
+import (
+ "k8s.io/apimachinery/pkg/util/sets"
+
+ "github.com/openshift-eng/openshift-tests-extension/pkg/time"
+)
+
+type Lifecycle string
+
+var LifecycleInforming Lifecycle = "informing"
+var LifecycleBlocking Lifecycle = "blocking"
+
+type ExtensionTestSpecs []*ExtensionTestSpec
+
+type ExtensionTestSpec struct {
+ Name string `json:"name"`
+
+ // OtherNames contains a list of historical names for this test. If the test is renamed,
+ // this set must contain all of its previous names to preserve history.
+ OtherNames sets.Set[string] `json:"otherNames"`
+
+ // Labels are single string values to apply to the test spec
+ Labels sets.Set[string] `json:"labels"`
+
+ // Tags are key:value pairs
+ Tags map[string]string `json:"tags"`
+
+ // Resources gives optional information about what's required to run this test.
+ Resources Resources `json:"resources"`
+
+ // Source is the origin of the test.
+ Source string `json:"source"`
+
+ // Lifecycle informs the executor whether the test is informing only (it should not cause
+ // the overall job run to fail) or blocking (a failure of the test is fatal).
+ // Informing lifecycle tests can be used temporarily to gather information about a test's stability.
+ // Tests must not remain informing forever.
+ Lifecycle Lifecycle `json:"lifecycle"`
+
+ // Run invokes a test (TODO: replace ginkgo spec state with our own)
+ Run func() *ExtensionTestResult `json:"-"`
+}
+
+type Resources struct {
+ Isolation Isolation `json:"isolation"`
+ Memory string `json:"memory"`
+ Duration string `json:"duration"`
+ Timeout string `json:"timeout"`
+}
+
+type Isolation struct {
+ Mode string `json:"mode"`
+ Conflict []string `json:"conflict"`
+}
+
+type ExtensionTestResults []*ExtensionTestResult
+
+type Result string
+
+var ResultPassed Result = "passed"
+var ResultSkipped Result = "skipped"
+var ResultFailed Result = "failed"
+
+type ExtensionTestResult struct {
+ Name string `json:"name"`
+ Lifecycle Lifecycle `json:"lifecycle"`
+ Duration int64 `json:"duration"`
+ StartTime *time.DBTime `json:"startTime"`
+ EndTime *time.DBTime `json:"endTime"`
+ Result Result `json:"result"`
+ Output string `json:"output"`
+ Error string `json:"error"`
+ Details []string `json:"details"`
+}
diff --git a/pkg/extension/registry.go b/pkg/extension/registry.go
new file mode 100644
index 0000000..cbffa04
--- /dev/null
+++ b/pkg/extension/registry.go
@@ -0,0 +1,41 @@
+package extension
+
+import "fmt"
+
+const DefaultExtension = "default"
+
+type Registry struct {
+ extensions map[string]*Extension
+}
+
+func NewRegistry() *Registry {
+ var r Registry
+ return &r
+}
+
+func (r *Registry) Walk(walkFn func(*Extension)) {
+ for k := range r.extensions {
+ if k == DefaultExtension {
+ continue
+ }
+ walkFn(r.extensions[k])
+ }
+}
+
+func (r *Registry) Get(name string) *Extension {
+ return r.extensions[name]
+}
+
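+// Register adds an extension to the registry, keyed by its component
+// identifier. The first extension registered also becomes the default.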
+func (r *Registry) Register(extension *Extension) {
+ if r.extensions == nil {
+ r.extensions = make(map[string]*Extension)
+ // first extension is default
+ r.extensions[DefaultExtension] = extension
+ }
+
+ r.extensions[fmt.Sprintf("%s", extension.Component.Identifier())] = extension
+}
+
+func (r *Registry) Deregister(name string) {
+ delete(r.extensions, name)
+}
diff --git a/pkg/extension/types.go b/pkg/extension/types.go
new file mode 100644
index 0000000..beb254d
--- /dev/null
+++ b/pkg/extension/types.go
@@ -0,0 +1,53 @@
+package extension
+
+import (
+ "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests"
+)
+
+const CurrentExtensionAPIVersion = "v1.0"
+
+// Extension represents an extension to openshift-tests.
+type Extension struct {
+ APIVersion string `json:"apiVersion"`
+ Source Source `json:"source"`
+ Component Component `json:"component"`
+
+ // Suites that the extension wants to advertise/participate in.
+ Suites []Suite `json:"suites"`
+
+ // Private data
+ specs extensiontests.ExtensionTestSpecs
+ obsoleteTests []string
+}
+
+// Source contains the details of the commit and source URL.
+type Source struct {
+ // Commit from which this binary was compiled.
+ Commit string `json:"commit"`
+ // BuildDate is an ISO8601 timestamp of when the binary was built
+ BuildDate string `json:"build_date"`
+ // GitTreeState lets you know the status of the git tree (clean/dirty)
+ GitTreeState string `json:"git_tree_state"`
+ // SourceURL contains the url of the git repository (if known) that this extension was built from.
+ SourceURL string `json:"source_url,omitempty"`
+}
+
+// Component represents the component the binary acts on.
+type Component struct {
+ // The product this component is part of.
+ Product string `json:"product"`
+ // The type of the component.
+ Kind string `json:"type"`
+ // The name of the component.
+ Name string `json:"name"`
+}
+
+// Suite represents additional suites the extension wants to advertise.
+type Suite struct {
+ // The name of the suite.
+ Name string `json:"name"`
+ // Parent suites this suite is part of.
+ Parents []string `json:"parents,omitempty"`
+ // Qualifiers are CEL expressions that are OR'd together to select the tests that are members of the suite.
+ Qualifiers []string `json:"qualifiers,omitempty"`
+}
diff --git a/pkg/extension/util.go b/pkg/extension/util.go
new file mode 100644
index 0000000..505ec20
--- /dev/null
+++ b/pkg/extension/util.go
@@ -0,0 +1,10 @@
+package extension
+
+import (
+ "fmt"
+ "strings"
+)
+
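+// strSliceToCEL renders a string slice as a CEL list literal, e.g.
+// []string{"a", "b"} becomes ["a","b"]. Note that double quotes inside
+// values are not escaped.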
+func strSliceToCEL(s []string) string {
+ return fmt.Sprintf(`["%s"]`, strings.Join(s, `","`))
+}
diff --git a/pkg/flags/component.go b/pkg/flags/component.go
new file mode 100644
index 0000000..55e9181
--- /dev/null
+++ b/pkg/flags/component.go
@@ -0,0 +1,25 @@
+package flags
+
+import (
+ "github.com/spf13/pflag"
+
+ "github.com/openshift-eng/openshift-tests-extension/pkg/extension"
+)
+
+// ComponentFlags contains information for specifying the component.
+type ComponentFlags struct {
+ Component string
+}
+
+func NewComponentFlags() *ComponentFlags {
+ return &ComponentFlags{
+ Component: extension.DefaultExtension,
+ }
+}
+
+func (f *ComponentFlags) BindFlags(fs *pflag.FlagSet) {
+ fs.StringVar(&f.Component,
+ "component",
+ f.Component,
+ "specify the component to enable")
+}
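+
+// Sketch of composing these flag structs with a pflag FlagSet (cmd is a
+// hypothetical cobra command):
+//
+//    compFlags := NewComponentFlags()
+//    compFlags.BindFlags(cmd.Flags()) // adds --component, defaulting to "default"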
diff --git a/pkg/flags/concurrency.go b/pkg/flags/concurrency.go
new file mode 100644
index 0000000..2db07c7
--- /dev/null
+++ b/pkg/flags/concurrency.go
@@ -0,0 +1,23 @@
+package flags
+
+import "github.com/spf13/pflag"
+
+// ConcurrencyFlags contains information for configuring concurrency.
+type ConcurrencyFlags struct {
+ MaxConcurrency int
+}
+
+func NewConcurrencyFlags() *ConcurrencyFlags {
+ return &ConcurrencyFlags{
+ MaxConcurrency: 10,
+ }
+}
+
+func (f *ConcurrencyFlags) BindFlags(fs *pflag.FlagSet) {
+ fs.IntVarP(&f.MaxConcurrency,
+ "max-concurrency",
+ "c",
+ f.MaxConcurrency,
+ "maximum number of tests to run in parallel",
+ )
+}
diff --git a/pkg/flags/names.go b/pkg/flags/names.go
new file mode 100644
index 0000000..b073f1a
--- /dev/null
+++ b/pkg/flags/names.go
@@ -0,0 +1,24 @@
+package flags
+
+import (
+ "github.com/spf13/pflag"
+)
+
+// NamesFlags contains information for specifying multiple test names.
+type NamesFlags struct {
+ Names []string
+}
+
+func NewNamesFlags() *NamesFlags {
+ return &NamesFlags{
+ Names: []string{},
+ }
+}
+
+func (f *NamesFlags) BindFlags(fs *pflag.FlagSet) {
+ fs.StringSliceVarP(&f.Names,
+ "names",
+ "n",
+ f.Names,
+ "specify test name (can be specified multiple times)")
+}
diff --git a/pkg/flags/output.go b/pkg/flags/output.go
new file mode 100644
index 0000000..24f49f6
--- /dev/null
+++ b/pkg/flags/output.go
@@ -0,0 +1,95 @@
+package flags
+
+import (
+ "encoding/json"
+ "reflect"
+ "strings"
+
+ "github.com/pkg/errors"
+ "github.com/spf13/pflag"
+)
+
+// OutputFlags contains information for specifying the output format.
+type OutputFlags struct {
+ Output string
+}
+
+func NewOutputFlags() *OutputFlags {
+ return &OutputFlags{
+ Output: "json",
+ }
+}
+
+func (f *OutputFlags) BindFlags(fs *pflag.FlagSet) {
+ fs.StringVarP(&f.Output,
+ "output",
+ "o",
+ f.Output,
+ "output mode")
+}
+
+func (o *OutputFlags) Marshal(v interface{}) ([]byte, error) {
+ switch o.Output {
+ case "", "json":
+ j, err := json.MarshalIndent(&v, "", " ")
+ if err != nil {
+ return nil, err
+ }
+ return j, nil
+ case "jsonl":
+ // Check if v is a slice or array
+ val := reflect.ValueOf(v)
+ if val.Kind() == reflect.Slice || val.Kind() == reflect.Array {
+ var result []byte
+ for i := 0; i < val.Len(); i++ {
+ item := val.Index(i).Interface()
+ j, err := json.Marshal(item)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, j...)
+ result = append(result, '\n') // Append newline after each item
+ }
+ return result, nil
+ }
+ return nil, errors.New("jsonl format requires a slice or array")
+ case "names":
+ val := reflect.ValueOf(v)
+ if val.Kind() == reflect.Slice || val.Kind() == reflect.Array {
+ var names []string
+ outerLoop:
+ for i := 0; i < val.Len(); i++ {
+ item := val.Index(i)
+ // Check for Name() or Identifier() methods
+ itemInterface := item.Interface()
+ nameFuncs := []string{"Name", "Identifier"}
+ for _, fn := range nameFuncs {
+ method := reflect.ValueOf(itemInterface).MethodByName(fn)
+ if method.IsValid() && method.Kind() == reflect.Func && method.Type().NumIn() == 0 && method.Type().NumOut() == 1 && method.Type().Out(0).Kind() == reflect.String {
+ name := method.Call(nil)[0].String()
+ names = append(names, name)
+ continue outerLoop
+ }
+ }
+
+ // Dereference pointer if needed
+ if item.Kind() == reflect.Ptr {
+ item = item.Elem()
+ }
+ // Check for struct with Name field
+ if item.Kind() == reflect.Struct {
+ nameField := item.FieldByName("Name")
+ if nameField.IsValid() && nameField.Kind() == reflect.String {
+ names = append(names, nameField.String())
+ }
+ } else {
+ return nil, errors.New("items must have a Name field or a Name() method")
+ }
+ }
+ return []byte(strings.Join(names, "\n")), nil
+ }
+ return nil, errors.New("names format requires an array of structs")
+ default:
+ return nil, errors.Errorf("invalid output format: %s", o.Output)
+ }
+}
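+
+// Rough sketch of the three output modes, assuming specs is a slice whose
+// elements expose a Name()/Identifier() method or a Name field:
+//
+//    f := NewOutputFlags()   // "json": one indented JSON document
+//    f.Output = "jsonl"      // one JSON object per line; requires a slice or array
+//    f.Output = "names"      // newline-separated names
+//    out, err := f.Marshal(specs)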
diff --git a/pkg/flags/suite.go b/pkg/flags/suite.go
new file mode 100644
index 0000000..23de832
--- /dev/null
+++ b/pkg/flags/suite.go
@@ -0,0 +1,21 @@
+package flags
+
+import (
+ "github.com/spf13/pflag"
+)
+
+// SuiteFlags contains information for specifying the suite.
+type SuiteFlags struct {
+ Suite string
+}
+
+func NewSuiteFlags() *SuiteFlags {
+ return &SuiteFlags{}
+}
+
+func (f *SuiteFlags) BindFlags(fs *pflag.FlagSet) {
+ fs.StringVar(&f.Suite,
+ "suite",
+ f.Suite,
+ "specify the suite to use")
+}
diff --git a/pkg/ginkgo/types.go b/pkg/ginkgo/types.go
new file mode 100644
index 0000000..942a1ee
--- /dev/null
+++ b/pkg/ginkgo/types.go
@@ -0,0 +1,14 @@
+package ginkgo
+
+import "github.com/onsi/ginkgo/v2/types"
+
+type TestCase struct {
+ Name string
+ locations []types.CodeLocation
+ spec types.TestSpec
+ Labels []string `json:",omitempty"`
+}
+
+type ExitError struct {
+ Code int
+}
diff --git a/pkg/ginkgo/util.go b/pkg/ginkgo/util.go
new file mode 100644
index 0000000..f4aa2ea
--- /dev/null
+++ b/pkg/ginkgo/util.go
@@ -0,0 +1,153 @@
+package ginkgo
+
+import (
+ "fmt"
+ "os"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/onsi/ginkgo/v2"
+ "github.com/onsi/ginkgo/v2/types"
+ "github.com/onsi/gomega"
+ "github.com/pkg/errors"
+ "k8s.io/apimachinery/pkg/util/sets"
+
+ ext "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests"
+)
+
+func configureGinkgo() (*types.SuiteConfig, *types.ReporterConfig, error) {
+ if !ginkgo.GetSuite().InPhaseBuildTree() {
+ if err := ginkgo.GetSuite().BuildTree(); err != nil {
+ return nil, nil, errors.Wrapf(err, "couldn't build ginkgo tree")
+ }
+ }
+
+ // Ginkgo initialization
+ ginkgo.GetSuite().ClearBeforeAndAfterSuiteNodes()
+ suiteConfig, reporterConfig := ginkgo.GinkgoConfiguration()
+ suiteConfig.RandomizeAllSpecs = true
+ suiteConfig.Timeout = 24 * time.Hour
+ reporterConfig.NoColor = true
+ reporterConfig.Verbose = true
+ ginkgo.SetReporterConfig(reporterConfig)
+
+ // Throw away all output
+ ginkgo.GinkgoWriter = GinkgoDiscard
+
+ gomega.RegisterFailHandler(ginkgo.Fail)
+
+ return &suiteConfig, &reporterConfig, nil
+}
+
+func BuildExtensionTestSpecsFromOpenShiftGinkgoSuite() (ext.ExtensionTestSpecs, error) {
+ var tests []*ext.ExtensionTestSpec
+ var enforceSerialExecutionForGinkgo sync.Mutex // ginkgo does not currently support in-process parallelization
+
+ if _, _, err := configureGinkgo(); err != nil {
+ return nil, err
+ }
+
+ cwd, err := os.Getwd()
+ if err != nil {
+ return nil, errors.Wrap(err, "couldn't get current working directory")
+ }
+
+ ginkgo.GetSuite().WalkTests(func(name string, spec types.TestSpec) {
+ testCase := &ext.ExtensionTestSpec{
+ Name: spec.Text(),
+ Labels: sets.New[string](spec.Labels()...),
+ Lifecycle: GetLifecycle(spec.Labels()),
+ Run: func() *ext.ExtensionTestResult {
+ enforceSerialExecutionForGinkgo.Lock()
+ defer enforceSerialExecutionForGinkgo.Unlock()
+
+ suiteConfig, reporterConfig, _ := configureGinkgo()
+
+ result := &ext.ExtensionTestResult{
+ Name: spec.Text(),
+ }
+
+ var summary types.SpecReport
+ ginkgo.GetSuite().RunSpec(spec, ginkgo.Labels{}, "", cwd, ginkgo.GetFailer(), GinkgoDiscard, *suiteConfig,
+ *reporterConfig)
+ for _, report := range ginkgo.GetSuite().GetReport().SpecReports {
+ if report.NumAttempts > 0 {
+ summary = report
+ }
+ }
+
+ result.Output = summary.CapturedGinkgoWriterOutput
+ result.Error = summary.CapturedStdOutErr
+
+ switch {
+ case summary.State == types.SpecStatePassed:
+ result.Result = ext.ResultPassed
+ case summary.State == types.SpecStateSkipped:
+ result.Result = ext.ResultSkipped
+ case summary.State == types.SpecStateFailed, summary.State == types.SpecStatePanicked, summary.State == types.SpecStateInterrupted:
+ result.Result = ext.ResultFailed
+ var errs []string
+ if len(summary.Failure.ForwardedPanic) > 0 {
+ if len(summary.Failure.Location.FullStackTrace) > 0 {
+ errs = append(errs, fmt.Sprintf("\n%s\n", summary.Failure.Location.FullStackTrace))
+ }
+ errs = append(errs, fmt.Sprintf("fail [%s:%d]: Test Panicked: %s", lastFilenameSegment(summary.Failure.Location.FileName), summary.Failure.Location.LineNumber, summary.Failure.ForwardedPanic))
+ }
+ errs = append(errs, fmt.Sprintf("fail [%s:%d]: %s", lastFilenameSegment(summary.Failure.Location.FileName), summary.Failure.Location.LineNumber, summary.Failure.Message))
+ result.Error = strings.Join(errs, "\n")
+ default:
+ panic(fmt.Sprintf("test produced unknown outcome: %#v", summary))
+ }
+
+ return result
+ },
+ }
+ tests = append(tests, testCase)
+ })
+
+ return tests, nil
+}
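+
+// Callers typically invoke this once at binary startup to convert the ginkgo
+// tree into extension test specs (sketch; error handling elided, AddSpecs is
+// a hypothetical registration step):
+//
+//    specs, err := BuildExtensionTestSpecsFromOpenShiftGinkgoSuite()
+//    myExtension.AddSpecs(specs)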
+
+func Informing() ginkgo.Labels {
+ return ginkgo.Label(fmt.Sprintf("Lifecycle:%s", ext.LifecycleInforming))
+}
+
+func Slow() ginkgo.Labels {
+ return ginkgo.Label("SLOW")
+}
+
+func Blocking() ginkgo.Labels {
+ return ginkgo.Label(fmt.Sprintf("Lifecycle:%s", ext.LifecycleBlocking))
+}
+
+func GetLifecycle(labels ginkgo.Labels) ext.Lifecycle {
+ for _, label := range labels {
+ res := strings.Split(label, ":")
+ if len(res) != 2 || !strings.EqualFold(res[0], "lifecycle") {
+ continue
+ }
+ return MustLifecycle(res[1]) // this panics if unsupported lifecycle is used
+ }
+
+ return ext.LifecycleBlocking
+}
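+
+// For example, a spec declared as
+//
+//    It("runs", Informing(), func() { ... })
+//
+// carries a "Lifecycle:<value>" label, so GetLifecycle returns
+// ext.LifecycleInforming for it; specs without such a label default to blocking.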
+
+func MustLifecycle(l string) ext.Lifecycle {
+ switch ext.Lifecycle(l) {
+ case ext.LifecycleInforming, ext.LifecycleBlocking:
+ return ext.Lifecycle(l)
+ default:
+ panic(fmt.Sprintf("unknown test lifecycle: %s", l))
+ }
+}
+
+func lastFilenameSegment(filename string) string {
+ if parts := strings.Split(filename, "/vendor/"); len(parts) > 1 {
+ return parts[len(parts)-1]
+ }
+ if parts := strings.Split(filename, "/src/"); len(parts) > 1 {
+ return parts[len(parts)-1]
+ }
+ return filename
+}
diff --git a/pkg/ginkgo/writer.go b/pkg/ginkgo/writer.go
new file mode 100644
index 0000000..46964ad
--- /dev/null
+++ b/pkg/ginkgo/writer.go
@@ -0,0 +1,39 @@
+package ginkgo
+
+import (
+ "io"
+)
+
+// GinkgoDiscard throws away all ginkgo output; the output we report is collected from the spec report summaries instead.
+var GinkgoDiscard = ginkgoDiscard{
+ io.Discard,
+}
+
+type ginkgoDiscard struct {
+ io.Writer
+}
+
+func (ginkgoDiscard) Print(_ ...interface{}) {
+}
+
+func (ginkgoDiscard) Printf(_ string, _ ...interface{}) {
+}
+
+func (ginkgoDiscard) Println(_ ...interface{}) {
+}
+
+func (ginkgoDiscard) TeeTo(_ io.Writer) {
+}
+
+func (ginkgoDiscard) Truncate() {
+}
+
+func (ginkgoDiscard) Bytes() []byte {
+ return nil
+}
+func (ginkgoDiscard) Len() int {
+ return 0
+}
+
+func (ginkgoDiscard) ClearTeeWriters() {
+}
diff --git a/pkg/time/time.go b/pkg/time/time.go
new file mode 100644
index 0000000..d5b17a8
--- /dev/null
+++ b/pkg/time/time.go
@@ -0,0 +1,22 @@
+package time
+
+import "time"
+
+// DBTime is a type suitable for direct importing into databases like BigQuery,
+// formatted like 2006-01-02 15:04:05.000000 UTC.
+type DBTime time.Time
+
+func (dbt *DBTime) MarshalJSON() ([]byte, error) {
+ formattedTime := time.Time(*dbt).Format(`"2006-01-02 15:04:05.000000 UTC"`)
+ return []byte(formattedTime), nil
+}
+
+func (dbt *DBTime) UnmarshalJSON(b []byte) error {
+ timeStr := string(b[1 : len(b)-1])
+ parsedTime, err := time.Parse("2006-01-02 15:04:05.000000 UTC", timeStr)
+ if err != nil {
+ return err
+ }
+ *dbt = (DBTime)(parsedTime)
+ return nil
+}
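+
+// Round-trip sketch: a DBTime marshals to a quoted string such as
+// "2024-01-02 03:04:05.000000 UTC", and UnmarshalJSON parses the same layout
+// back (t is an arbitrary time.Time):
+//
+//    b, _ := json.Marshal((*DBTime)(&t))
+//    var dbt DBTime
+//    _ = json.Unmarshal(b, &dbt)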
diff --git a/pkg/version/version.go b/pkg/version/version.go
new file mode 100644
index 0000000..7d6a330
--- /dev/null
+++ b/pkg/version/version.go
@@ -0,0 +1,11 @@
+package version
+
+var (
+ // CommitFromGit is the source control commit from which this build
+ // was generated. It should be set during build via -ldflags.
+ CommitFromGit string
+ // BuildDate in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
+ BuildDate string
+ // GitTreeState has the state of git tree, either "clean" or "dirty"
+ GitTreeState string
+)
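+
+// These are expected to be injected at build time; a typical invocation
+// (module path is the real one, values illustrative) looks like:
+//
+//    go build -ldflags "\
+//      -X github.com/openshift-eng/openshift-tests-extension/pkg/version.CommitFromGit=$(git rev-parse --short HEAD) \
+//      -X github.com/openshift-eng/openshift-tests-extension/pkg/version.BuildDate=$(date -u +'%Y-%m-%dT%H:%M:%SZ') \
+//      -X github.com/openshift-eng/openshift-tests-extension/pkg/version.GitTreeState=clean"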
diff --git a/test/example/example.go b/test/example/example.go
new file mode 100644
index 0000000..2dfd579
--- /dev/null
+++ b/test/example/example.go
@@ -0,0 +1,54 @@
+package example
+
+import (
+ "time"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+
+ g "github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo"
+)
+
+var flags struct {
+ beforeAllCount int
+ beforeEachCount int
+}
+
+var _ = Describe("[sig-testing] openshift-tests-extension ordered", Ordered, func() {
+ BeforeAll(func() {
+ flags.beforeAllCount++
+ })
+
+ It("should run beforeAll once", func() {
+ Expect(flags.beforeAllCount).To(Equal(1))
+ })
+})
+
+var _ = Describe("[sig-testing] openshift-tests-extension setup", func() {
+ BeforeEach(func() {
+ flags.beforeEachCount++
+ })
+
+ It("should support beforeEach", func() {
+ Expect(flags.beforeEachCount).To(BeNumerically(">", 0))
+ })
+})
+
+var _ = Describe("[sig-testing] openshift-tests-extension", func() {
+ It("should support passing tests", func() {
+ Expect(true).To(BeTrue())
+ })
+
+ It("should support failing tests", func() {
+ Expect(1).To(Equal(2))
+ })
+
+ It("should support panicking tests", func() {
+ panic("oh no")
+ })
+
+ It("should support slow tests", g.Slow(), func() {
+ time.Sleep(15 * time.Second)
+ Expect(true).To(BeTrue())
+ })
+})
diff --git a/test/framework/framework.go b/test/framework/framework.go
new file mode 100644
index 0000000..32d3216
--- /dev/null
+++ b/test/framework/framework.go
@@ -0,0 +1,34 @@
+package framework
+
+import (
+ "os"
+ "os/exec"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+// These tests have to be run from the root of the repository, as ginkgo seems to mess up
+// the CWD and runtime Caller doesn't get the right path. FIXME?
+const binary = "./example-tests"
+
+var _ = Describe("[sig-testing] openshift-tests-extension", Ordered, Label("framework"), func() {
+ It("should be run from the main directory", func() {
+ _, err := os.Stat("Makefile")
+ Expect(err).ShouldNot(HaveOccurred(), "Expected to be run from the root directory")
+ })
+
+ It("should run `make` command successfully", func() {
+ cmd := exec.Command("make", "example-tests")
+
+ // Run the command
+ err := cmd.Run()
+ Expect(err).ShouldNot(HaveOccurred(), "Expected `make` to run successfully")
+ })
+
+ It("should have the example-tests binary", func() {
+ _, err := os.Stat(binary)
+ Expect(err).ShouldNot(HaveOccurred(), "Expected `example-tests` binary to exist")
+ })
+
+})
diff --git a/test/framework/info.go b/test/framework/info.go
new file mode 100644
index 0000000..b3244b4
--- /dev/null
+++ b/test/framework/info.go
@@ -0,0 +1,43 @@
+package framework
+
+import (
+ "encoding/json"
+ "os/exec"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+
+ "github.com/openshift-eng/openshift-tests-extension/pkg/extension"
+)
+
+var _ = Describe("[sig-testing] example-tests info", Label("framework"), func() {
+ var result extension.Extension
+
+ BeforeEach(func() {
+ cmd := exec.Command(binary, "info")
+ output, err := cmd.Output()
+ Expect(err).ShouldNot(HaveOccurred(), "Expected `example-tests info` to run successfully")
+ // Unmarshal the JSON output
+ err = json.Unmarshal(output, &result)
+ Expect(err).ShouldNot(HaveOccurred(), "Expected JSON output to unmarshal into Extension struct")
+ })
+
+ It("should have build metadata", func() {
+ Expect(result.Source.Commit).To(Not(BeEmpty()))
+ Expect(result.Source.BuildDate).To(Not(BeEmpty()))
+ Expect(result.Source.GitTreeState).To(Not(BeEmpty()))
+ })
+
+ It("should have the expected suites", func() {
+ Expect(result.Suites).To(HaveLen(3), "expected 3 suites")
+ Expect(result.Suites).To(ContainElement(HaveField("Name", Equal("example/tests"))), "Expected to contain a suite with name 'example/tests'")
+ Expect(result.Suites).To(ContainElement(HaveField("Name", Equal("example/fast"))),
+ "Expected to contain a suite with name 'example/fast'")
+ })
+
+ It("should have the correct component information", func() {
+ Expect(result.Component.Product).To(Equal("openshift"), "Expected product to be 'openshift'")
+ Expect(result.Component.Kind).To(Equal("payload"), "Expected type to be 'payload'")
+ Expect(result.Component.Name).To(Equal("example-tests"), "Expected name to be 'example-tests'")
+ })
+})
diff --git a/test/framework/list.go b/test/framework/list.go
new file mode 100644
index 0000000..ba2372a
--- /dev/null
+++ b/test/framework/list.go
@@ -0,0 +1,43 @@
+package framework
+
+import (
+ "encoding/json"
+ "os/exec"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+
+ e "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests"
+)
+
+var _ = Describe("[sig-testing] example-tests list", Label("framework"), func() {
+ It("should list all specs", func() {
+ specs := runList()
+ Expect(len(specs)).Should(BeNumerically(">", 5))
+ })
+
+ It("should populate fields", func() {
+ specs := runList()
+ Expect(specs[0]).To(HaveField("Lifecycle", Not(BeEmpty())))
+ Expect(specs[0]).To(HaveField("Source", "openshift:payload:example-tests"))
+ })
+
+ It("should filter specs by suite", func() {
+ specs := runList("--suite", "example/fast")
+ for _, spec := range specs {
+ Expect(spec.Labels).ToNot(HaveKey("SLOW"))
+ }
+ })
+})
+
+func runList(args ...string) e.ExtensionTestSpecs {
+ var result e.ExtensionTestSpecs
+ args = append([]string{"list"}, args...)
+ cmd := exec.Command(binary, args...)
+ output, err := cmd.Output()
+ Expect(err).ShouldNot(HaveOccurred(), "Expected `example-tests list` to run successfully")
+ // Unmarshal the JSON output
+ err = json.Unmarshal(output, &result)
+ Expect(err).ShouldNot(HaveOccurred(), "Expected JSON output to unmarshal into ExtensionTestSpecs")
+ return result
+}
diff --git a/test/framework/run.go b/test/framework/run.go
new file mode 100644
index 0000000..ab2de44
--- /dev/null
+++ b/test/framework/run.go
@@ -0,0 +1,78 @@
+package framework
+
+import (
+ "encoding/json"
+ "errors"
+ "os/exec"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+
+ e "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests"
+)
+
+var _ = Describe("[sig-testing] example-tests run-suite", Label("framework"), func() {
+ var result e.ExtensionTestResults
+ var output []byte
+ var cmdErr error
+
+ BeforeEach(func() {
+ cmd := exec.Command(binary, "run-suite", "example/fast")
+
+ // Capture stdout; cmd.Output() does not include stderr
+ output, cmdErr = cmd.Output()
+
+ // Expect command to exit with a non-zero status (exit code 1 for failed tests)
+ var exitErr *exec.ExitError
+ ok := errors.As(cmdErr, &exitErr)
+ Expect(ok).To(BeTrue(), "Expected command to exit with a non-zero status")
+ Expect(exitErr.ExitCode()).To(Equal(1), "Expected exit code 1")
+
+ // Unmarshal the JSON output into the predefined ExtensionTestResults type
+ err := json.Unmarshal(output, &result)
+ Expect(err).ShouldNot(HaveOccurred(), "Expected JSON output to unmarshal into ExtensionTestResults")
+ })
+
+ It("should contain a test that passed", func() {
+ var foundPassed bool
+ for _, test := range result {
+ if test.Result == "passed" {
+ foundPassed = true
+ break
+ }
+ }
+ Expect(foundPassed).To(BeTrue(), "Expected at least one test to have passed")
+ })
+
+ It("should have the correct error message for the failed test", func() {
+ for _, test := range result {
+ if test.Name == "[sig-testing] openshift-tests-extension should support panicking tests" && test.Result == "failed" {
+ Expect(test.Error).To(ContainSubstring("Test Panicked: oh no"), "Expected error to contain 'Test Panicked: oh no'")
+ break
+ }
+ }
+ })
+
+ It("fast suite should not have a slow test", func() {
+ foundTest := false
+ for _, test := range result {
+ if test.Name == "[sig-testing] openshift-tests-extension should support slow tests" {
+ foundTest = true
+ break
+ }
+ }
+ Expect(foundTest).To(BeFalse(), "Expected to not find a slow test")
+ })
+
+ /*It("slow suite should contain a slow test", func() {
+ foundTest := false
+ for _, test := range result {
+ if test.Name == "[sig-testing] openshift-tests-extension should support slow tests" {
+ foundTest = true
+ Expect(test.Duration).To(BeNumerically(">=", 15000), "Expected slow test to take at least 15 seconds")
+ break
+ }
+ }
+ Expect(foundTest).To(BeTrue(), "Expected to find a slow test")
+ })*/
+})
diff --git a/test/framework/update.go b/test/framework/update.go
new file mode 100644
index 0000000..83af475
--- /dev/null
+++ b/test/framework/update.go
@@ -0,0 +1 @@
+package framework
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE
new file mode 100644
index 0000000..52cf18e
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE
@@ -0,0 +1,26 @@
+Copyright 2021 The ANTLR Project
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ 3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from this
+ software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go
new file mode 100644
index 0000000..ab51212
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go
@@ -0,0 +1,68 @@
+/*
+Package antlr implements the Go version of the ANTLR 4 runtime.
+
+# The ANTLR Tool
+
+ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing,
+or translating structured text or binary files. It's widely used to build languages, tools, and frameworks.
+From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface
+(or visitor) that makes it easy to respond to the recognition of phrases of interest.
+
+# Code Generation
+
+ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a
+runtime library, written specifically to support the generated code in the target language. This library is the
+runtime for the Go target.
+
+To generate code for the go target, it is generally recommended to place the source grammar files in a package of
+their own, and use the `.sh` script method of generating code, using the go generate directive. In that same directory
+it is usual, though not required, to place the antlr tool that should be used to generate the code. That does mean
+that the antlr tool JAR file will be checked in to your source code control though, so you are free to use any other
+way of specifying the version of the ANTLR tool to use, such as aliasing in `.zshrc` or equivalent, or a profile in
+your IDE, or configuration in your CI system.
+
+Here is a general template for an ANTLR based recognizer in Go:
+
+ .
+ ├── myproject
+ ├── parser
+ │ ├── mygrammar.g4
+ │ ├── antlr-4.12.0-complete.jar
+ │ ├── error_listeners.go
+ │ ├── generate.go
+ │ ├── generate.sh
+ ├── go.mod
+ ├── go.sum
+ ├── main.go
+ └── main_test.go
+
+Make sure that the package statement in your grammar file(s) reflects the go package they exist in.
+The generate.go file then looks like this:
+
+ package parser
+
+ //go:generate ./generate.sh
+
+And the generate.sh file will look similar to this:
+
+ #!/bin/sh
+
+ alias antlr4='java -Xmx500M -cp "./antlr4-4.12.0-complete.jar:$CLASSPATH" org.antlr.v4.Tool'
+ antlr4 -Dlanguage=Go -no-visitor -package parser *.g4
+
+depending on whether you want visitors or listeners or any other ANTLR options.
+
+From the command line at the root of your package “myproject” you can then simply issue the command:
+
+ go generate ./...
+
+# Copyright Notice
+
+Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+
+Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root.
+
+[target languages]: https://github.com/antlr/antlr4/tree/master/runtime
+[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt
+*/
+package antlr
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go
new file mode 100644
index 0000000..98010d2
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go
@@ -0,0 +1,176 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "sync"
+
+// ATNInvalidAltNumber is used to represent an ALT number that has yet to be calculated or
+// which is invalid for a particular struct such as [*antlr.BaseRuleContext]
+var ATNInvalidAltNumber int
+
+// ATN represents an “[Augmented Transition Network]”, though in ANTLR the more
+// general term is “Augmented Recursive Transition Network”; descriptions of a
+// “[Recursive Transition Network]” also exist.
+//
+// ATNs represent the main networks in the system and are serialized by the code generator and support [ALL(*)].
+//
+// [Augmented Transition Network]: https://en.wikipedia.org/wiki/Augmented_transition_network
+// [ALL(*)]: https://www.antlr.org/papers/allstar-techreport.pdf
+// [Recursive Transition Network]: https://en.wikipedia.org/wiki/Recursive_transition_network
+type ATN struct {
+ // DecisionToState is the decision points for all rules, subrules, optional
+ // blocks, ()+, ()*, etc. Each subrule/rule is a decision point, and we must track them so we
+ // can go back later and build DFA predictors for them. This includes
+ // all the rules, subrules, optional blocks, ()+, ()* etc...
+ DecisionToState []DecisionState
+
+ // grammarType is the ATN type and is used for deserializing ATNs from strings.
+ grammarType int
+
+ // lexerActions is referenced by action transitions in the ATN for lexer ATNs.
+ lexerActions []LexerAction
+
+ // maxTokenType is the maximum value for any symbol recognized by a transition in the ATN.
+ maxTokenType int
+
+ modeNameToStartState map[string]*TokensStartState
+
+ modeToStartState []*TokensStartState
+
+ // ruleToStartState maps from rule index to starting state number.
+ ruleToStartState []*RuleStartState
+
+ // ruleToStopState maps from rule index to stop state number.
+ ruleToStopState []*RuleStopState
+
+ // ruleToTokenType maps the rule index to the resulting token type for lexer
+ // ATNs. For parser ATNs, it maps the rule index to the generated bypass token
+ // type if ATNDeserializationOptions.isGenerateRuleBypassTransitions was
+ // specified, and otherwise is nil.
+ ruleToTokenType []int
+
+ states []ATNState
+
+ mu sync.Mutex
+ stateMu sync.RWMutex
+ edgeMu sync.RWMutex
+}
+
+// NewATN returns a new ATN struct representing the given grammarType and is used
+// for runtime deserialization of ATNs from the code generated by the ANTLR tool
+func NewATN(grammarType int, maxTokenType int) *ATN {
+ return &ATN{
+ grammarType: grammarType,
+ maxTokenType: maxTokenType,
+ modeNameToStartState: make(map[string]*TokensStartState),
+ }
+}
+
+// NextTokensInContext computes and returns the set of valid tokens that can occur starting
+// in state s. If ctx is nil, the set of tokens will not include what can follow
+// the rule surrounding s. In other words, the set will be restricted to tokens
+// reachable staying within the rule of s.
+func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet {
+ return NewLL1Analyzer(a).Look(s, nil, ctx)
+}
+
+// NextTokensNoContext computes and returns the set of valid tokens that can occur starting
+// in state s and staying in same rule. [antlr.Token.EPSILON] is in set if we reach end of
+// rule.
+func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ iset := s.GetNextTokenWithinRule()
+ if iset == nil {
+ iset = a.NextTokensInContext(s, nil)
+ iset.readOnly = true
+ s.SetNextTokenWithinRule(iset)
+ }
+ return iset
+}
+
+// NextTokens computes and returns the set of valid tokens starting in state s, by
+// calling either [NextTokensNoContext] (ctx == nil) or [NextTokensInContext] (ctx != nil).
+func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet {
+ if ctx == nil {
+ return a.NextTokensNoContext(s)
+ }
+
+ return a.NextTokensInContext(s, ctx)
+}
+
+func (a *ATN) addState(state ATNState) {
+ if state != nil {
+ state.SetATN(a)
+ state.SetStateNumber(len(a.states))
+ }
+
+ a.states = append(a.states, state)
+}
+
+func (a *ATN) removeState(state ATNState) {
+ a.states[state.GetStateNumber()] = nil // Just free the memory; don't shift states in the slice
+}
+
+func (a *ATN) defineDecisionState(s DecisionState) int {
+ a.DecisionToState = append(a.DecisionToState, s)
+ s.setDecision(len(a.DecisionToState) - 1)
+
+ return s.getDecision()
+}
+
+func (a *ATN) getDecisionState(decision int) DecisionState {
+ if len(a.DecisionToState) == 0 {
+ return nil
+ }
+
+ return a.DecisionToState[decision]
+}
+
+// getExpectedTokens computes the set of input symbols which could follow ATN
+// state number stateNumber in the specified full parse context ctx and returns
+// the set of potentially valid input symbols which could follow the specified
+// state in the specified context. This method considers the complete parser
+// context, but does not evaluate semantic predicates (i.e. all predicates
+// encountered during the calculation are assumed true). If a path in the ATN
+// exists from the starting state to the RuleStopState of the outermost context
+// without Matching any symbols, Token.EOF is added to the returned set.
+//
+// A nil ctx defaults to ParserRuleContext.EMPTY.
+//
+// It panics if the ATN does not contain state stateNumber.
+func (a *ATN) getExpectedTokens(stateNumber int, ctx RuleContext) *IntervalSet {
+ if stateNumber < 0 || stateNumber >= len(a.states) {
+ panic("Invalid state number.")
+ }
+
+ s := a.states[stateNumber]
+ following := a.NextTokens(s, nil)
+
+ if !following.contains(TokenEpsilon) {
+ return following
+ }
+
+ expected := NewIntervalSet()
+
+ expected.addSet(following)
+ expected.removeOne(TokenEpsilon)
+
+ for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) {
+ invokingState := a.states[ctx.GetInvokingState()]
+ rt := invokingState.GetTransitions()[0]
+
+ following = a.NextTokens(rt.(*RuleTransition).followState, nil)
+ expected.addSet(following)
+ expected.removeOne(TokenEpsilon)
+ ctx = ctx.GetParent().(RuleContext)
+ }
+
+ if following.contains(TokenEpsilon) {
+ expected.addOne(TokenEOF)
+ }
+
+ return expected
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go
new file mode 100644
index 0000000..7619fa1
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go
@@ -0,0 +1,303 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+)
+
+// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic
+// context). The syntactic context is a graph-structured stack node whose
+// path(s) to the root is the rule invocation(s) chain used to arrive at the
+// state. The semantic context is the tree of semantic predicates encountered
+// before reaching an ATN state.
+type ATNConfig interface {
+ Equals(o Collectable[ATNConfig]) bool
+ Hash() int
+
+ GetState() ATNState
+ GetAlt() int
+ GetSemanticContext() SemanticContext
+
+ GetContext() PredictionContext
+ SetContext(PredictionContext)
+
+ GetReachesIntoOuterContext() int
+ SetReachesIntoOuterContext(int)
+
+ String() string
+
+ getPrecedenceFilterSuppressed() bool
+ setPrecedenceFilterSuppressed(bool)
+}
+
+type BaseATNConfig struct {
+ precedenceFilterSuppressed bool
+ state ATNState
+ alt int
+ context PredictionContext
+ semanticContext SemanticContext
+ reachesIntoOuterContext int
+}
+
+func NewBaseATNConfig7(old *BaseATNConfig) ATNConfig { // TODO: Dup
+ return &BaseATNConfig{
+ state: old.state,
+ alt: old.alt,
+ context: old.context,
+ semanticContext: old.semanticContext,
+ reachesIntoOuterContext: old.reachesIntoOuterContext,
+ }
+}
+
+func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig {
+ return NewBaseATNConfig5(state, alt, context, SemanticContextNone)
+}
+
+func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
+ if semanticContext == nil {
+ panic("semanticContext cannot be nil") // TODO: Necessary?
+ }
+
+ return &BaseATNConfig{state: state, alt: alt, context: context, semanticContext: semanticContext}
+}
+
+func NewBaseATNConfig4(c ATNConfig, state ATNState) *BaseATNConfig {
+ return NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext())
+}
+
+func NewBaseATNConfig3(c ATNConfig, state ATNState, semanticContext SemanticContext) *BaseATNConfig {
+ return NewBaseATNConfig(c, state, c.GetContext(), semanticContext)
+}
+
+func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNConfig {
+ return NewBaseATNConfig(c, c.GetState(), c.GetContext(), semanticContext)
+}
+
+func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig {
+ return NewBaseATNConfig(c, state, context, c.GetSemanticContext())
+}
+
+func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
+ if semanticContext == nil {
+ panic("semanticContext cannot be nil")
+ }
+
+ return &BaseATNConfig{
+ state: state,
+ alt: c.GetAlt(),
+ context: context,
+ semanticContext: semanticContext,
+ reachesIntoOuterContext: c.GetReachesIntoOuterContext(),
+ precedenceFilterSuppressed: c.getPrecedenceFilterSuppressed(),
+ }
+}
+
+func (b *BaseATNConfig) getPrecedenceFilterSuppressed() bool {
+ return b.precedenceFilterSuppressed
+}
+
+func (b *BaseATNConfig) setPrecedenceFilterSuppressed(v bool) {
+ b.precedenceFilterSuppressed = v
+}
+
+func (b *BaseATNConfig) GetState() ATNState {
+ return b.state
+}
+
+func (b *BaseATNConfig) GetAlt() int {
+ return b.alt
+}
+
+func (b *BaseATNConfig) SetContext(v PredictionContext) {
+ b.context = v
+}
+func (b *BaseATNConfig) GetContext() PredictionContext {
+ return b.context
+}
+
+func (b *BaseATNConfig) GetSemanticContext() SemanticContext {
+ return b.semanticContext
+}
+
+func (b *BaseATNConfig) GetReachesIntoOuterContext() int {
+ return b.reachesIntoOuterContext
+}
+
+func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) {
+ b.reachesIntoOuterContext = v
+}
+
+// Equals is the default comparison function for an ATNConfig when no specialist implementation is required
+// for a collection.
+//
+// An ATN configuration is equal to another if both have the same state, they
+// predict the same alternative, and syntactic/semantic contexts are the same.
+func (b *BaseATNConfig) Equals(o Collectable[ATNConfig]) bool {
+ if b == o {
+ return true
+ } else if o == nil {
+ return false
+ }
+
+ var other, ok = o.(*BaseATNConfig)
+
+ if !ok {
+ return false
+ }
+
+ var equal bool
+
+ if b.context == nil {
+ equal = other.context == nil
+ } else {
+ equal = b.context.Equals(other.context)
+ }
+
+ var (
+ nums = b.state.GetStateNumber() == other.state.GetStateNumber()
+ alts = b.alt == other.alt
+ cons = b.semanticContext.Equals(other.semanticContext)
+ sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed
+ )
+
+ return nums && alts && cons && sups && equal
+}
+
+// Hash is the default hash function for BaseATNConfig, when no specialist hash function
+// is required for a collection
+func (b *BaseATNConfig) Hash() int {
+ var c int
+ if b.context != nil {
+ c = b.context.Hash()
+ }
+
+ h := murmurInit(7)
+ h = murmurUpdate(h, b.state.GetStateNumber())
+ h = murmurUpdate(h, b.alt)
+ h = murmurUpdate(h, c)
+ h = murmurUpdate(h, b.semanticContext.Hash())
+ return murmurFinish(h, 4)
+}
+
+func (b *BaseATNConfig) String() string {
+ var s1, s2, s3 string
+
+ if b.context != nil {
+ s1 = ",[" + fmt.Sprint(b.context) + "]"
+ }
+
+ if b.semanticContext != SemanticContextNone {
+ s2 = "," + fmt.Sprint(b.semanticContext)
+ }
+
+ if b.reachesIntoOuterContext > 0 {
+ s3 = ",up=" + fmt.Sprint(b.reachesIntoOuterContext)
+ }
+
+ return fmt.Sprintf("(%v,%v%v%v%v)", b.state, b.alt, s1, s2, s3)
+}
+
+type LexerATNConfig struct {
+ *BaseATNConfig
+ lexerActionExecutor *LexerActionExecutor
+ passedThroughNonGreedyDecision bool
+}
+
+func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
+ return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
+}
+
+func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
+ return &LexerATNConfig{
+ BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone),
+ lexerActionExecutor: lexerActionExecutor,
+ }
+}
+
+func NewLexerATNConfig4(c *LexerATNConfig, state ATNState) *LexerATNConfig {
+ return &LexerATNConfig{
+ BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
+ lexerActionExecutor: c.lexerActionExecutor,
+ passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
+ }
+}
+
+func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
+ return &LexerATNConfig{
+ BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
+ lexerActionExecutor: lexerActionExecutor,
+ passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
+ }
+}
+
+func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig {
+ return &LexerATNConfig{
+ BaseATNConfig: NewBaseATNConfig(c, state, context, c.GetSemanticContext()),
+ lexerActionExecutor: c.lexerActionExecutor,
+ passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
+ }
+}
+
+func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
+ return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
+}
+
+// Hash is the default hash function for LexerATNConfig objects, it can be used directly or via
+// the default comparator [ObjEqComparator].
+func (l *LexerATNConfig) Hash() int {
+ var f int
+ if l.passedThroughNonGreedyDecision {
+ f = 1
+ } else {
+ f = 0
+ }
+ h := murmurInit(7)
+ h = murmurUpdate(h, l.state.GetStateNumber())
+ h = murmurUpdate(h, l.alt)
+ h = murmurUpdate(h, l.context.Hash())
+ h = murmurUpdate(h, l.semanticContext.Hash())
+ h = murmurUpdate(h, f)
+ h = murmurUpdate(h, l.lexerActionExecutor.Hash())
+ h = murmurFinish(h, 6)
+ return h
+}
+
+// Equals is the default comparison function for LexerATNConfig objects, it can be used directly or via
+// the default comparator [ObjEqComparator].
+func (l *LexerATNConfig) Equals(other Collectable[ATNConfig]) bool {
+ if l == other {
+ return true
+ }
+ var othert, ok = other.(*LexerATNConfig)
+
+ if !ok {
+ return false
+ } else if l.passedThroughNonGreedyDecision != othert.passedThroughNonGreedyDecision {
+ return false
+ }
+
+ var b bool
+
+ if l.lexerActionExecutor != nil {
+ b = !l.lexerActionExecutor.Equals(othert.lexerActionExecutor)
+ } else {
+ b = othert.lexerActionExecutor != nil
+ }
+
+ if b {
+ return false
+ }
+
+ return l.BaseATNConfig.Equals(othert.BaseATNConfig)
+}
+
+func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool {
+ var ds, ok = target.(DecisionState)
+
+ return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy())
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go
new file mode 100644
index 0000000..43e9b33
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go
@@ -0,0 +1,441 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+)
+
+type ATNConfigSet interface {
+ Hash() int
+ Equals(o Collectable[ATNConfig]) bool
+ Add(ATNConfig, *DoubleDict) bool
+ AddAll([]ATNConfig) bool
+
+ GetStates() *JStore[ATNState, Comparator[ATNState]]
+ GetPredicates() []SemanticContext
+ GetItems() []ATNConfig
+
+ OptimizeConfigs(interpreter *BaseATNSimulator)
+
+ Length() int
+ IsEmpty() bool
+ Contains(ATNConfig) bool
+ ContainsFast(ATNConfig) bool
+ Clear()
+ String() string
+
+ HasSemanticContext() bool
+ SetHasSemanticContext(v bool)
+
+ ReadOnly() bool
+ SetReadOnly(bool)
+
+ GetConflictingAlts() *BitSet
+ SetConflictingAlts(*BitSet)
+
+ Alts() *BitSet
+
+ FullContext() bool
+
+ GetUniqueAlt() int
+ SetUniqueAlt(int)
+
+ GetDipsIntoOuterContext() bool
+ SetDipsIntoOuterContext(bool)
+}
+
+// BaseATNConfigSet is a specialized set of ATNConfig that tracks information
+// about its elements and can combine similar configurations using a
+// graph-structured stack.
+type BaseATNConfigSet struct {
+ cachedHash int
+
+ // configLookup is used to determine whether two BaseATNConfigSets are equal. We
+ // need all configurations with the same (s, i, _, semctx) to be equal. A key
+ // effectively doubles the number of objects associated with ATNConfigs. All
+ // keys are hashed by (s, i, _, pi), not including the context. Wiped out when
+ // read-only because a set becomes a DFA state.
+ configLookup *JStore[ATNConfig, Comparator[ATNConfig]]
+
+ // configs is the added elements.
+ configs []ATNConfig
+
+ // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
+ // info together because it saves recomputation. Can we track conflicts as they
+ // are added to save scanning configs later?
+ conflictingAlts *BitSet
+
+ // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates
+ // we hit a pred while computing a closure operation. Do not make a DFA state
+ // from the BaseATNConfigSet in this case. TODO: How is this used by parsers?
+ dipsIntoOuterContext bool
+
+ // fullCtx is whether it is part of a full context LL prediction. Used to
+ // determine how to merge $. It is a wildcard with SLL, but not for an LL
+ // context merge.
+ fullCtx bool
+
+ // Used in parser and lexer. In lexer, it indicates we hit a pred
+ // while computing a closure operation. Don't make a DFA state from a.
+ hasSemanticContext bool
+
+ // readOnly is whether it is read-only. Do not
+ // allow any code to manipulate the set if true because DFA states will point at
+ // sets and those must not change. If not, protect other fields; conflictingAlts
+ // in particular, which is assigned after readOnly.
+ readOnly bool
+
+ uniqueAlt int
+}
+
+func (b *BaseATNConfigSet) Alts() *BitSet {
+ alts := NewBitSet()
+ for _, it := range b.configs {
+ alts.add(it.GetAlt())
+ }
+ return alts
+}
+
+func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet {
+ return &BaseATNConfigSet{
+ cachedHash: -1,
+ configLookup: NewJStore[ATNConfig, Comparator[ATNConfig]](aConfCompInst),
+ fullCtx: fullCtx,
+ }
+}
+
+// Add merges contexts with existing configs for (s, i, pi, _), where s is the
+// ATNConfig.state, i is the ATNConfig.alt, and pi is the
+// ATNConfig.semanticContext. We use (s,i,pi) as the key. Updates
+// dipsIntoOuterContext and hasSemanticContext when necessary.
+func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+
+ if config.GetSemanticContext() != SemanticContextNone {
+ b.hasSemanticContext = true
+ }
+
+ if config.GetReachesIntoOuterContext() > 0 {
+ b.dipsIntoOuterContext = true
+ }
+
+ existing, present := b.configLookup.Put(config)
+
+ // The config was not already in the set
+ //
+ if !present {
+ b.cachedHash = -1
+ b.configs = append(b.configs, config) // Track order here
+ return true
+ }
+
+ // Merge a previous (s, i, pi, _) with it and save the result
+ rootIsWildcard := !b.fullCtx
+ merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache)
+
+ // No need to check for existing.context because config.context is in the cache,
+ // since the only way to create new graphs is the "call rule" and here. We cache
+ // at both places.
+ existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext()))
+
+ // Preserve the precedence filter suppression during the merge
+ if config.getPrecedenceFilterSuppressed() {
+ existing.setPrecedenceFilterSuppressed(true)
+ }
+
+ // Replace the context because there is no need to do alt mapping
+ existing.SetContext(merged)
+
+ return true
+}
+
+func (b *BaseATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] {
+
+ // states uses the standard comparator provided by the ATNState instance
+ //
+ states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst)
+
+ for i := 0; i < len(b.configs); i++ {
+ states.Put(b.configs[i].GetState())
+ }
+
+ return states
+}
+
+func (b *BaseATNConfigSet) HasSemanticContext() bool {
+ return b.hasSemanticContext
+}
+
+func (b *BaseATNConfigSet) SetHasSemanticContext(v bool) {
+ b.hasSemanticContext = v
+}
+
+func (b *BaseATNConfigSet) GetPredicates() []SemanticContext {
+ preds := make([]SemanticContext, 0)
+
+ for i := 0; i < len(b.configs); i++ {
+ c := b.configs[i].GetSemanticContext()
+
+ if c != SemanticContextNone {
+ preds = append(preds, c)
+ }
+ }
+
+ return preds
+}
+
+func (b *BaseATNConfigSet) GetItems() []ATNConfig {
+ return b.configs
+}
+
+func (b *BaseATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+
+ if b.configLookup.Len() == 0 {
+ return
+ }
+
+ for i := 0; i < len(b.configs); i++ {
+ config := b.configs[i]
+
+ config.SetContext(interpreter.getCachedContext(config.GetContext()))
+ }
+}
+
+func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool {
+ for i := 0; i < len(coll); i++ {
+ b.Add(coll[i], nil)
+ }
+
+ return false
+}
+
+// Compare is a hack function just to verify that adding DFA states to the known
+// set works, so long as comparison of ATNConfigSets works. For that to work, we
+// need to make sure that the set of ATNConfigs in two sets are equivalent. We can't
+// know the order, so we do this inefficient hack. If this proves the point, then
+// we can change the config set to a better structure.
+func (b *BaseATNConfigSet) Compare(bs *BaseATNConfigSet) bool {
+ if len(b.configs) != len(bs.configs) {
+ return false
+ }
+
+ for _, c := range b.configs {
+ found := false
+ for _, c2 := range bs.configs {
+ if c.Equals(c2) {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ return false
+ }
+
+ }
+ return true
+}
+
+func (b *BaseATNConfigSet) Equals(other Collectable[ATNConfig]) bool {
+ if b == other {
+ return true
+ } else if _, ok := other.(*BaseATNConfigSet); !ok {
+ return false
+ }
+
+ other2 := other.(*BaseATNConfigSet)
+
+ return b.configs != nil &&
+ b.fullCtx == other2.fullCtx &&
+ b.uniqueAlt == other2.uniqueAlt &&
+ b.conflictingAlts == other2.conflictingAlts &&
+ b.hasSemanticContext == other2.hasSemanticContext &&
+ b.dipsIntoOuterContext == other2.dipsIntoOuterContext &&
+ b.Compare(other2)
+}
+
+func (b *BaseATNConfigSet) Hash() int {
+ if b.readOnly {
+ if b.cachedHash == -1 {
+ b.cachedHash = b.hashCodeConfigs()
+ }
+
+ return b.cachedHash
+ }
+
+ return b.hashCodeConfigs()
+}
+
+func (b *BaseATNConfigSet) hashCodeConfigs() int {
+ h := 1
+ for _, config := range b.configs {
+ h = 31*h + config.Hash()
+ }
+ return h
+}
+
+func (b *BaseATNConfigSet) Length() int {
+ return len(b.configs)
+}
+
+func (b *BaseATNConfigSet) IsEmpty() bool {
+ return len(b.configs) == 0
+}
+
+func (b *BaseATNConfigSet) Contains(item ATNConfig) bool {
+ if b.configLookup == nil {
+ panic("not implemented for read-only sets")
+ }
+
+ return b.configLookup.Contains(item)
+}
+
+func (b *BaseATNConfigSet) ContainsFast(item ATNConfig) bool {
+ if b.configLookup == nil {
+ panic("not implemented for read-only sets")
+ }
+
+ return b.configLookup.Contains(item) // TODO: containsFast is not implemented for Set
+}
+
+func (b *BaseATNConfigSet) Clear() {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+
+ b.configs = make([]ATNConfig, 0)
+ b.cachedHash = -1
+ b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst)
+}
+
+func (b *BaseATNConfigSet) FullContext() bool {
+ return b.fullCtx
+}
+
+func (b *BaseATNConfigSet) GetDipsIntoOuterContext() bool {
+ return b.dipsIntoOuterContext
+}
+
+func (b *BaseATNConfigSet) SetDipsIntoOuterContext(v bool) {
+ b.dipsIntoOuterContext = v
+}
+
+func (b *BaseATNConfigSet) GetUniqueAlt() int {
+ return b.uniqueAlt
+}
+
+func (b *BaseATNConfigSet) SetUniqueAlt(v int) {
+ b.uniqueAlt = v
+}
+
+func (b *BaseATNConfigSet) GetConflictingAlts() *BitSet {
+ return b.conflictingAlts
+}
+
+func (b *BaseATNConfigSet) SetConflictingAlts(v *BitSet) {
+ b.conflictingAlts = v
+}
+
+func (b *BaseATNConfigSet) ReadOnly() bool {
+ return b.readOnly
+}
+
+func (b *BaseATNConfigSet) SetReadOnly(readOnly bool) {
+ b.readOnly = readOnly
+
+ if readOnly {
+ b.configLookup = nil // Read only, so no need for the lookup cache
+ }
+}
+
+func (b *BaseATNConfigSet) String() string {
+ s := "["
+
+ for i, c := range b.configs {
+ s += c.String()
+
+ if i != len(b.configs)-1 {
+ s += ", "
+ }
+ }
+
+ s += "]"
+
+ if b.hasSemanticContext {
+ s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext)
+ }
+
+ if b.uniqueAlt != ATNInvalidAltNumber {
+ s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt)
+ }
+
+ if b.conflictingAlts != nil {
+ s += ",conflictingAlts=" + b.conflictingAlts.String()
+ }
+
+ if b.dipsIntoOuterContext {
+ s += ",dipsIntoOuterContext"
+ }
+
+ return s
+}
+
+type OrderedATNConfigSet struct {
+ *BaseATNConfigSet
+}
+
+func NewOrderedATNConfigSet() *OrderedATNConfigSet {
+ b := NewBaseATNConfigSet(false)
+
+ // This set uses the standard Hash() and Equals() from ATNConfig
+ b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst)
+
+ return &OrderedATNConfigSet{BaseATNConfigSet: b}
+}
+
+func hashATNConfig(i interface{}) int {
+ o := i.(ATNConfig)
+ hash := 7
+ hash = 31*hash + o.GetState().GetStateNumber()
+ hash = 31*hash + o.GetAlt()
+ hash = 31*hash + o.GetSemanticContext().Hash()
+ return hash
+}
+
+func equalATNConfigs(a, b interface{}) bool {
+ if a == nil || b == nil {
+ return false
+ }
+
+ if a == b {
+ return true
+ }
+
+ var ai, ok = a.(ATNConfig)
+ var bi, ok1 = b.(ATNConfig)
+
+ if !ok || !ok1 {
+ return false
+ }
+
+ if ai.GetState().GetStateNumber() != bi.GetState().GetStateNumber() {
+ return false
+ }
+
+ if ai.GetAlt() != bi.GetAlt() {
+ return false
+ }
+
+ return ai.GetSemanticContext().Equals(bi.GetSemanticContext())
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go
new file mode 100644
index 0000000..3c975ec
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go
@@ -0,0 +1,61 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "errors"
+
+var defaultATNDeserializationOptions = ATNDeserializationOptions{true, true, false}
+
+type ATNDeserializationOptions struct {
+ readOnly bool
+ verifyATN bool
+ generateRuleBypassTransitions bool
+}
+
+func (opts *ATNDeserializationOptions) ReadOnly() bool {
+ return opts.readOnly
+}
+
+func (opts *ATNDeserializationOptions) SetReadOnly(readOnly bool) {
+ if opts.readOnly {
+ panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
+ }
+ opts.readOnly = readOnly
+}
+
+func (opts *ATNDeserializationOptions) VerifyATN() bool {
+ return opts.verifyATN
+}
+
+func (opts *ATNDeserializationOptions) SetVerifyATN(verifyATN bool) {
+ if opts.readOnly {
+ panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
+ }
+ opts.verifyATN = verifyATN
+}
+
+func (opts *ATNDeserializationOptions) GenerateRuleBypassTransitions() bool {
+ return opts.generateRuleBypassTransitions
+}
+
+func (opts *ATNDeserializationOptions) SetGenerateRuleBypassTransitions(generateRuleBypassTransitions bool) {
+ if opts.readOnly {
+ panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
+ }
+ opts.generateRuleBypassTransitions = generateRuleBypassTransitions
+}
+
+func DefaultATNDeserializationOptions() *ATNDeserializationOptions {
+ return NewATNDeserializationOptions(&defaultATNDeserializationOptions)
+}
+
+func NewATNDeserializationOptions(other *ATNDeserializationOptions) *ATNDeserializationOptions {
+ o := new(ATNDeserializationOptions)
+ if other != nil {
+ *o = *other
+ o.readOnly = false
+ }
+ return o
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go
new file mode 100644
index 0000000..3888856
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go
@@ -0,0 +1,683 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+)
+
+const serializedVersion = 4
+
+type loopEndStateIntPair struct {
+ item0 *LoopEndState
+ item1 int
+}
+
+type blockStartStateIntPair struct {
+ item0 BlockStartState
+ item1 int
+}
+
+type ATNDeserializer struct {
+ options *ATNDeserializationOptions
+ data []int32
+ pos int
+}
+
+func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer {
+ if options == nil {
+ options = &defaultATNDeserializationOptions
+ }
+
+ return &ATNDeserializer{options: options}
+}
+
+func stringInSlice(a string, list []string) int {
+ for i, b := range list {
+ if b == a {
+ return i
+ }
+ }
+
+ return -1
+}
+
+func (a *ATNDeserializer) Deserialize(data []int32) *ATN {
+ a.data = data
+ a.pos = 0
+ a.checkVersion()
+
+ atn := a.readATN()
+
+ a.readStates(atn)
+ a.readRules(atn)
+ a.readModes(atn)
+
+ sets := a.readSets(atn, nil)
+
+ a.readEdges(atn, sets)
+ a.readDecisions(atn)
+ a.readLexerActions(atn)
+ a.markPrecedenceDecisions(atn)
+ a.verifyATN(atn)
+
+ if a.options.GenerateRuleBypassTransitions() && atn.grammarType == ATNTypeParser {
+ a.generateRuleBypassTransitions(atn)
+ // Re-verify after modification
+ a.verifyATN(atn)
+ }
+
+ return atn
+
+}
+
+func (a *ATNDeserializer) checkVersion() {
+ version := a.readInt()
+
+ if version != serializedVersion {
+ panic("Could not deserialize ATN with version " + strconv.Itoa(version) + " (expected " + strconv.Itoa(serializedVersion) + ").")
+ }
+}
+
+func (a *ATNDeserializer) readATN() *ATN {
+ grammarType := a.readInt()
+ maxTokenType := a.readInt()
+
+ return NewATN(grammarType, maxTokenType)
+}
+
+func (a *ATNDeserializer) readStates(atn *ATN) {
+ nstates := a.readInt()
+
+ // Allocate worst case size.
+ loopBackStateNumbers := make([]loopEndStateIntPair, 0, nstates)
+ endStateNumbers := make([]blockStartStateIntPair, 0, nstates)
+
+ // Preallocate states slice.
+ atn.states = make([]ATNState, 0, nstates)
+
+ for i := 0; i < nstates; i++ {
+ stype := a.readInt()
+
+ // Ignore bad types of states
+ if stype == ATNStateInvalidType {
+ atn.addState(nil)
+ continue
+ }
+
+ ruleIndex := a.readInt()
+
+ s := a.stateFactory(stype, ruleIndex)
+
+ if stype == ATNStateLoopEnd {
+ loopBackStateNumber := a.readInt()
+
+ loopBackStateNumbers = append(loopBackStateNumbers, loopEndStateIntPair{s.(*LoopEndState), loopBackStateNumber})
+ } else if s2, ok := s.(BlockStartState); ok {
+ endStateNumber := a.readInt()
+
+ endStateNumbers = append(endStateNumbers, blockStartStateIntPair{s2, endStateNumber})
+ }
+
+ atn.addState(s)
+ }
+
+ // Delay the assignment of loop back and end states until we know all the state
+ // instances have been initialized
+ for _, pair := range loopBackStateNumbers {
+ pair.item0.loopBackState = atn.states[pair.item1]
+ }
+
+ for _, pair := range endStateNumbers {
+ pair.item0.setEndState(atn.states[pair.item1].(*BlockEndState))
+ }
+
+ numNonGreedyStates := a.readInt()
+ for j := 0; j < numNonGreedyStates; j++ {
+ stateNumber := a.readInt()
+
+ atn.states[stateNumber].(DecisionState).setNonGreedy(true)
+ }
+
+ numPrecedenceStates := a.readInt()
+ for j := 0; j < numPrecedenceStates; j++ {
+ stateNumber := a.readInt()
+
+ atn.states[stateNumber].(*RuleStartState).isPrecedenceRule = true
+ }
+}
+
+func (a *ATNDeserializer) readRules(atn *ATN) {
+ nrules := a.readInt()
+
+ if atn.grammarType == ATNTypeLexer {
+ atn.ruleToTokenType = make([]int, nrules)
+ }
+
+ atn.ruleToStartState = make([]*RuleStartState, nrules)
+
+ for i := range atn.ruleToStartState {
+ s := a.readInt()
+ startState := atn.states[s].(*RuleStartState)
+
+ atn.ruleToStartState[i] = startState
+
+ if atn.grammarType == ATNTypeLexer {
+ tokenType := a.readInt()
+
+ atn.ruleToTokenType[i] = tokenType
+ }
+ }
+
+ atn.ruleToStopState = make([]*RuleStopState, nrules)
+
+ for _, state := range atn.states {
+ if s2, ok := state.(*RuleStopState); ok {
+ atn.ruleToStopState[s2.ruleIndex] = s2
+ atn.ruleToStartState[s2.ruleIndex].stopState = s2
+ }
+ }
+}
+
+func (a *ATNDeserializer) readModes(atn *ATN) {
+ nmodes := a.readInt()
+ atn.modeToStartState = make([]*TokensStartState, nmodes)
+
+ for i := range atn.modeToStartState {
+ s := a.readInt()
+
+ atn.modeToStartState[i] = atn.states[s].(*TokensStartState)
+ }
+}
+
+func (a *ATNDeserializer) readSets(atn *ATN, sets []*IntervalSet) []*IntervalSet {
+ m := a.readInt()
+
+ // Preallocate the needed capacity.
+ if cap(sets)-len(sets) < m {
+ isets := make([]*IntervalSet, len(sets), len(sets)+m)
+ copy(isets, sets)
+ sets = isets
+ }
+
+ for i := 0; i < m; i++ {
+ iset := NewIntervalSet()
+
+ sets = append(sets, iset)
+
+ n := a.readInt()
+ containsEOF := a.readInt()
+
+ if containsEOF != 0 {
+ iset.addOne(-1)
+ }
+
+ for j := 0; j < n; j++ {
+ i1 := a.readInt()
+ i2 := a.readInt()
+
+ iset.addRange(i1, i2)
+ }
+ }
+
+ return sets
+}
+
+func (a *ATNDeserializer) readEdges(atn *ATN, sets []*IntervalSet) {
+ nedges := a.readInt()
+
+ for i := 0; i < nedges; i++ {
+ var (
+ src = a.readInt()
+ trg = a.readInt()
+ ttype = a.readInt()
+ arg1 = a.readInt()
+ arg2 = a.readInt()
+ arg3 = a.readInt()
+ trans = a.edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets)
+ srcState = atn.states[src]
+ )
+
+ srcState.AddTransition(trans, -1)
+ }
+
+ // Edges for rule stop states can be derived, so they are not serialized
+ for _, state := range atn.states {
+ for _, t := range state.GetTransitions() {
+ var rt, ok = t.(*RuleTransition)
+
+ if !ok {
+ continue
+ }
+
+ outermostPrecedenceReturn := -1
+
+ if atn.ruleToStartState[rt.getTarget().GetRuleIndex()].isPrecedenceRule {
+ if rt.precedence == 0 {
+ outermostPrecedenceReturn = rt.getTarget().GetRuleIndex()
+ }
+ }
+
+ trans := NewEpsilonTransition(rt.followState, outermostPrecedenceReturn)
+
+ atn.ruleToStopState[rt.getTarget().GetRuleIndex()].AddTransition(trans, -1)
+ }
+ }
+
+ for _, state := range atn.states {
+ if s2, ok := state.(BlockStartState); ok {
+ // We need to know the end state to set its start state
+ if s2.getEndState() == nil {
+ panic("IllegalState")
+ }
+
+ // Block end states can only be associated to a single block start state
+ if s2.getEndState().startState != nil {
+ panic("IllegalState")
+ }
+
+ s2.getEndState().startState = state
+ }
+
+ if s2, ok := state.(*PlusLoopbackState); ok {
+ for _, t := range s2.GetTransitions() {
+ if t2, ok := t.getTarget().(*PlusBlockStartState); ok {
+ t2.loopBackState = state
+ }
+ }
+ } else if s2, ok := state.(*StarLoopbackState); ok {
+ for _, t := range s2.GetTransitions() {
+ if t2, ok := t.getTarget().(*StarLoopEntryState); ok {
+ t2.loopBackState = state
+ }
+ }
+ }
+ }
+}
+
+func (a *ATNDeserializer) readDecisions(atn *ATN) {
+ ndecisions := a.readInt()
+
+ for i := 0; i < ndecisions; i++ {
+ s := a.readInt()
+ decState := atn.states[s].(DecisionState)
+
+ atn.DecisionToState = append(atn.DecisionToState, decState)
+ decState.setDecision(i)
+ }
+}
+
+func (a *ATNDeserializer) readLexerActions(atn *ATN) {
+ if atn.grammarType == ATNTypeLexer {
+ count := a.readInt()
+
+ atn.lexerActions = make([]LexerAction, count)
+
+ for i := range atn.lexerActions {
+ actionType := a.readInt()
+ data1 := a.readInt()
+ data2 := a.readInt()
+ atn.lexerActions[i] = a.lexerActionFactory(actionType, data1, data2)
+ }
+ }
+}
+
+func (a *ATNDeserializer) generateRuleBypassTransitions(atn *ATN) {
+ count := len(atn.ruleToStartState)
+
+ for i := 0; i < count; i++ {
+ atn.ruleToTokenType[i] = atn.maxTokenType + i + 1
+ }
+
+ for i := 0; i < count; i++ {
+ a.generateRuleBypassTransition(atn, i)
+ }
+}
+
+func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) {
+ bypassStart := NewBasicBlockStartState()
+
+ bypassStart.ruleIndex = idx
+ atn.addState(bypassStart)
+
+ bypassStop := NewBlockEndState()
+
+ bypassStop.ruleIndex = idx
+ atn.addState(bypassStop)
+
+ bypassStart.endState = bypassStop
+
+ atn.defineDecisionState(bypassStart.BaseDecisionState)
+
+ bypassStop.startState = bypassStart
+
+ var excludeTransition Transition
+ var endState ATNState
+
+ if atn.ruleToStartState[idx].isPrecedenceRule {
+ // Wrap from the beginning of the rule to the StarLoopEntryState
+ endState = nil
+
+ for i := 0; i < len(atn.states); i++ {
+ state := atn.states[i]
+
+ if a.stateIsEndStateFor(state, idx) != nil {
+ endState = state
+ excludeTransition = state.(*StarLoopEntryState).loopBackState.GetTransitions()[0]
+
+ break
+ }
+ }
+
+ if excludeTransition == nil {
+ panic("Couldn't identify final state of the precedence rule prefix section.")
+ }
+ } else {
+ endState = atn.ruleToStopState[idx]
+ }
+
+ // All non-excluded transitions that currently target end state need to target
+ // blockEnd instead
+ for i := 0; i < len(atn.states); i++ {
+ state := atn.states[i]
+
+ for j := 0; j < len(state.GetTransitions()); j++ {
+ transition := state.GetTransitions()[j]
+
+ if transition == excludeTransition {
+ continue
+ }
+
+ if transition.getTarget() == endState {
+ transition.setTarget(bypassStop)
+ }
+ }
+ }
+
+ // All transitions leaving the rule start state need to leave blockStart instead
+ ruleToStartState := atn.ruleToStartState[idx]
+ count := len(ruleToStartState.GetTransitions())
+
+ for count > 0 {
+ // Move the last remaining transition from the rule start state onto
+ // bypassStart, then shrink the slice and decrement count so the loop
+ // terminates once every transition has been moved.
+ bypassStart.AddTransition(ruleToStartState.GetTransitions()[count-1], -1)
+ ruleToStartState.SetTransitions(ruleToStartState.GetTransitions()[:count-1])
+ count--
+ }
+
+ // Link the new states
+ atn.ruleToStartState[idx].AddTransition(NewEpsilonTransition(bypassStart, -1), -1)
+ bypassStop.AddTransition(NewEpsilonTransition(endState, -1), -1)
+
+ MatchState := NewBasicState()
+
+ atn.addState(MatchState)
+ MatchState.AddTransition(NewAtomTransition(bypassStop, atn.ruleToTokenType[idx]), -1)
+ bypassStart.AddTransition(NewEpsilonTransition(MatchState, -1), -1)
+}
+
+func (a *ATNDeserializer) stateIsEndStateFor(state ATNState, idx int) ATNState {
+ if state.GetRuleIndex() != idx {
+ return nil
+ }
+
+ if _, ok := state.(*StarLoopEntryState); !ok {
+ return nil
+ }
+
+ maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget()
+
+ if _, ok := maybeLoopEndState.(*LoopEndState); !ok {
+ return nil
+ }
+
+ var _, ok = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState)
+
+ if maybeLoopEndState.(*LoopEndState).epsilonOnlyTransitions && ok {
+ return state
+ }
+
+ return nil
+}
+
+// markPrecedenceDecisions analyzes the StarLoopEntryState states in the
+// specified ATN to set the StarLoopEntryState.precedenceRuleDecision field to
+// the correct value.
+func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) {
+ for _, state := range atn.states {
+ if _, ok := state.(*StarLoopEntryState); !ok {
+ continue
+ }
+
+ // We analyze the ATN to determine if an ATN decision state is the
+ // decision for the closure block that determines whether a
+ // precedence rule should continue or complete.
+ if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule {
+ maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget()
+
+ if s3, ok := maybeLoopEndState.(*LoopEndState); ok {
+ var _, ok2 = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState)
+
+ if s3.epsilonOnlyTransitions && ok2 {
+ state.(*StarLoopEntryState).precedenceRuleDecision = true
+ }
+ }
+ }
+ }
+}
+
+func (a *ATNDeserializer) verifyATN(atn *ATN) {
+ if !a.options.VerifyATN() {
+ return
+ }
+
+ // Verify assumptions
+ for _, state := range atn.states {
+ if state == nil {
+ continue
+ }
+
+ a.checkCondition(state.GetEpsilonOnlyTransitions() || len(state.GetTransitions()) <= 1, "")
+
+ switch s2 := state.(type) {
+ case *PlusBlockStartState:
+ a.checkCondition(s2.loopBackState != nil, "")
+
+ case *StarLoopEntryState:
+ a.checkCondition(s2.loopBackState != nil, "")
+ a.checkCondition(len(s2.GetTransitions()) == 2, "")
+
+ switch s2.transitions[0].getTarget().(type) {
+ case *StarBlockStartState:
+ _, ok := s2.transitions[1].getTarget().(*LoopEndState)
+
+ a.checkCondition(ok, "")
+ a.checkCondition(!s2.nonGreedy, "")
+
+ case *LoopEndState:
+ var _, ok = s2.transitions[1].getTarget().(*StarBlockStartState)
+
+ a.checkCondition(ok, "")
+ a.checkCondition(s2.nonGreedy, "")
+
+ default:
+ panic("IllegalState")
+ }
+
+ case *StarLoopbackState:
+ a.checkCondition(len(state.GetTransitions()) == 1, "")
+
+ var _, ok = state.GetTransitions()[0].getTarget().(*StarLoopEntryState)
+
+ a.checkCondition(ok, "")
+
+ case *LoopEndState:
+ a.checkCondition(s2.loopBackState != nil, "")
+
+ case *RuleStartState:
+ a.checkCondition(s2.stopState != nil, "")
+
+ case BlockStartState:
+ a.checkCondition(s2.getEndState() != nil, "")
+
+ case *BlockEndState:
+ a.checkCondition(s2.startState != nil, "")
+
+ case DecisionState:
+ a.checkCondition(len(s2.GetTransitions()) <= 1 || s2.getDecision() >= 0, "")
+
+ default:
+ var _, ok = s2.(*RuleStopState)
+
+ a.checkCondition(len(s2.GetTransitions()) <= 1 || ok, "")
+ }
+ }
+}
+
+func (a *ATNDeserializer) checkCondition(condition bool, message string) {
+ if !condition {
+ if message == "" {
+ message = "IllegalState"
+ }
+
+ panic(message)
+ }
+}
+
+func (a *ATNDeserializer) readInt() int {
+ v := a.data[a.pos]
+
+ a.pos++
+
+ return int(v) // data is 32 bits but int is at least that big
+}
+
+func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, src, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition {
+ target := atn.states[trg]
+
+ switch typeIndex {
+ case TransitionEPSILON:
+ return NewEpsilonTransition(target, -1)
+
+ case TransitionRANGE:
+ if arg3 != 0 {
+ return NewRangeTransition(target, TokenEOF, arg2)
+ }
+
+ return NewRangeTransition(target, arg1, arg2)
+
+ case TransitionRULE:
+ return NewRuleTransition(atn.states[arg1], arg2, arg3, target)
+
+ case TransitionPREDICATE:
+ return NewPredicateTransition(target, arg1, arg2, arg3 != 0)
+
+ case TransitionPRECEDENCE:
+ return NewPrecedencePredicateTransition(target, arg1)
+
+ case TransitionATOM:
+ if arg3 != 0 {
+ return NewAtomTransition(target, TokenEOF)
+ }
+
+ return NewAtomTransition(target, arg1)
+
+ case TransitionACTION:
+ return NewActionTransition(target, arg1, arg2, arg3 != 0)
+
+ case TransitionSET:
+ return NewSetTransition(target, sets[arg1])
+
+ case TransitionNOTSET:
+ return NewNotSetTransition(target, sets[arg1])
+
+ case TransitionWILDCARD:
+ return NewWildcardTransition(target)
+ }
+
+ panic("The specified transition type is not valid.")
+}
+
+func (a *ATNDeserializer) stateFactory(typeIndex, ruleIndex int) ATNState {
+ var s ATNState
+
+ switch typeIndex {
+ case ATNStateInvalidType:
+ return nil
+
+ case ATNStateBasic:
+ s = NewBasicState()
+
+ case ATNStateRuleStart:
+ s = NewRuleStartState()
+
+ case ATNStateBlockStart:
+ s = NewBasicBlockStartState()
+
+ case ATNStatePlusBlockStart:
+ s = NewPlusBlockStartState()
+
+ case ATNStateStarBlockStart:
+ s = NewStarBlockStartState()
+
+ case ATNStateTokenStart:
+ s = NewTokensStartState()
+
+ case ATNStateRuleStop:
+ s = NewRuleStopState()
+
+ case ATNStateBlockEnd:
+ s = NewBlockEndState()
+
+ case ATNStateStarLoopBack:
+ s = NewStarLoopbackState()
+
+ case ATNStateStarLoopEntry:
+ s = NewStarLoopEntryState()
+
+ case ATNStatePlusLoopBack:
+ s = NewPlusLoopbackState()
+
+ case ATNStateLoopEnd:
+ s = NewLoopEndState()
+
+ default:
+ panic(fmt.Sprintf("state type %d is invalid", typeIndex))
+ }
+
+ s.SetRuleIndex(ruleIndex)
+
+ return s
+}
+
+func (a *ATNDeserializer) lexerActionFactory(typeIndex, data1, data2 int) LexerAction {
+ switch typeIndex {
+ case LexerActionTypeChannel:
+ return NewLexerChannelAction(data1)
+
+ case LexerActionTypeCustom:
+ return NewLexerCustomAction(data1, data2)
+
+ case LexerActionTypeMode:
+ return NewLexerModeAction(data1)
+
+ case LexerActionTypeMore:
+ return LexerMoreActionINSTANCE
+
+ case LexerActionTypePopMode:
+ return LexerPopModeActionINSTANCE
+
+ case LexerActionTypePushMode:
+ return NewLexerPushModeAction(data1)
+
+ case LexerActionTypeSkip:
+ return LexerSkipActionINSTANCE
+
+ case LexerActionTypeType:
+ return NewLexerTypeAction(data1)
+
+ default:
+ panic(fmt.Sprintf("lexer action %d is invalid", typeIndex))
+ }
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go
new file mode 100644
index 0000000..4152911
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go
@@ -0,0 +1,50 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewBaseATNConfigSet(false))
+
+type IATNSimulator interface {
+ SharedContextCache() *PredictionContextCache
+ ATN() *ATN
+ DecisionToDFA() []*DFA
+}
+
+type BaseATNSimulator struct {
+ atn *ATN
+ sharedContextCache *PredictionContextCache
+ decisionToDFA []*DFA
+}
+
+func NewBaseATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *BaseATNSimulator {
+ b := new(BaseATNSimulator)
+
+ b.atn = atn
+ b.sharedContextCache = sharedContextCache
+
+ return b
+}
+
+func (b *BaseATNSimulator) getCachedContext(context PredictionContext) PredictionContext {
+ if b.sharedContextCache == nil {
+ return context
+ }
+
+ visited := make(map[PredictionContext]PredictionContext)
+
+ return getCachedBasePredictionContext(context, b.sharedContextCache, visited)
+}
+
+func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache {
+ return b.sharedContextCache
+}
+
+func (b *BaseATNSimulator) ATN() *ATN {
+ return b.atn
+}
+
+func (b *BaseATNSimulator) DecisionToDFA() []*DFA {
+ return b.decisionToDFA
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go
new file mode 100644
index 0000000..1f2a56b
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go
@@ -0,0 +1,393 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "strconv"
+
+// Constants for serialization.
+const (
+ ATNStateInvalidType = 0
+ ATNStateBasic = 1
+ ATNStateRuleStart = 2
+ ATNStateBlockStart = 3
+ ATNStatePlusBlockStart = 4
+ ATNStateStarBlockStart = 5
+ ATNStateTokenStart = 6
+ ATNStateRuleStop = 7
+ ATNStateBlockEnd = 8
+ ATNStateStarLoopBack = 9
+ ATNStateStarLoopEntry = 10
+ ATNStatePlusLoopBack = 11
+ ATNStateLoopEnd = 12
+
+ ATNStateInvalidStateNumber = -1
+)
+
+var ATNStateInitialNumTransitions = 4
+
+type ATNState interface {
+ GetEpsilonOnlyTransitions() bool
+
+ GetRuleIndex() int
+ SetRuleIndex(int)
+
+ GetNextTokenWithinRule() *IntervalSet
+ SetNextTokenWithinRule(*IntervalSet)
+
+ GetATN() *ATN
+ SetATN(*ATN)
+
+ GetStateType() int
+
+ GetStateNumber() int
+ SetStateNumber(int)
+
+ GetTransitions() []Transition
+ SetTransitions([]Transition)
+ AddTransition(Transition, int)
+
+ String() string
+ Hash() int
+ Equals(Collectable[ATNState]) bool
+}
+
+type BaseATNState struct {
+ // NextTokenWithinRule caches lookahead during parsing. Not used during construction.
+ NextTokenWithinRule *IntervalSet
+
+ // atn is the current ATN.
+ atn *ATN
+
+ epsilonOnlyTransitions bool
+
+ // ruleIndex tracks the Rule index because there are no Rule objects at runtime.
+ ruleIndex int
+
+ stateNumber int
+
+ stateType int
+
+ // Track the transitions emanating from this ATN state.
+ transitions []Transition
+}
+
+func NewBaseATNState() *BaseATNState {
+ return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType}
+}
+
+func (as *BaseATNState) GetRuleIndex() int {
+ return as.ruleIndex
+}
+
+func (as *BaseATNState) SetRuleIndex(v int) {
+ as.ruleIndex = v
+}
+func (as *BaseATNState) GetEpsilonOnlyTransitions() bool {
+ return as.epsilonOnlyTransitions
+}
+
+func (as *BaseATNState) GetATN() *ATN {
+ return as.atn
+}
+
+func (as *BaseATNState) SetATN(atn *ATN) {
+ as.atn = atn
+}
+
+func (as *BaseATNState) GetTransitions() []Transition {
+ return as.transitions
+}
+
+func (as *BaseATNState) SetTransitions(t []Transition) {
+ as.transitions = t
+}
+
+func (as *BaseATNState) GetStateType() int {
+ return as.stateType
+}
+
+func (as *BaseATNState) GetStateNumber() int {
+ return as.stateNumber
+}
+
+func (as *BaseATNState) SetStateNumber(stateNumber int) {
+ as.stateNumber = stateNumber
+}
+
+func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet {
+ return as.NextTokenWithinRule
+}
+
+func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) {
+ as.NextTokenWithinRule = v
+}
+
+func (as *BaseATNState) Hash() int {
+ return as.stateNumber
+}
+
+func (as *BaseATNState) String() string {
+ return strconv.Itoa(as.stateNumber)
+}
+
+func (as *BaseATNState) Equals(other Collectable[ATNState]) bool {
+ if ot, ok := other.(ATNState); ok {
+ return as.stateNumber == ot.GetStateNumber()
+ }
+
+ return false
+}
+
+func (as *BaseATNState) isNonGreedyExitState() bool {
+ return false
+}
+
+func (as *BaseATNState) AddTransition(trans Transition, index int) {
+ if len(as.transitions) == 0 {
+ as.epsilonOnlyTransitions = trans.getIsEpsilon()
+ } else if as.epsilonOnlyTransitions != trans.getIsEpsilon() {
+ as.epsilonOnlyTransitions = false
+ }
+
+ if index == -1 {
+ as.transitions = append(as.transitions, trans)
+ } else {
+ as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...)
+ // TODO: as.transitions.splice(index, 1, trans)
+ }
+}
+
+type BasicState struct {
+ *BaseATNState
+}
+
+func NewBasicState() *BasicState {
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateBasic
+
+ return &BasicState{BaseATNState: b}
+}
+
+type DecisionState interface {
+ ATNState
+
+ getDecision() int
+ setDecision(int)
+
+ getNonGreedy() bool
+ setNonGreedy(bool)
+}
+
+type BaseDecisionState struct {
+ *BaseATNState
+ decision int
+ nonGreedy bool
+}
+
+func NewBaseDecisionState() *BaseDecisionState {
+ return &BaseDecisionState{BaseATNState: NewBaseATNState(), decision: -1}
+}
+
+func (s *BaseDecisionState) getDecision() int {
+ return s.decision
+}
+
+func (s *BaseDecisionState) setDecision(b int) {
+ s.decision = b
+}
+
+func (s *BaseDecisionState) getNonGreedy() bool {
+ return s.nonGreedy
+}
+
+func (s *BaseDecisionState) setNonGreedy(b bool) {
+ s.nonGreedy = b
+}
+
+type BlockStartState interface {
+ DecisionState
+
+ getEndState() *BlockEndState
+ setEndState(*BlockEndState)
+}
+
+// BaseBlockStartState is the start of a regular (...) block.
+type BaseBlockStartState struct {
+ *BaseDecisionState
+ endState *BlockEndState
+}
+
+func NewBlockStartState() *BaseBlockStartState {
+ return &BaseBlockStartState{BaseDecisionState: NewBaseDecisionState()}
+}
+
+func (s *BaseBlockStartState) getEndState() *BlockEndState {
+ return s.endState
+}
+
+func (s *BaseBlockStartState) setEndState(b *BlockEndState) {
+ s.endState = b
+}
+
+type BasicBlockStartState struct {
+ *BaseBlockStartState
+}
+
+func NewBasicBlockStartState() *BasicBlockStartState {
+ b := NewBlockStartState()
+
+ b.stateType = ATNStateBlockStart
+
+ return &BasicBlockStartState{BaseBlockStartState: b}
+}
+
+var _ BlockStartState = &BasicBlockStartState{}
+
+// BlockEndState is a terminal node of a simple (a|b|c) block.
+type BlockEndState struct {
+ *BaseATNState
+ startState ATNState
+}
+
+func NewBlockEndState() *BlockEndState {
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateBlockEnd
+
+ return &BlockEndState{BaseATNState: b}
+}
+
+// RuleStopState is the last node in the ATN for a rule, unless that rule is the
+// start symbol. In that case, there is one transition to EOF. Later, we might
+// encode references to all calls to this rule to compute FOLLOW sets for error
+// handling.
+type RuleStopState struct {
+ *BaseATNState
+}
+
+func NewRuleStopState() *RuleStopState {
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateRuleStop
+
+ return &RuleStopState{BaseATNState: b}
+}
+
+type RuleStartState struct {
+ *BaseATNState
+ stopState ATNState
+ isPrecedenceRule bool
+}
+
+func NewRuleStartState() *RuleStartState {
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateRuleStart
+
+ return &RuleStartState{BaseATNState: b}
+}
+
+// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two
+// transitions: one to the loop back to start of the block, and one to exit.
+type PlusLoopbackState struct {
+ *BaseDecisionState
+}
+
+func NewPlusLoopbackState() *PlusLoopbackState {
+ b := NewBaseDecisionState()
+
+ b.stateType = ATNStatePlusLoopBack
+
+ return &PlusLoopbackState{BaseDecisionState: b}
+}
+
+// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a
+// decision state; we don't use it for code generation. Somebody might need it,
+// it is included for completeness. In reality, PlusLoopbackState is the real
+// decision-making node for A+.
+type PlusBlockStartState struct {
+ *BaseBlockStartState
+ loopBackState ATNState
+}
+
+func NewPlusBlockStartState() *PlusBlockStartState {
+ b := NewBlockStartState()
+
+ b.stateType = ATNStatePlusBlockStart
+
+ return &PlusBlockStartState{BaseBlockStartState: b}
+}
+
+var _ BlockStartState = &PlusBlockStartState{}
+
+// StarBlockStartState is the block that begins a closure loop.
+type StarBlockStartState struct {
+ *BaseBlockStartState
+}
+
+func NewStarBlockStartState() *StarBlockStartState {
+ b := NewBlockStartState()
+
+ b.stateType = ATNStateStarBlockStart
+
+ return &StarBlockStartState{BaseBlockStartState: b}
+}
+
+var _ BlockStartState = &StarBlockStartState{}
+
+type StarLoopbackState struct {
+ *BaseATNState
+}
+
+func NewStarLoopbackState() *StarLoopbackState {
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateStarLoopBack
+
+ return &StarLoopbackState{BaseATNState: b}
+}
+
+type StarLoopEntryState struct {
+ *BaseDecisionState
+ loopBackState ATNState
+ precedenceRuleDecision bool
+}
+
+func NewStarLoopEntryState() *StarLoopEntryState {
+ b := NewBaseDecisionState()
+
+ b.stateType = ATNStateStarLoopEntry
+
+ // precedenceRuleDecision (default false) indicates whether this state can
+ // benefit from a precedence DFA during SLL decision making.
+ return &StarLoopEntryState{BaseDecisionState: b}
+}
+
+// LoopEndState marks the end of a * or + loop.
+type LoopEndState struct {
+ *BaseATNState
+ loopBackState ATNState
+}
+
+func NewLoopEndState() *LoopEndState {
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateLoopEnd
+
+ return &LoopEndState{BaseATNState: b}
+}
+
+// TokensStartState is the Tokens rule start state linking to each lexer rule start state.
+type TokensStartState struct {
+ *BaseDecisionState
+}
+
+func NewTokensStartState() *TokensStartState {
+ b := NewBaseDecisionState()
+
+ b.stateType = ATNStateTokenStart
+
+ return &TokensStartState{BaseDecisionState: b}
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_type.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_type.go
new file mode 100644
index 0000000..3a515a1
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_type.go
@@ -0,0 +1,11 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// Represents the type of recognizer an ATN applies to.
+const (
+ ATNTypeLexer = 0
+ ATNTypeParser = 1
+)
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go
new file mode 100644
index 0000000..c33f0ad
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go
@@ -0,0 +1,12 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type CharStream interface {
+ IntStream
+ GetText(int, int) string
+ GetTextFromTokens(start, end Token) string
+ GetTextFromInterval(*Interval) string
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_factory.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_factory.go
new file mode 100644
index 0000000..1bb0314
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_factory.go
@@ -0,0 +1,56 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// TokenFactory creates CommonToken objects.
+type TokenFactory interface {
+ Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token
+}
+
+// CommonTokenFactory is the default TokenFactory implementation.
+type CommonTokenFactory struct {
+ // copyText indicates whether CommonToken.setText should be called after
+ // constructing tokens to explicitly set the text. This is useful for cases
+ // where the input stream might not be able to provide arbitrary substrings of
+ // text from the input after the lexer creates a token (e.g. the
+ // implementation of CharStream.GetText in UnbufferedCharStream panics with an
+ // UnsupportedOperationException). Explicitly setting the token text allows
+ // Token.GetText to be called at any time regardless of the input stream
+ // implementation.
+ //
+ // The default value is false to avoid the performance and memory overhead of
+ // copying text for every token unless explicitly requested.
+ copyText bool
+}
+
+func NewCommonTokenFactory(copyText bool) *CommonTokenFactory {
+ return &CommonTokenFactory{copyText: copyText}
+}
+
+// CommonTokenFactoryDEFAULT is the default CommonTokenFactory. It does not
+// explicitly copy token text when constructing tokens.
+var CommonTokenFactoryDEFAULT = NewCommonTokenFactory(false)
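+
+// A hedged example: when the underlying CharStream cannot serve arbitrary
+// substrings after the lexer has moved on, construct a copying factory and
+// attach it to the lexer (a SetTokenFactory-style setter on the concrete
+// lexer is assumed here, not guaranteed by this file):
+//
+//	factory := NewCommonTokenFactory(true)
+//	// lexer.SetTokenFactory(factory)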
+
+func (c *CommonTokenFactory) Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token {
+ t := NewCommonToken(source, ttype, channel, start, stop)
+
+ t.line = line
+ t.column = column
+
+ if text != "" {
+ t.SetText(text)
+ } else if c.copyText && source.charStream != nil {
+ t.SetText(source.charStream.GetTextFromInterval(NewInterval(start, stop)))
+ }
+
+ return t
+}
+
+func (c *CommonTokenFactory) createThin(ttype int, text string) Token {
+ t := NewCommonToken(nil, ttype, TokenDefaultChannel, -1, -1)
+ t.SetText(text)
+
+ return t
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go
new file mode 100644
index 0000000..c6c9485
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go
@@ -0,0 +1,449 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "strconv"
+)
+
+// CommonTokenStream is an implementation of TokenStream that loads tokens from
+// a TokenSource on-demand and places the tokens in a buffer to provide access
+// to any previous token by index. This token stream ignores the value of
+// Token.getChannel. If your parser requires the token stream to filter tokens
+// down to only those on a particular channel, such as Token.DEFAULT_CHANNEL or
+// Token.HIDDEN_CHANNEL, use a filtering token stream such as CommonTokenStream.
+type CommonTokenStream struct {
+ channel int
+
+ // fetchedEOF indicates whether the Token.EOF token has been fetched from
+ // tokenSource and added to tokens. This field improves performance for the
+ // following cases:
+ //
+ // consume: The lookahead check in consume to prevent consuming the EOF symbol is
+ // optimized by checking the values of fetchedEOF and index instead of calling LA.
+ //
+ // fetch: The check to prevent adding multiple EOF symbols into tokens is
+ // trivial with this field.
+ fetchedEOF bool
+
+ // index is the index into tokens of the current token (the next token to consume).
+ // tokens[index] should be LT(1). It is set to -1 when the stream is first
+ // constructed or when SetTokenSource is called, indicating that the first token
+ // has not yet been fetched from the token source. For additional information,
+ // see the documentation of IntStream for a description of initializing methods.
+ index int
+
+ // tokenSource is the TokenSource from which tokens for this stream are
+ // fetched.
+ tokenSource TokenSource
+
+ // tokens is all tokens fetched from the token source. The list is considered a
+ // complete view of the input once fetchedEOF is set to true.
+ tokens []Token
+}
+
+func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream {
+ return &CommonTokenStream{
+ channel: channel,
+ index: -1,
+ tokenSource: lexer,
+ tokens: make([]Token, 0),
+ }
+}
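+
+// Illustrative wiring; NewMyLexer is a hypothetical generated constructor,
+// while NewInputStream and TokenDefaultChannel come from this runtime:
+//
+//	input := NewInputStream("a + b")
+//	lexer := NewMyLexer(input) // hypothetical generated lexer
+//	tokens := NewCommonTokenStream(lexer, TokenDefaultChannel)
+//	tokens.Fill()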
+
+func (c *CommonTokenStream) GetAllTokens() []Token {
+ return c.tokens
+}
+
+func (c *CommonTokenStream) Mark() int {
+ return 0
+}
+
+func (c *CommonTokenStream) Release(marker int) {}
+
+func (c *CommonTokenStream) reset() {
+ c.Seek(0)
+}
+
+func (c *CommonTokenStream) Seek(index int) {
+ c.lazyInit()
+ c.index = c.adjustSeekIndex(index)
+}
+
+func (c *CommonTokenStream) Get(index int) Token {
+ c.lazyInit()
+
+ return c.tokens[index]
+}
+
+func (c *CommonTokenStream) Consume() {
+ SkipEOFCheck := false
+
+ if c.index >= 0 {
+ if c.fetchedEOF {
+ // The last token in tokens is EOF. Skip the check if index points at any
+ // fetched token except the last.
+ SkipEOFCheck = c.index < len(c.tokens)-1
+ } else {
+ // No EOF token in tokens. Skip the check if index points at a fetched token.
+ SkipEOFCheck = c.index < len(c.tokens)
+ }
+ } else {
+ // Not yet initialized
+ SkipEOFCheck = false
+ }
+
+ if !SkipEOFCheck && c.LA(1) == TokenEOF {
+ panic("cannot consume EOF")
+ }
+
+ if c.Sync(c.index + 1) {
+ c.index = c.adjustSeekIndex(c.index + 1)
+ }
+}
+
+// Sync makes sure index i in tokens has a token and returns true if a token is
+// located at index i and otherwise false.
+func (c *CommonTokenStream) Sync(i int) bool {
+ n := i - len(c.tokens) + 1 // TODO: How many more elements do we need?
+
+ if n > 0 {
+ fetched := c.fetch(n)
+ return fetched >= n
+ }
+
+ return true
+}
+
+// fetch adds up to n elements to the buffer and returns the actual number of
+// elements added to the buffer.
+func (c *CommonTokenStream) fetch(n int) int {
+ if c.fetchedEOF {
+ return 0
+ }
+
+ for i := 0; i < n; i++ {
+ t := c.tokenSource.NextToken()
+
+ t.SetTokenIndex(len(c.tokens))
+ c.tokens = append(c.tokens, t)
+
+ if t.GetTokenType() == TokenEOF {
+ c.fetchedEOF = true
+
+ return i + 1
+ }
+ }
+
+ return n
+}
+
+// GetTokens gets all tokens from start to stop inclusive.
+func (c *CommonTokenStream) GetTokens(start int, stop int, types *IntervalSet) []Token {
+ if start < 0 || stop < 0 {
+ return nil
+ }
+
+ c.lazyInit()
+
+ subset := make([]Token, 0)
+
+ if stop >= len(c.tokens) {
+ stop = len(c.tokens) - 1
+ }
+
+ // stop is inclusive, per the doc comment on GetTokens
+ for i := start; i <= stop; i++ {
+ t := c.tokens[i]
+
+ if t.GetTokenType() == TokenEOF {
+ break
+ }
+
+ if types == nil || types.contains(t.GetTokenType()) {
+ subset = append(subset, t)
+ }
+ }
+
+ return subset
+}
+
+func (c *CommonTokenStream) LA(i int) int {
+ return c.LT(i).GetTokenType()
+}
+
+func (c *CommonTokenStream) lazyInit() {
+ if c.index == -1 {
+ c.setup()
+ }
+}
+
+func (c *CommonTokenStream) setup() {
+ c.Sync(0)
+ c.index = c.adjustSeekIndex(0)
+}
+
+func (c *CommonTokenStream) GetTokenSource() TokenSource {
+ return c.tokenSource
+}
+
+// SetTokenSource resets the c token stream by setting its token source.
+func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) {
+ c.tokenSource = tokenSource
+ c.tokens = make([]Token, 0)
+ c.index = -1
+}
+
+// NextTokenOnChannel returns the index of the next token on channel given a
+// starting index. Returns i if tokens[i] is on channel. Returns -1 if there are
+// no tokens on channel between i and EOF.
+func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int {
+ c.Sync(i)
+
+ if i >= len(c.tokens) {
+ return -1
+ }
+
+ token := c.tokens[i]
+
+ for token.GetChannel() != channel {
+ if token.GetTokenType() == TokenEOF {
+ return -1
+ }
+
+ i++
+ c.Sync(i)
+ token = c.tokens[i]
+ }
+
+ return i
+}
+
+// previousTokenOnChannel returns the index of the previous token on channel
+// given a starting index. Returns i if tokens[i] is on channel. Returns -1 if
+// there are no tokens on channel between i and 0.
+func (c *CommonTokenStream) previousTokenOnChannel(i, channel int) int {
+ for i >= 0 && c.tokens[i].GetChannel() != channel {
+ i--
+ }
+
+ return i
+}
+
+// GetHiddenTokensToRight collects all tokens on a specified channel to the
+// right of the current token up until we see a token on DEFAULT_TOKEN_CHANNEL
+// or EOF. If channel is -1, it finds any non-default channel token.
+func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []Token {
+ c.lazyInit()
+
+ if tokenIndex < 0 || tokenIndex >= len(c.tokens) {
+ panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1))
+ }
+
+ nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel)
+ from := tokenIndex + 1
+
+ // If there are no on-channel tokens to the right, then nextOnChannel == -1,
+ // so set "to" to the last token.
+ var to int
+
+ if nextOnChannel == -1 {
+ to = len(c.tokens) - 1
+ } else {
+ to = nextOnChannel
+ }
+
+ return c.filterForChannel(from, to, channel)
+}
+
+// GetHiddenTokensToLeft collects all tokens on channel to the left of the
+// current token until we see a token on DEFAULT_TOKEN_CHANNEL. If channel is
+// -1, it finds any non-default channel token.
+func (c *CommonTokenStream) GetHiddenTokensToLeft(tokenIndex, channel int) []Token {
+ c.lazyInit()
+
+ if tokenIndex < 0 || tokenIndex >= len(c.tokens) {
+ panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1))
+ }
+
+ prevOnChannel := c.previousTokenOnChannel(tokenIndex-1, LexerDefaultTokenChannel)
+
+ if prevOnChannel == tokenIndex-1 {
+ return nil
+ }
+
+ // If there are none on channel to the left and prevOnChannel == -1 then from = 0
+ from := prevOnChannel + 1
+ to := tokenIndex - 1
+
+ return c.filterForChannel(from, to, channel)
+}
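+
+// For example, assuming a grammar that routes comments to a hidden channel,
+// the comments immediately preceding a token can be recovered with:
+//
+//	hidden := tokens.GetHiddenTokensToLeft(tok.GetTokenIndex(), -1)
+//	_ = hidden // nil when there are none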
+
+func (c *CommonTokenStream) filterForChannel(left, right, channel int) []Token {
+ hidden := make([]Token, 0)
+
+ for i := left; i < right+1; i++ {
+ t := c.tokens[i]
+
+ if channel == -1 {
+ if t.GetChannel() != LexerDefaultTokenChannel {
+ hidden = append(hidden, t)
+ }
+ } else if t.GetChannel() == channel {
+ hidden = append(hidden, t)
+ }
+ }
+
+ if len(hidden) == 0 {
+ return nil
+ }
+
+ return hidden
+}
+
+func (c *CommonTokenStream) GetSourceName() string {
+ return c.tokenSource.GetSourceName()
+}
+
+func (c *CommonTokenStream) Size() int {
+ return len(c.tokens)
+}
+
+func (c *CommonTokenStream) Index() int {
+ return c.index
+}
+
+func (c *CommonTokenStream) GetAllText() string {
+ return c.GetTextFromInterval(nil)
+}
+
+func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string {
+ if start == nil || end == nil {
+ return ""
+ }
+
+ return c.GetTextFromInterval(NewInterval(start.GetTokenIndex(), end.GetTokenIndex()))
+}
+
+func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string {
+ return c.GetTextFromInterval(interval.GetSourceInterval())
+}
+
+func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string {
+ c.lazyInit()
+
+ if interval == nil {
+ c.Fill()
+ interval = NewInterval(0, len(c.tokens)-1)
+ } else {
+ c.Sync(interval.Stop)
+ }
+
+ start := interval.Start
+ stop := interval.Stop
+
+ if start < 0 || stop < 0 {
+ return ""
+ }
+
+ if stop >= len(c.tokens) {
+ stop = len(c.tokens) - 1
+ }
+
+ s := ""
+
+ for i := start; i < stop+1; i++ {
+ t := c.tokens[i]
+
+ if t.GetTokenType() == TokenEOF {
+ break
+ }
+
+ s += t.GetText()
+ }
+
+ return s
+}
+
+// Fill gets all tokens from the lexer until EOF.
+func (c *CommonTokenStream) Fill() {
+ c.lazyInit()
+
+ for c.fetch(1000) == 1000 {
+ continue
+ }
+}
+
+func (c *CommonTokenStream) adjustSeekIndex(i int) int {
+ return c.NextTokenOnChannel(i, c.channel)
+}
+
+func (c *CommonTokenStream) LB(k int) Token {
+ if k == 0 || c.index-k < 0 {
+ return nil
+ }
+
+ i := c.index
+ n := 1
+
+ // Find k good tokens looking backward
+ for n <= k {
+ // Skip off-channel tokens
+ i = c.previousTokenOnChannel(i-1, c.channel)
+ n++
+ }
+
+ if i < 0 {
+ return nil
+ }
+
+ return c.tokens[i]
+}
+
+func (c *CommonTokenStream) LT(k int) Token {
+ c.lazyInit()
+
+ if k == 0 {
+ return nil
+ }
+
+ if k < 0 {
+ return c.LB(-k)
+ }
+
+ i := c.index
+ n := 1 // We know tokens[n] is valid
+
+ // Find k good tokens
+ for n < k {
+ // Skip off-channel tokens, but make sure to not look past EOF
+ if c.Sync(i + 1) {
+ i = c.NextTokenOnChannel(i+1, c.channel)
+ }
+
+ n++
+ }
+
+ return c.tokens[i]
+}
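+
+// A short sketch of the lookahead contract: LT(1) is the current on-channel
+// token, LT(-1) the previous one, and LA(k) is shorthand for LT(k)'s type:
+//
+//	if tokens.LA(1) != TokenEOF {
+//		current := tokens.LT(1)
+//		_ = current.GetText()
+//	}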
+
+// getNumberOfOnChannelTokens counts EOF once.
+func (c *CommonTokenStream) getNumberOfOnChannelTokens() int {
+ var n int
+
+ c.Fill()
+
+ for i := 0; i < len(c.tokens); i++ {
+ t := c.tokens[i]
+
+ if t.GetChannel() == c.channel {
+ n++
+ }
+
+ if t.GetTokenType() == TokenEOF {
+ break
+ }
+ }
+
+ return n
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go
new file mode 100644
index 0000000..9ea3200
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go
@@ -0,0 +1,147 @@
+package antlr
+
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+// This file contains all the implementations of custom comparators used for generic collections when the
+// Hash() and Equals() funcs supplied by the struct objects themselves need to be overridden. Normally, we would
+// put the comparators in the source files for the structs themselves, but given that the organization of this code is
+// loosely based upon the Java code, I found it confusing trying to work out which comparator was where and used by
+// which instantiation of a collection. For instance, an Array2DHashSet in the Java source, when used with ATNConfig
+// collections, requires three different comparators depending on what the collection is being used for. Collecting - pun intended -
+// all the comparators here makes it much easier to see which implementation of hash and equals is used by which collection.
+// It also makes it easy to verify that the Hash() and Equals() functions marry up with the Java implementations.
+
+// ObjEqComparator is the equivalent of the Java ObjectEqualityComparator, which is the default instance of
+// Equality comparator. We do not have inheritance in Go, only interfaces, so we use generics to enforce some
+// type safety and avoid having to implement this for every type that we want to perform comparison on.
+//
+// This comparator works by using the standard Hash() and Equals() methods of the type T that is being compared, which
+// allows us to use it in any collection instance that does not require a special hash or equals implementation.
+type ObjEqComparator[T Collectable[T]] struct{}
+
+var (
+ aStateEqInst = &ObjEqComparator[ATNState]{}
+ aConfEqInst = &ObjEqComparator[ATNConfig]{}
+ aConfCompInst = &ATNConfigComparator[ATNConfig]{}
+ atnConfCompInst = &BaseATNConfigComparator[ATNConfig]{}
+ dfaStateEqInst = &ObjEqComparator[*DFAState]{}
+ semctxEqInst = &ObjEqComparator[SemanticContext]{}
+ atnAltCfgEqInst = &ATNAltConfigComparator[ATNConfig]{}
+)
+
+// Equals2 delegates to the Equals() method of type T
+func (c *ObjEqComparator[T]) Equals2(o1, o2 T) bool {
+ return o1.Equals(o2)
+}
+
+// Hash1 delegates to the Hash() method of type T
+func (c *ObjEqComparator[T]) Hash1(o T) int {
+
+ return o.Hash()
+}
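+
+// As an illustration, dfa.go keys its JStore of DFA states with dfaStateEqInst
+// (declared above) so lookups compare states by their ATN configuration sets
+// rather than by pointer identity:
+//
+//	states := NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst)
+//	_ = states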
+
+type SemCComparator[T Collectable[T]] struct{}
+
+// ATNConfigComparator is used as the comparator for the configLookup field of an ATNConfigSet
+// and has a custom Equals() and Hash() implementation, because equality is not based on the
+// standard Hash() and Equals() methods of the ATNConfig type.
+type ATNConfigComparator[T Collectable[T]] struct {
+}
+
+// Equals2 is a custom comparator for ATNConfigs specifically for configLookup
+func (c *ATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
+
+ // Same pointer, must be equal, even if both nil
+ //
+ if o1 == o2 {
+ return true
+
+ }
+
+ // If either are nil, but not both, then the result is false
+ //
+ if o1 == nil || o2 == nil {
+ return false
+ }
+
+ return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() &&
+ o1.GetAlt() == o2.GetAlt() &&
+ o1.GetSemanticContext().Equals(o2.GetSemanticContext())
+}
+
+// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup
+func (c *ATNConfigComparator[T]) Hash1(o ATNConfig) int {
+ hash := 7
+ hash = 31*hash + o.GetState().GetStateNumber()
+ hash = 31*hash + o.GetAlt()
+ hash = 31*hash + o.GetSemanticContext().Hash()
+ return hash
+}
+
+// ATNAltConfigComparator is used as the comparator for mapping configs to Alt Bitsets
+type ATNAltConfigComparator[T Collectable[T]] struct {
+}
+
+// Equals2 is a custom comparator for ATNConfigs specifically for configLookup
+func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
+
+ // Same pointer, must be equal, even if both nil
+ //
+ if o1 == o2 {
+ return true
+
+ }
+
+ // If either are nil, but not both, then the result is false
+ //
+ if o1 == nil || o2 == nil {
+ return false
+ }
+
+ return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() &&
+ o1.GetContext().Equals(o2.GetContext())
+}
+
+// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup
+func (c *ATNAltConfigComparator[T]) Hash1(o ATNConfig) int {
+ h := murmurInit(7)
+ h = murmurUpdate(h, o.GetState().GetStateNumber())
+ h = murmurUpdate(h, o.GetContext().Hash())
+ return murmurFinish(h, 2)
+}
+
+// BaseATNConfigComparator is used as the comparator for the configLookup field of a BaseATNConfigSet
+// and has a custom Equals() and Hash() implementation, because equality is not based on the
+// standard Hash() and Equals() methods of the ATNConfig type.
+type BaseATNConfigComparator[T Collectable[T]] struct {
+}
+
+// Equals2 is a custom comparator for ATNConfigs specifically for baseATNConfigSet
+func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
+
+ // Same pointer, must be equal, even if both nil
+ //
+ if o1 == o2 {
+ return true
+
+ }
+
+ // If either are nil, but not both, then the result is false
+ //
+ if o1 == nil || o2 == nil {
+ return false
+ }
+
+ return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() &&
+ o1.GetAlt() == o2.GetAlt() &&
+ o1.GetSemanticContext().Equals(o2.GetSemanticContext())
+}
+
+// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup, but in fact just
+// delegates to the standard Hash() method of the ATNConfig type.
+func (c *BaseATNConfigComparator[T]) Hash1(o ATNConfig) int {
+
+ return o.Hash()
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go
new file mode 100644
index 0000000..bfd43e1
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go
@@ -0,0 +1,148 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type DFA struct {
+ // atnStartState is the ATN state in which this was created
+ atnStartState DecisionState
+
+ decision int
+
+ // states is all the DFA states. Use Map to get the old state back; Set can only
+ // indicate whether it is there. Go maps handle key hash collisions internally and are very
+ // good, but a DFAState is an object and can't be used directly as the key as it can in, say, Java
+ // and C#, whereby if the hashcode is the same for two objects, then Equals() is called against them
+ // to see if they really are the same object.
+ //
+ states *JStore[*DFAState, *ObjEqComparator[*DFAState]]
+
+ numstates int
+
+ s0 *DFAState
+
+ // precedenceDfa is the backing field for isPrecedenceDfa and setPrecedenceDfa.
+ // True if the DFA is for a precedence decision and false otherwise.
+ precedenceDfa bool
+}
+
+func NewDFA(atnStartState DecisionState, decision int) *DFA {
+ dfa := &DFA{
+ atnStartState: atnStartState,
+ decision: decision,
+ states: NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst),
+ }
+ if s, ok := atnStartState.(*StarLoopEntryState); ok && s.precedenceRuleDecision {
+ dfa.precedenceDfa = true
+ dfa.s0 = NewDFAState(-1, NewBaseATNConfigSet(false))
+ dfa.s0.isAcceptState = false
+ dfa.s0.requiresFullContext = false
+ }
+ return dfa
+}
+
+// getPrecedenceStartState gets the start state for the current precedence and
+// returns the start state corresponding to the specified precedence if a start
+// state exists for the specified precedence and nil otherwise. d must be a
+// precedence DFA. See also isPrecedenceDfa.
+func (d *DFA) getPrecedenceStartState(precedence int) *DFAState {
+ if !d.getPrecedenceDfa() {
+ panic("only precedence DFAs may contain a precedence start state")
+ }
+
+ // s0.edges is never nil for a precedence DFA
+ if precedence < 0 || precedence >= len(d.getS0().getEdges()) {
+ return nil
+ }
+
+ return d.getS0().getIthEdge(precedence)
+}
+
+// setPrecedenceStartState sets the start state for the current precedence. d
+// must be a precedence DFA. See also isPrecedenceDfa.
+func (d *DFA) setPrecedenceStartState(precedence int, startState *DFAState) {
+ if !d.getPrecedenceDfa() {
+ panic("only precedence DFAs may contain a precedence start state")
+ }
+
+ if precedence < 0 {
+ return
+ }
+
+ // Synchronization on s0 here is ok. When the DFA is turned into a
+ // precedence DFA, s0 will be initialized once and not updated again. s0.edges
+ // is never nil for a precedence DFA.
+ s0 := d.getS0()
+ if precedence >= s0.numEdges() {
+ edges := append(s0.getEdges(), make([]*DFAState, precedence+1-s0.numEdges())...)
+ s0.setEdges(edges)
+ d.setS0(s0)
+ }
+
+ s0.setIthEdge(precedence, startState)
+}
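+
+// Sketch of the grow-on-demand invariant with hypothetical values: writing the
+// start state for precedence 3 grows s0's edges to length 4, while a read for
+// an uncached precedence falls through the bounds check and yields nil:
+//
+//	d.setPrecedenceStartState(3, someState) // edges grows to length 4
+//	_ = d.getPrecedenceStartState(7)        // nil, not a panic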
+
+func (d *DFA) getPrecedenceDfa() bool {
+ return d.precedenceDfa
+}
+
+// setPrecedenceDfa sets whether d is a precedence DFA. If precedenceDfa differs
+// from the current DFA configuration, then d.states is cleared, the initial
+// state s0 is set to a new DFAState with an empty outgoing DFAState.edges to
+// store the start states for individual precedence values if precedenceDfa is
+// true or nil otherwise, and d.precedenceDfa is updated.
+func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
+ if d.getPrecedenceDfa() != precedenceDfa {
+ d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst)
+ d.numstates = 0
+
+ if precedenceDfa {
+ precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false))
+
+ precedenceState.setEdges(make([]*DFAState, 0))
+ precedenceState.isAcceptState = false
+ precedenceState.requiresFullContext = false
+ d.setS0(precedenceState)
+ } else {
+ d.setS0(nil)
+ }
+
+ d.precedenceDfa = precedenceDfa
+ }
+}
+
+func (d *DFA) getS0() *DFAState {
+ return d.s0
+}
+
+func (d *DFA) setS0(s *DFAState) {
+ d.s0 = s
+}
+
+// sortedStates returns the states in d sorted by their state number.
+func (d *DFA) sortedStates() []*DFAState {
+
+ vs := d.states.SortedSlice(func(i, j *DFAState) bool {
+ return i.stateNumber < j.stateNumber
+ })
+
+ return vs
+}
+
+func (d *DFA) String(literalNames []string, symbolicNames []string) string {
+ if d.getS0() == nil {
+ return ""
+ }
+
+ return NewDFASerializer(d, literalNames, symbolicNames).String()
+}
+
+func (d *DFA) ToLexerString() string {
+ if d.getS0() == nil {
+ return ""
+ }
+
+ return NewLexerDFASerializer(d).String()
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go
new file mode 100644
index 0000000..84d0a31
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go
@@ -0,0 +1,158 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// DFASerializer is a DFA walker that knows how to dump them to serialized
+// strings.
+type DFASerializer struct {
+ dfa *DFA
+ literalNames []string
+ symbolicNames []string
+}
+
+func NewDFASerializer(dfa *DFA, literalNames, symbolicNames []string) *DFASerializer {
+ if literalNames == nil {
+ literalNames = make([]string, 0)
+ }
+
+ if symbolicNames == nil {
+ symbolicNames = make([]string, 0)
+ }
+
+ return &DFASerializer{
+ dfa: dfa,
+ literalNames: literalNames,
+ symbolicNames: symbolicNames,
+ }
+}
+
+func (d *DFASerializer) String() string {
+ if d.dfa.getS0() == nil {
+ return ""
+ }
+
+ buf := ""
+ states := d.dfa.sortedStates()
+
+ for _, s := range states {
+ if s.edges != nil {
+ n := len(s.edges)
+
+ for j := 0; j < n; j++ {
+ t := s.edges[j]
+
+ if t != nil && t.stateNumber != 0x7FFFFFFF {
+ buf += d.GetStateString(s)
+ buf += "-"
+ buf += d.getEdgeLabel(j)
+ buf += "->"
+ buf += d.GetStateString(t)
+ buf += "\n"
+ }
+ }
+ }
+ }
+
+ if len(buf) == 0 {
+ return ""
+ }
+
+ return buf
+}
+
+func (d *DFASerializer) getEdgeLabel(i int) string {
+ if i == 0 {
+ return "EOF"
+ } else if d.literalNames != nil && i-1 < len(d.literalNames) {
+ return d.literalNames[i-1]
+ } else if d.symbolicNames != nil && i-1 < len(d.symbolicNames) {
+ return d.symbolicNames[i-1]
+ }
+
+ return strconv.Itoa(i - 1)
+}
+
+func (d *DFASerializer) GetStateString(s *DFAState) string {
+ var a, b string
+
+ if s.isAcceptState {
+ a = ":"
+ }
+
+ if s.requiresFullContext {
+ b = "^"
+ }
+
+ baseStateStr := a + "s" + strconv.Itoa(s.stateNumber) + b
+
+ if s.isAcceptState {
+ if s.predicates != nil {
+ return baseStateStr + "=>" + fmt.Sprint(s.predicates)
+ }
+
+ return baseStateStr + "=>" + fmt.Sprint(s.prediction)
+ }
+
+ return baseStateStr
+}
+
+type LexerDFASerializer struct {
+ *DFASerializer
+}
+
+func NewLexerDFASerializer(dfa *DFA) *LexerDFASerializer {
+ return &LexerDFASerializer{DFASerializer: NewDFASerializer(dfa, nil, nil)}
+}
+
+func (l *LexerDFASerializer) getEdgeLabel(i int) string {
+ var sb strings.Builder
+ sb.Grow(6)
+ sb.WriteByte('\'')
+ sb.WriteRune(rune(i))
+ sb.WriteByte('\'')
+ return sb.String()
+}
+
+func (l *LexerDFASerializer) String() string {
+ if l.dfa.getS0() == nil {
+ return ""
+ }
+
+ buf := ""
+ states := l.dfa.sortedStates()
+
+ for i := 0; i < len(states); i++ {
+ s := states[i]
+
+ if s.edges != nil {
+ n := len(s.edges)
+
+ for j := 0; j < n; j++ {
+ t := s.edges[j]
+
+ if t != nil && t.stateNumber != 0x7FFFFFFF {
+ buf += l.GetStateString(s)
+ buf += "-"
+ buf += l.getEdgeLabel(j)
+ buf += "->"
+ buf += l.GetStateString(t)
+ buf += "\n"
+ }
+ }
+ }
+ }
+
+ if len(buf) == 0 {
+ return ""
+ }
+
+ return buf
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go
new file mode 100644
index 0000000..c90dec5
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go
@@ -0,0 +1,169 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+)
+
+// PredPrediction maps a predicate to a predicted alternative.
+type PredPrediction struct {
+ alt int
+ pred SemanticContext
+}
+
+func NewPredPrediction(pred SemanticContext, alt int) *PredPrediction {
+ return &PredPrediction{alt: alt, pred: pred}
+}
+
+func (p *PredPrediction) String() string {
+ return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")"
+}
+
+// DFAState represents a set of possible ATN configurations. As Aho, Sethi,
+// Ullman p. 117 says: "The DFA uses its state to keep track of all possible
+// states the ATN can be in after reading each input symbol. That is to say,
+// after reading input a1a2..an, the DFA is in a state that represents the
+// subset T of the states of the ATN that are reachable from the ATN's start
+// state along some path labeled a1a2..an." In conventional NFA-to-DFA
+// conversion, therefore, the subset T would be a bitset representing the set of
+// states the ATN could be in. We need to track the alt predicted by each state
+// as well, however. More importantly, we need to maintain a stack of states,
+// tracking the closure operations as they jump from rule to rule, emulating
+// rule invocations (method calls). I have to add a stack to simulate the proper
+// lookahead sequences for the underlying LL grammar from which the ATN was
+// derived.
+//
+// I use a set of ATNConfig objects, not simple states. An ATNConfig is both a
+// state (ala normal conversion) and a RuleContext describing the chain of rules
+// (if any) followed to arrive at that state.
+//
+// A DFAState may have multiple references to a particular state, but with
+// different ATN contexts (with same or different alts) meaning that state was
+// reached via a different set of rule invocations.
+type DFAState struct {
+ stateNumber int
+ configs ATNConfigSet
+
+ // edges elements point to the target of the symbol. Shift up by 1 so (-1)
+ // Token.EOF maps to the first element.
+ edges []*DFAState
+
+ isAcceptState bool
+
+ // prediction is the ttype we match or alt we predict if the state is accept.
+ // Set to ATN.INVALID_ALT_NUMBER when predicates != nil or
+ // requiresFullContext.
+ prediction int
+
+ lexerActionExecutor *LexerActionExecutor
+
+ // requiresFullContext indicates it was created during an SLL prediction that
+ // discovered a conflict between the configurations in the state. Future
+ // ParserATNSimulator.execATN invocations immediately jump to
+ // full-context prediction if true.
+ requiresFullContext bool
+
+ // predicates is the predicates associated with the ATN configurations of the
+ // DFA state during SLL parsing. When we have predicates, requiresFullContext
+ // is false, since full context prediction evaluates predicates on-the-fly. If
+ // predicates is not nil, then prediction is ATN.INVALID_ALT_NUMBER.
+ //
+ // We only use these for non-requiresFullContext but conflicting states. That
+ // means we know from the context (it's $ or we don't dip into outer context)
+ // that it's an ambiguity not a conflict.
+ //
+ // This list is computed by
+ // ParserATNSimulator.predicateDFAState.
+ predicates []*PredPrediction
+}
+
+func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState {
+ if configs == nil {
+ configs = NewBaseATNConfigSet(false)
+ }
+
+ return &DFAState{configs: configs, stateNumber: stateNumber}
+}
+
+// GetAltSet gets the set of all alts mentioned by all ATN configurations in d.
+func (d *DFAState) GetAltSet() []int {
+ var alts []int
+
+ if d.configs != nil {
+ for _, c := range d.configs.GetItems() {
+ alts = append(alts, c.GetAlt())
+ }
+ }
+
+ if len(alts) == 0 {
+ return nil
+ }
+
+ return alts
+}
+
+func (d *DFAState) getEdges() []*DFAState {
+ return d.edges
+}
+
+func (d *DFAState) numEdges() int {
+ return len(d.edges)
+}
+
+func (d *DFAState) getIthEdge(i int) *DFAState {
+ return d.edges[i]
+}
+
+func (d *DFAState) setEdges(newEdges []*DFAState) {
+ d.edges = newEdges
+}
+
+func (d *DFAState) setIthEdge(i int, edge *DFAState) {
+ d.edges[i] = edge
+}
+
+func (d *DFAState) setPrediction(v int) {
+ d.prediction = v
+}
+
+func (d *DFAState) String() string {
+ var s string
+ if d.isAcceptState {
+ if d.predicates != nil {
+ s = "=>" + fmt.Sprint(d.predicates)
+ } else {
+ s = "=>" + fmt.Sprint(d.prediction)
+ }
+ }
+
+ return fmt.Sprintf("%d:%s%s", d.stateNumber, fmt.Sprint(d.configs), s)
+}
+
+func (d *DFAState) Hash() int {
+ h := murmurInit(7)
+ h = murmurUpdate(h, d.configs.Hash())
+ return murmurFinish(h, 1)
+}
+
+// Equals returns whether d equals other. Two DFAStates are equal if their ATN
+// configuration sets are the same. This method is used to see if a state
+// already exists.
+//
+// Because the number of alternatives and number of ATN configurations are
+// finite, there is a finite number of DFA states that can be processed. This is
+// necessary to show that the algorithm terminates.
+//
+// Cannot test the DFA state numbers here because in
+// ParserATNSimulator.addDFAState we need to know if any other state exists that
+// has this exact set of ATN configurations. The stateNumber is irrelevant.
+func (d *DFAState) Equals(o Collectable[*DFAState]) bool {
+ if d == o {
+ return true
+ }
+
+ return d.configs.Equals(o.(*DFAState).configs)
+}
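
The Hash/Equals contract above is what lets the simulator deduplicate DFA states by configuration set rather than by state number. A minimal sketch of that contract using only the exported constructors; the expected output follows from the definitions above:

```go
package main

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr/v4"
)

func main() {
	// Different state numbers, same (default, empty) configuration set:
	// NewDFAState substitutes an empty BaseATNConfigSet when given nil.
	a := antlr.NewDFAState(1, nil)
	b := antlr.NewDFAState(2, nil)

	fmt.Println(a.Equals(b))          // should print true: only configs are compared
	fmt.Println(a.Hash() == b.Hash()) // should print true: hash derives from configs
}
```
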
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go
new file mode 100644
index 0000000..c55bcc1
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go
@@ -0,0 +1,109 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "strconv"
+)
+
+// This implementation of {@link ANTLRErrorListener} can be used to identify
+// certain potential correctness and performance problems in grammars. Reports
+// are made by calling {@link Parser//NotifyErrorListeners} with the appropriate
+// message. It flags three situations:
+//
+// Ambiguities: These are cases where more than one path through the
+// grammar can Match the input.
+//
+// Weak context sensitivity: These are cases where full-context
+// prediction resolved an SLL conflict to a unique alternative which equaled the
+// minimum alternative of the SLL conflict.
+//
+// Strong (forced) context sensitivity: These are cases where the
+// full-context prediction resolved an SLL conflict to a unique alternative,
+// and the minimum alternative of the SLL conflict was found to not be
+// a truly viable alternative. Two-stage parsing cannot be used for inputs where
+// this situation occurs.
+
+type DiagnosticErrorListener struct {
+ *DefaultErrorListener
+
+ exactOnly bool
+}
+
+func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener {
+
+ n := new(DiagnosticErrorListener)
+
+ // whether all ambiguities or only exact ambiguities are Reported.
+ n.exactOnly = exactOnly
+ return n
+}
+
+func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
+ if d.exactOnly && !exact {
+ return
+ }
+ msg := "reportAmbiguity d=" +
+ d.getDecisionDescription(recognizer, dfa) +
+ ": ambigAlts=" +
+ d.getConflictingAlts(ambigAlts, configs).String() +
+ ", input='" +
+ recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
+ recognizer.NotifyErrorListeners(msg, nil, nil)
+}
+
+func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
+
+ msg := "reportAttemptingFullContext d=" +
+ d.getDecisionDescription(recognizer, dfa) +
+ ", input='" +
+ recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
+ recognizer.NotifyErrorListeners(msg, nil, nil)
+}
+
+func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
+ msg := "reportContextSensitivity d=" +
+ d.getDecisionDescription(recognizer, dfa) +
+ ", input='" +
+ recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
+ recognizer.NotifyErrorListeners(msg, nil, nil)
+}
+
+func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa *DFA) string {
+ decision := dfa.decision
+ ruleIndex := dfa.atnStartState.GetRuleIndex()
+
+ ruleNames := recognizer.GetRuleNames()
+ if ruleIndex < 0 || ruleIndex >= len(ruleNames) {
+ return strconv.Itoa(decision)
+ }
+ ruleName := ruleNames[ruleIndex]
+ if ruleName == "" {
+ return strconv.Itoa(decision)
+ }
+ return strconv.Itoa(decision) + " (" + ruleName + ")"
+}
+
+// Computes the set of conflicting or ambiguous alternatives from a
+// configuration set, if that information was not already provided by the
+// parser.
+//
+// @param ReportedAlts The set of conflicting or ambiguous alternatives, as
+// Reported by the parser.
+// @param configs The conflicting or ambiguous configuration set.
+// @return Returns {@code ReportedAlts} if it is not {@code nil}, otherwise
+// returns the set of alternatives represented in {@code configs}.
+func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set ATNConfigSet) *BitSet {
+ if ReportedAlts != nil {
+ return ReportedAlts
+ }
+ result := NewBitSet()
+ for _, c := range set.GetItems() {
+ result.add(c.GetAlt())
+ }
+
+ return result
+}
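
A sketch of wiring this listener into a parser. antlr.Parser, AddErrorListener, GetInterpreter, and the prediction-mode constant are real runtime API; any generated parser satisfies the interface. Exact-ambiguity reporting only fires under full-context prediction, hence the mode change:

```go
package parsing

import (
	"github.com/antlr/antlr4/runtime/Go/antlr/v4"
)

// attachDiagnostics turns on grammar diagnostics for any generated parser.
func attachDiagnostics(p antlr.Parser) {
	// true: report only exact ambiguities; pass false to see inexact ones too.
	p.AddErrorListener(antlr.NewDiagnosticErrorListener(true))

	// Ambiguity detection requires full-context (LL) prediction.
	p.GetInterpreter().SetPredictionMode(antlr.PredictionModeLLExactAmbigDetection)
}
```
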
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go
new file mode 100644
index 0000000..f679f0d
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go
@@ -0,0 +1,104 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+)
+
+// Provides an empty default implementation of {@link ANTLRErrorListener}. The
+// default implementation of each method does nothing, but can be overridden as
+// necessary.
+
+type ErrorListener interface {
+ SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)
+ ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet)
+ ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet)
+ ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet)
+}
+
+type DefaultErrorListener struct {
+}
+
+func NewDefaultErrorListener() *DefaultErrorListener {
+ return new(DefaultErrorListener)
+}
+
+func (d *DefaultErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
+}
+
+func (d *DefaultErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
+}
+
+func (d *DefaultErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
+}
+
+func (d *DefaultErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
+}
+
+type ConsoleErrorListener struct {
+ *DefaultErrorListener
+}
+
+func NewConsoleErrorListener() *ConsoleErrorListener {
+ return new(ConsoleErrorListener)
+}
+
+// Provides a default instance of {@link ConsoleErrorListener}.
+var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener()
+
+// {@inheritDoc}
+//
+// This implementation prints messages to os.Stderr containing the
+// values of {@code line}, {@code charPositionInLine}, and {@code msg} using
+// the following format:
+//
+//	line <line>:<charPositionInLine> <msg>
+//
+func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
+ fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg)
+}
+
+type ProxyErrorListener struct {
+ *DefaultErrorListener
+ delegates []ErrorListener
+}
+
+func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener {
+ if delegates == nil {
+ panic("delegates is not provided")
+ }
+ l := new(ProxyErrorListener)
+ l.delegates = delegates
+ return l
+}
+
+func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
+ for _, d := range p.delegates {
+ d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e)
+ }
+}
+
+func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
+ for _, d := range p.delegates {
+ d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
+ }
+}
+
+func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
+ for _, d := range p.delegates {
+ d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs)
+ }
+}
+
+func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
+ for _, d := range p.delegates {
+ d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs)
+ }
+}
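
Because DefaultErrorListener supplies no-op implementations of all four callbacks, a custom listener only needs to override what it cares about. A sketch of a listener that collects syntax errors instead of printing them; the type and field names here are illustrative, not part of the runtime:

```go
package parsing

import (
	"github.com/antlr/antlr4/runtime/Go/antlr/v4"
)

// collectedError is a hypothetical record of one reported syntax error.
type collectedError struct {
	Line, Column int
	Msg          string
}

// errorCollector overrides only SyntaxError; the embedded
// DefaultErrorListener keeps the three report callbacks as no-ops.
type errorCollector struct {
	*antlr.DefaultErrorListener
	Errors []collectedError
}

func (c *errorCollector) SyntaxError(recognizer antlr.Recognizer, offendingSymbol interface{}, line, column int, msg string, e antlr.RecognitionException) {
	c.Errors = append(c.Errors, collectedError{Line: line, Column: column, Msg: msg})
}

// attachCollector swaps the default console listener for the collector.
func attachCollector(p antlr.Parser) *errorCollector {
	c := &errorCollector{DefaultErrorListener: antlr.NewDefaultErrorListener()}
	p.RemoveErrorListeners() // drop ConsoleErrorListenerINSTANCE
	p.AddErrorListener(c)
	return c
}
```
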
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go
new file mode 100644
index 0000000..5c0a637
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go
@@ -0,0 +1,734 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+type ErrorStrategy interface {
+ reset(Parser)
+ RecoverInline(Parser) Token
+ Recover(Parser, RecognitionException)
+ Sync(Parser)
+ InErrorRecoveryMode(Parser) bool
+ ReportError(Parser, RecognitionException)
+ ReportMatch(Parser)
+}
+
+// This is the default implementation of {@link ANTLRErrorStrategy} used for
+// error Reporting and recovery in ANTLR parsers.
+type DefaultErrorStrategy struct {
+ errorRecoveryMode bool
+ lastErrorIndex int
+ lastErrorStates *IntervalSet
+}
+
+var _ ErrorStrategy = &DefaultErrorStrategy{}
+
+func NewDefaultErrorStrategy() *DefaultErrorStrategy {
+
+ d := new(DefaultErrorStrategy)
+
+ // Indicates whether the error strategy is currently "recovering from an
+ // error". This is used to suppress Reporting multiple error messages while
+ // attempting to recover from a detected syntax error.
+ //
+ // @see //InErrorRecoveryMode
+ //
+ d.errorRecoveryMode = false
+
+ // The index into the input stream where the last error occurred.
+ // This is used to prevent infinite loops where an error is found
+ // but no token is consumed during recovery...another error is found,
+ // ad nauseum. This is a failsafe mechanism to guarantee that at least
+ // one token/tree node is consumed for two errors.
+ //
+ d.lastErrorIndex = -1
+ d.lastErrorStates = nil
+ return d
+}
+
+// The default implementation simply calls {@link //endErrorCondition} to
+// ensure that the handler is not in error recovery mode.
+func (d *DefaultErrorStrategy) reset(recognizer Parser) {
+ d.endErrorCondition(recognizer)
+}
+
+// This method is called to enter error recovery mode when a recognition
+// exception is Reported.
+//
+// @param recognizer the parser instance
+func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) {
+ d.errorRecoveryMode = true
+}
+
+func (d *DefaultErrorStrategy) InErrorRecoveryMode(recognizer Parser) bool {
+ return d.errorRecoveryMode
+}
+
+// This method is called to leave error recovery mode after recovering from
+// a recognition exception.
+//
+// @param recognizer
+func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) {
+ d.errorRecoveryMode = false
+ d.lastErrorStates = nil
+ d.lastErrorIndex = -1
+}
+
+// {@inheritDoc}
+//
+// The default implementation simply calls {@link //endErrorCondition}.
+func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) {
+ d.endErrorCondition(recognizer)
+}
+
+// {@inheritDoc}
+//
+// The default implementation returns immediately if the handler is already
+// in error recovery mode. Otherwise, it calls {@link //beginErrorCondition}
+// and dispatches the Reporting task based on the runtime type of {@code e}
+// according to the following list:
+//
+// {@link NoViableAltException}: Dispatches the call to
+// {@link //ReportNoViableAlternative}
+//
+// {@link InputMisMatchException}: Dispatches the call to
+// {@link //ReportInputMisMatch}
+//
+// {@link FailedPredicateException}: Dispatches the call to
+// {@link //ReportFailedPredicate}
+//
+// All other types: calls {@link Parser//NotifyErrorListeners} to Report
+// the exception
+//
+func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) {
+ // if we've already Reported an error and have not Matched a token
+ // yet successfully, don't Report any errors.
+ if d.InErrorRecoveryMode(recognizer) {
+ return // don't Report spurious errors
+ }
+ d.beginErrorCondition(recognizer)
+
+ switch t := e.(type) {
+ default:
+ fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name())
+ // fmt.Println(e.stack)
+ recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e)
+ case *NoViableAltException:
+ d.ReportNoViableAlternative(recognizer, t)
+ case *InputMisMatchException:
+ d.ReportInputMisMatch(recognizer, t)
+ case *FailedPredicateException:
+ d.ReportFailedPredicate(recognizer, t)
+ }
+}
+
+// {@inheritDoc}
+//
+// The default implementation reSynchronizes the parser by consuming tokens
+// until we find one in the reSynchronization set--loosely the set of tokens
+// that can follow the current rule.
+func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
+
+ if d.lastErrorIndex == recognizer.GetInputStream().Index() &&
+ d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) {
+ // uh oh, another error at same token index and previously-Visited
+ // state in ATN must be a case where LT(1) is in the recovery
+ // token set so nothing got consumed. Consume a single token
+ // at least to prevent an infinite loop; this is a failsafe.
+ recognizer.Consume()
+ }
+ d.lastErrorIndex = recognizer.GetInputStream().Index()
+ if d.lastErrorStates == nil {
+ d.lastErrorStates = NewIntervalSet()
+ }
+ d.lastErrorStates.addOne(recognizer.GetState())
+ followSet := d.getErrorRecoverySet(recognizer)
+ d.consumeUntil(recognizer, followSet)
+}
+
+// The default implementation of {@link ANTLRErrorStrategy//Sync} makes sure
+// that the current lookahead symbol is consistent with what we were expecting
+// at this point in the ATN. You can call this at any time, but ANTLR only
+// generates code to check before subrules/loops and each iteration.
+//
+//
+// Implements Jim Idle's magic Sync mechanism in closures and optional
+// subrules. E.g.,
+//
+//
+// a : Sync ( stuff Sync )*
+// Sync : {consume to what can follow Sync}
+//
+//
+// At the start of a sub rule upon error, {@link //Sync} performs single
+// token deletion, if possible. If it can't do that, it bails on the current
+// rule and uses the default error recovery, which consumes until the
+// reSynchronization set of the current rule.
+//
+//
+// If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
+// with an empty alternative), then the expected set includes what follows
+// the subrule.
+//
+//
+// During loop iteration, it consumes until it sees a token that can start a
+// sub rule or what follows the loop. Yes, that is pretty aggressive. We opt to
+// stay in the loop as long as possible.
+//
+//
+// ORIGINS
+//
+//
+// Previous versions of ANTLR did a poor job of their recovery within loops.
+// A single mismatched token or missing token would force the parser to bail
+// out of the entire rules surrounding the loop. So, for rule
+//
+//
+// classfunc : 'class' ID '{' member* '}'
+//
+//
+// input with an extra token between members would force the parser to
+// consume until it found the next class definition rather than the next
+// member definition of the current class.
+//
+//
+// This functionality costs a little bit of effort because the parser has to
+// compare the token set at the start of the loop and at each iteration. If for
+// some reason speed is suffering for you, you can turn off this
+// functionality by simply overriding this method as a blank { }.
+func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
+ // If already recovering, don't try to Sync
+ if d.InErrorRecoveryMode(recognizer) {
+ return
+ }
+
+ s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
+ la := recognizer.GetTokenStream().LA(1)
+
+ // try the cheaper subset first; might get lucky. Seems to shave a wee bit off.
+ nextTokens := recognizer.GetATN().NextTokens(s, nil)
+ if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) {
+ return
+ }
+
+ switch s.GetStateType() {
+ case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry:
+ // Report error and recover if possible
+ if d.SingleTokenDeletion(recognizer) != nil {
+ return
+ }
+ panic(NewInputMisMatchException(recognizer))
+ case ATNStatePlusLoopBack, ATNStateStarLoopBack:
+ d.ReportUnwantedToken(recognizer)
+ expecting := NewIntervalSet()
+ expecting.addSet(recognizer.GetExpectedTokens())
+ whatFollowsLoopIterationOrRule := expecting.addSet(d.getErrorRecoverySet(recognizer))
+ d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule)
+ default:
+ // do nothing if we can't identify the exact kind of ATN state
+ }
+}
+
+// This is called by {@link //ReportError} when the exception is a
+// {@link NoViableAltException}.
+//
+// @see //ReportError
+//
+// @param recognizer the parser instance
+// @param e the recognition exception
+func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) {
+ tokens := recognizer.GetTokenStream()
+ var input string
+ if tokens != nil {
+ if e.startToken.GetTokenType() == TokenEOF {
+ input = ""
+ } else {
+ input = tokens.GetTextFromTokens(e.startToken, e.offendingToken)
+ }
+ } else {
+ input = ""
+ }
+ msg := "no viable alternative at input " + d.escapeWSAndQuote(input)
+ recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+// This is called by {@link //ReportError} when the exception is an
+// {@link InputMisMatchException}.
+//
+// @see //ReportError
+//
+// @param recognizer the parser instance
+// @param e the recognition exception
+func (d *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) {
+ msg := "mismatched input " + d.GetTokenErrorDisplay(e.offendingToken) +
+ " expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
+ recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+// This is called by {@link //ReportError} when the exception is a
+// {@link FailedPredicateException}.
+//
+// @see //ReportError
+//
+// @param recognizer the parser instance
+// @param e the recognition exception
+func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) {
+ ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()]
+ msg := "rule " + ruleName + " " + e.message
+ recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+// This method is called to Report a syntax error which requires the removal
+// of a token from the input stream. At the time this method is called, the
+// erroneous symbol is the current {@code LT(1)} symbol and has not yet been
+// removed from the input stream. When this method returns,
+// {@code recognizer} is in error recovery mode.
+//
+//
+// This method is called when {@link //singleTokenDeletion} identifies
+// single-token deletion as a viable recovery strategy for a mismatched
+// input error.
+//
+//
+// The default implementation simply returns if the handler is already in
+// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
+// enter error recovery mode, followed by calling
+// {@link Parser//NotifyErrorListeners}.
+//
+// @param recognizer the parser instance
+func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
+ if d.InErrorRecoveryMode(recognizer) {
+ return
+ }
+ d.beginErrorCondition(recognizer)
+ t := recognizer.GetCurrentToken()
+ tokenName := d.GetTokenErrorDisplay(t)
+ expecting := d.GetExpectedTokens(recognizer)
+ msg := "extraneous input " + tokenName + " expecting " +
+ expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
+ recognizer.NotifyErrorListeners(msg, t, nil)
+}
+
+// This method is called to Report a syntax error which requires the
+// insertion of a missing token into the input stream. At the time this
+// method is called, the missing token has not yet been inserted. When this
+// method returns, {@code recognizer} is in error recovery mode.
+//
+//
+// This method is called when {@link //singleTokenInsertion} identifies
+// single-token insertion as a viable recovery strategy for a mismatched
+// input error.
+//
+//
+// The default implementation simply returns if the handler is already in
+// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
+// enter error recovery mode, followed by calling
+// {@link Parser//NotifyErrorListeners}.
+//
+// @param recognizer the parser instance
+func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
+ if d.InErrorRecoveryMode(recognizer) {
+ return
+ }
+ d.beginErrorCondition(recognizer)
+ t := recognizer.GetCurrentToken()
+ expecting := d.GetExpectedTokens(recognizer)
+ msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) +
+ " at " + d.GetTokenErrorDisplay(t)
+ recognizer.NotifyErrorListeners(msg, t, nil)
+}
+
+// The default implementation attempts to recover from the mismatched input
+// by using single token insertion and deletion as described below. If the
+// recovery attempt fails, this method panics with an
+// {@link InputMisMatchException}.
+//
+//
+// EXTRA TOKEN (single token deletion)
+//
+//
+// {@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
+// right token, however, then assume {@code LA(1)} is some extra spurious
+// token and delete it. Then consume and return the next token (which was
+// the {@code LA(2)} token) as the successful result of the Match operation.
+//
+//
+// This recovery strategy is implemented by {@link
+// //singleTokenDeletion}.
+//
+//
+// MISSING TOKEN (single token insertion)
+//
+//
+// If current token (at {@code LA(1)}) is consistent with what could come
+// after the expected {@code LA(1)} token, then assume the token is missing
+// and use the parser's {@link TokenFactory} to create it on the fly. The
+// "insertion" is performed by returning the created token as the successful
+// result of the Match operation.
+//
+//
+// This recovery strategy is implemented by {@link
+// //singleTokenInsertion}.
+//
+//
+// EXAMPLE
+//
+//
+// For example, input {@code i=(3} is clearly missing the {@code ')'}. When
+// the parser returns from the nested call to {@code expr}, it will have
+// call chain:
+//
+//
+// stat -> expr -> atom
+//
+//
+// and it will be trying to Match the {@code ')'} at this point in the
+// derivation:
+//
+//
+// => ID '=' '(' INT ')' ('+' atom)* ';'
+// ^
+//
+//
+// The attempt to Match {@code ')'} will fail when it sees {@code ';'} and
+// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==';'}
+// is in the set of tokens that can follow the {@code ')'} token reference
+// in rule {@code atom}. It can assume that you forgot the {@code ')'}.
+func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
+ // SINGLE TOKEN DELETION
+ MatchedSymbol := d.SingleTokenDeletion(recognizer)
+ if MatchedSymbol != nil {
+ // we have deleted the extra token.
+ // now, move past ttype token as if all were ok
+ recognizer.Consume()
+ return MatchedSymbol
+ }
+ // SINGLE TOKEN INSERTION
+ if d.SingleTokenInsertion(recognizer) {
+ return d.GetMissingSymbol(recognizer)
+ }
+ // even that didn't work must panic the exception
+ panic(NewInputMisMatchException(recognizer))
+}
+
+// This method implements the single-token insertion inline error recovery
+// strategy. It is called by {@link //recoverInline} if the single-token
+// deletion strategy fails to recover from the mismatched input. If this
+// method returns {@code true}, {@code recognizer} will be in error recovery
+// mode.
+//
+//
+// This method determines whether or not single-token insertion is viable by
+// checking if the {@code LA(1)} input symbol could be successfully Matched
+// if it were instead the {@code LA(2)} symbol. If this method returns
+// {@code true}, the caller is responsible for creating and inserting a
+// token with the correct type to produce this behavior.
+//
+// @param recognizer the parser instance
+// @return {@code true} if single-token insertion is a viable recovery
+// strategy for the current mismatched input, otherwise {@code false}
+func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool {
+ currentSymbolType := recognizer.GetTokenStream().LA(1)
+ // if current token is consistent with what could come after current
+ // ATN state, then we know we're missing a token; error recovery
+ // is free to conjure up and insert the missing token
+ atn := recognizer.GetInterpreter().atn
+ currentState := atn.states[recognizer.GetState()]
+ next := currentState.GetTransitions()[0].getTarget()
+ expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext())
+ if expectingAtLL2.contains(currentSymbolType) {
+ d.ReportMissingToken(recognizer)
+ return true
+ }
+
+ return false
+}
+
+// This method implements the single-token deletion inline error recovery
+// strategy. It is called by {@link //recoverInline} to attempt to recover
+// from mismatched input. If this method returns nil, the parser and error
+// handler state will not have changed. If this method returns non-nil,
+// {@code recognizer} will not be in error recovery mode since the
+// returned token was a successful Match.
+//
+//
+// If the single-token deletion is successful, this method calls
+// {@link //ReportUnwantedToken} to Report the error, followed by
+// {@link Parser//consume} to actually "delete" the extraneous token. Then,
+// before returning {@link //ReportMatch} is called to signal a successful
+// Match.
+//
+// @param recognizer the parser instance
+// @return the successfully Matched {@link Token} instance if single-token
+// deletion successfully recovers from the mismatched input, otherwise
+// {@code nil}
+func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
+ NextTokenType := recognizer.GetTokenStream().LA(2)
+ expecting := d.GetExpectedTokens(recognizer)
+ if expecting.contains(NextTokenType) {
+ d.ReportUnwantedToken(recognizer)
+ // print("recoverFromMisMatchedToken deleting " \
+ // + str(recognizer.GetTokenStream().LT(1)) \
+ // + " since " + str(recognizer.GetTokenStream().LT(2)) \
+ // + " is what we want", file=sys.stderr)
+ recognizer.Consume() // simply delete extra token
+ // we want to return the token we're actually Matching
+ MatchedSymbol := recognizer.GetCurrentToken()
+ d.ReportMatch(recognizer) // we know current token is correct
+ return MatchedSymbol
+ }
+
+ return nil
+}
+
+// Conjure up a missing token during error recovery.
+//
+// The recognizer attempts to recover from single missing
+// symbols. But, actions might refer to that missing symbol.
+// For example, x=ID {f($x)}. The action clearly assumes
+// that there has been an identifier Matched previously and that
+// $x points at that token. If that token is missing, but
+// the next token in the stream is what we want, we assume that
+// this token is missing and we keep going. Because we
+// have to return some token to replace the missing token,
+// we have to conjure one up. This method gives the user control
+// over the tokens returned for missing tokens. Mostly,
+// you will want to create something special for identifier
+// tokens. For literals such as '{' and ',', the default
+// action in the parser or tree parser works. It simply creates
+// a CommonToken of the appropriate type. The text will be the token.
+// If you change what tokens must be created by the lexer,
+// override this method to create the appropriate tokens.
+func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
+ currentSymbol := recognizer.GetCurrentToken()
+ expecting := d.GetExpectedTokens(recognizer)
+ expectedTokenType := expecting.first()
+ var tokenText string
+
+ if expectedTokenType == TokenEOF {
+ tokenText = ""
+ } else {
+ ln := recognizer.GetLiteralNames()
+ if expectedTokenType > 0 && expectedTokenType < len(ln) {
+ tokenText = ""
+ } else {
+ tokenText = "" // TODO matches the JS impl
+ }
+ }
+ current := currentSymbol
+ lookback := recognizer.GetTokenStream().LT(-1)
+ if current.GetTokenType() == TokenEOF && lookback != nil {
+ current = lookback
+ }
+
+ tf := recognizer.GetTokenFactory()
+
+ return tf.Create(current.GetSource(), expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.GetLine(), current.GetColumn())
+}
+
+func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet {
+ return recognizer.GetExpectedTokens()
+}
+
+// How should a token be displayed in an error message? The default
+// is to display just the text, but during development you might
+// want to have a lot of information spit out. Override in that case
+// to use t.String() (which, for CommonToken, dumps everything about
+// the token). This is better than forcing you to override a method in
+// your token objects because you don't have to go modify your lexer
+// so that it creates a new token type.
+func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string {
+ if t == nil {
+ return ""
+ }
+ s := t.GetText()
+ if s == "" {
+ if t.GetTokenType() == TokenEOF {
+ s = ""
+ } else {
+ s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
+ }
+ }
+ return d.escapeWSAndQuote(s)
+}
+
+func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
+ s = strings.Replace(s, "\t", "\\t", -1)
+ s = strings.Replace(s, "\n", "\\n", -1)
+ s = strings.Replace(s, "\r", "\\r", -1)
+ return "'" + s + "'"
+}
+
+// Compute the error recovery set for the current rule. During
+// rule invocation, the parser pushes the set of tokens that can
+// follow that rule reference on the stack. This amounts to
+// computing FIRST of what follows the rule reference in the
+// enclosing rule. See LinearApproximator.FIRST().
+// This local follow set only includes tokens
+// from within the rule i.e., the FIRST computation done by
+// ANTLR stops at the end of a rule.
+//
+// # EXAMPLE
+//
+// When you find a "no viable alt exception", the input is not
+// consistent with any of the alternatives for rule r. The best
+// thing to do is to consume tokens until you see something that
+// can legally follow a call to r, or any rule that called r.
+// You don't want the exact set of viable next tokens because the
+// input might just be missing a token--you might consume the
+// rest of the input looking for one of the missing tokens.
+//
+// Consider grammar:
+//
+// a : '[' b ']'
+// | '(' b ')'
+//
+// b : c '^' INT
+// c : ID
+// | INT
+//
+// At each rule invocation, the set of tokens that could follow
+// that rule is pushed on a stack. Here are the various
+// context-sensitive follow sets:
+//
+// FOLLOW(b1_in_a) = FIRST(']') = ']'
+// FOLLOW(b2_in_a) = FIRST(')') = ')'
+// FOLLOW(c_in_b) = FIRST('^') = '^'
+//
+// Upon erroneous input "[]", the call chain is
+//
+// a -> b -> c
+//
+// and, hence, the follow context stack is:
+//
+// depth follow set start of rule execution
+// 0 a (from main())
+// 1 ']' b
+// 2 '^' c
+//
+// Notice that ')' is not included, because b would have to have
+// been called from a different context in rule a for ')' to be
+// included.
+//
+// For error recovery, we cannot consider FOLLOW(c)
+// (context-sensitive or otherwise). We need the combined set of
+// all context-sensitive FOLLOW sets--the set of all tokens that
+// could follow any reference in the call chain. We need to
+// reSync to one of those tokens. Note that FOLLOW(c)='^' and if
+// we reSync'd to that token, we'd consume until EOF. We need to
+// Sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
+// In this case, for input "[]", LA(1) is ']' and in the set, so we would
+// not consume anything. After printing an error, rule c would
+// return normally. Rule b would not find the required '^' though.
+// At this point, it gets a mismatched token error and panics with an
+// exception (since LA(1) is not in the viable following token
+// set). The rule exception handler tries to recover, but finds
+// the same recovery set and doesn't consume anything. Rule b
+// exits normally returning to rule a. Now it finds the ']' (and
+// with the successful Match exits errorRecovery mode).
+//
+// So, you can see that the parser walks up the call chain looking
+// for the token that was a member of the recovery set.
+//
+// Errors are not generated in errorRecovery mode.
+//
+// ANTLR's error recovery mechanism is based upon original ideas:
+//
+// "Algorithms + Data Structures = Programs" by Niklaus Wirth
+//
+// and
+//
+// "A note on error recovery in recursive descent parsers":
+// http://portal.acm.org/citation.cfm?id=947902.947905
+//
+// Later, Josef Grosch had some good ideas:
+//
+// "Efficient and Comfortable Error Recovery in Recursive Descent
+// Parsers":
+// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
+//
+// Like Grosch I implement context-sensitive FOLLOW sets that are combined
+// at run-time upon error to avoid overhead during parsing.
+func (d *DefaultErrorStrategy) getErrorRecoverySet(recognizer Parser) *IntervalSet {
+ atn := recognizer.GetInterpreter().atn
+ ctx := recognizer.GetParserRuleContext()
+ recoverSet := NewIntervalSet()
+ for ctx != nil && ctx.GetInvokingState() >= 0 {
+ // compute what follows who invoked us
+ invokingState := atn.states[ctx.GetInvokingState()]
+ rt := invokingState.GetTransitions()[0]
+ follow := atn.NextTokens(rt.(*RuleTransition).followState, nil)
+ recoverSet.addSet(follow)
+ ctx = ctx.GetParent().(ParserRuleContext)
+ }
+ recoverSet.removeOne(TokenEpsilon)
+ return recoverSet
+}
+
+// Consume tokens until one Matches the given token set.//
+func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) {
+ ttype := recognizer.GetTokenStream().LA(1)
+ for ttype != TokenEOF && !set.contains(ttype) {
+ recognizer.Consume()
+ ttype = recognizer.GetTokenStream().LA(1)
+ }
+}
+
+//
+// This implementation of {@link ANTLRErrorStrategy} responds to syntax errors
+// by immediately canceling the parse operation with a
+// {@link ParseCancellationException}. The implementation ensures that the
+// {@link ParserRuleContext//exception} field is set for all parse tree nodes
+// that were not completed prior to encountering the error.
+//
+//
+// This error strategy is useful in the following scenarios.
+//
+//
+//
+// Two-stage parsing: This error strategy allows the first
+// stage of two-stage parsing to immediately terminate if an error is
+// encountered, and immediately fall back to the second stage. In addition to
+// avoiding wasted work by attempting to recover from errors here, the empty
+// implementation of {@link BailErrorStrategy//Sync} improves the performance of
+// the first stage.
+//
+// Silent validation: When syntax errors are not being
+// Reported or logged, and the parse result is simply ignored if errors occur,
+// the {@link BailErrorStrategy} avoids wasting work on recovering from errors
+// when the result will be ignored either way.
+//
+// @see Parser//setErrorHandler(ANTLRErrorStrategy)
+
+type BailErrorStrategy struct {
+ *DefaultErrorStrategy
+}
+
+var _ ErrorStrategy = &BailErrorStrategy{}
+
+func NewBailErrorStrategy() *BailErrorStrategy {
+
+ b := new(BailErrorStrategy)
+
+ b.DefaultErrorStrategy = NewDefaultErrorStrategy()
+
+ return b
+}
+
+// Instead of recovering from exception {@code e}, re-panic it wrapped
+// in a {@link ParseCancellationException} so it is not caught by the
+// rule function catch blocks. Use {@link Exception//getCause()} to get the
+// original {@link RecognitionException}.
+func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
+ context := recognizer.GetParserRuleContext()
+ for context != nil {
+ context.SetException(e)
+ if parent, ok := context.GetParent().(ParserRuleContext); ok {
+ context = parent
+ } else {
+ context = nil
+ }
+ }
+ panic(NewParseCancellationException()) // TODO we don't emit e properly
+}
+
+// Make sure we don't attempt to recover inline; if the parser
+// successfully recovers, it won't panic an exception.
+func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token {
+ b.Recover(recognizer, NewInputMisMatchException(recognizer))
+
+ return nil
+}
+
+// Make sure we don't attempt to recover from problems in subrules.//
+func (b *BailErrorStrategy) Sync(recognizer Parser) {
+ // pass
+}
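
A sketch of the two-stage pattern the comment above describes. Because this runtime delivers the cancellation as a panic rather than a returned error, the caller recovers it; rebuilding the parser for the slower second stage is left to the supplied fallback, since the parser-reset helpers are unexported:

```go
package parsing

import (
	"github.com/antlr/antlr4/runtime/Go/antlr/v4"
)

// twoStageParse runs parse with the bail strategy and fast SLL prediction,
// falling back only if that first attempt is cancelled. parse and fallback
// stand in for calls to a generated parser's entry rule.
func twoStageParse(p antlr.Parser, parse, fallback func()) {
	p.SetErrorHandler(antlr.NewBailErrorStrategy())
	p.GetInterpreter().SetPredictionMode(antlr.PredictionModeSLL)

	defer func() {
		if r := recover(); r != nil {
			if _, ok := r.(*antlr.ParseCancellationException); ok {
				// Stage two: the fallback should construct a fresh parser
				// with NewDefaultErrorStrategy and PredictionModeLL.
				fallback()
				return
			}
			panic(r) // not a bail-out; re-panic anything unexpected
		}
	}()
	parse()
}
```
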
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go
new file mode 100644
index 0000000..3954c13
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go
@@ -0,0 +1,238 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just
+// 3 kinds of errors: prediction errors, failed predicate errors, and
+// mismatched input errors. In each case, the parser knows where it is
+// in the input, where it is in the ATN, the rule invocation stack,
+// and what kind of problem occurred.
+
+type RecognitionException interface {
+ GetOffendingToken() Token
+ GetMessage() string
+ GetInputStream() IntStream
+}
+
+type BaseRecognitionException struct {
+ message string
+ recognizer Recognizer
+ offendingToken Token
+ offendingState int
+ ctx RuleContext
+ input IntStream
+}
+
+func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException {
+
+ // todo
+ // Error.call(this)
+ //
+ // if (!!Error.captureStackTrace) {
+ // Error.captureStackTrace(this, RecognitionException)
+ // } else {
+ // stack := NewError().stack
+ // }
+ // TODO may be able to use - "runtime" func Stack(buf []byte, all bool) int
+
+ t := new(BaseRecognitionException)
+
+ t.message = message
+ t.recognizer = recognizer
+ t.input = input
+ t.ctx = ctx
+ // The current {@link Token} when an error occurred. Since not all streams
+ // support accessing symbols by index, we have to track the {@link Token}
+ // instance itself.
+ t.offendingToken = nil
+ // Get the ATN state number the parser was in at the time the error
+ // occurred. For {@link NoViableAltException} and
+ // {@link LexerNoViableAltException} exceptions, this is the
+ // {@link DecisionState} number. For others, it is the state whose outgoing
+ // edge we couldn't Match.
+ t.offendingState = -1
+ if t.recognizer != nil {
+ t.offendingState = t.recognizer.GetState()
+ }
+
+ return t
+}
+
+func (b *BaseRecognitionException) GetMessage() string {
+ return b.message
+}
+
+func (b *BaseRecognitionException) GetOffendingToken() Token {
+ return b.offendingToken
+}
+
+func (b *BaseRecognitionException) GetInputStream() IntStream {
+ return b.input
+}
+
+// If the state number is not known, offendingState is -1.
+
+// getExpectedTokens gets the set of input symbols which could potentially follow the
+// previously Matched symbol at the time this exception was raised.
+//
+// If the set of expected tokens is not known and could not be computed,
+// this method returns {@code nil}.
+//
+// @return The set of token types that could potentially follow the current
+// state in the ATN, or {@code nil} if the information is not available.
+// /
+func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet {
+ if b.recognizer != nil {
+ return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx)
+ }
+
+ return nil
+}
+
+func (b *BaseRecognitionException) String() string {
+ return b.message
+}
+
+type LexerNoViableAltException struct {
+ *BaseRecognitionException
+
+ startIndex int
+ deadEndConfigs ATNConfigSet
+}
+
+func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException {
+
+ l := new(LexerNoViableAltException)
+
+ l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil)
+
+ l.startIndex = startIndex
+ l.deadEndConfigs = deadEndConfigs
+
+ return l
+}
+
+func (l *LexerNoViableAltException) String() string {
+ symbol := ""
+ if l.startIndex >= 0 && l.startIndex < l.input.Size() {
+ symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex))
+ }
+ return "LexerNoViableAltException" + symbol
+}
+
+type NoViableAltException struct {
+ *BaseRecognitionException
+
+ startToken Token
+ offendingToken Token
+ ctx ParserRuleContext
+ deadEndConfigs ATNConfigSet
+}
+
+// Indicates that the parser could not decide which of two or more paths
+// to take based upon the remaining input. It tracks the starting token
+// of the offending input and also knows where the parser was
+// in the various paths when the error occurred. Reported by ReportNoViableAlternative().
+func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException {
+
+ if ctx == nil {
+ ctx = recognizer.GetParserRuleContext()
+ }
+
+ if offendingToken == nil {
+ offendingToken = recognizer.GetCurrentToken()
+ }
+
+ if startToken == nil {
+ startToken = recognizer.GetCurrentToken()
+ }
+
+ if input == nil {
+ input = recognizer.GetInputStream().(TokenStream)
+ }
+
+ n := new(NoViableAltException)
+ n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx)
+
+ // Which configurations did we try at input.Index() that couldn't Match
+ // input.LT(1)?
+ n.deadEndConfigs = deadEndConfigs
+ // The token object at the start index; the input stream might
+ // not be buffering tokens so get a reference to it. (At the
+ // time the error occurred, of course the stream needs to keep a
+ // buffer all of the tokens but later we might not have access to those.)
+ n.startToken = startToken
+ n.offendingToken = offendingToken
+
+ return n
+}
+
+type InputMisMatchException struct {
+ *BaseRecognitionException
+}
+
+// This signifies any kind of mismatched input exception, such as
+// when the current input does not Match the expected token.
+func NewInputMisMatchException(recognizer Parser) *InputMisMatchException {
+
+ i := new(InputMisMatchException)
+ i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())
+
+ i.offendingToken = recognizer.GetCurrentToken()
+
+ return i
+
+}
+
+// A semantic predicate failed during validation. Validation of predicates
+// occurs when normally parsing the alternative just like Matching a token.
+// Disambiguating predicate evaluation occurs when we test a predicate during
+// prediction.
+
+type FailedPredicateException struct {
+ *BaseRecognitionException
+
+ ruleIndex int
+ predicateIndex int
+ predicate string
+}
+
+func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException {
+
+ f := new(FailedPredicateException)
+
+ f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())
+
+ s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
+ trans := s.GetTransitions()[0]
+ if trans2, ok := trans.(*PredicateTransition); ok {
+ f.ruleIndex = trans2.ruleIndex
+ f.predicateIndex = trans2.predIndex
+ } else {
+ f.ruleIndex = 0
+ f.predicateIndex = 0
+ }
+ f.predicate = predicate
+ f.offendingToken = recognizer.GetCurrentToken()
+
+ return f
+}
+
+func (f *FailedPredicateException) formatMessage(predicate, message string) string {
+ if message != "" {
+ return message
+ }
+
+ return "failed predicate: {" + predicate + "}?"
+}
+
+type ParseCancellationException struct {
+}
+
+func NewParseCancellationException() *ParseCancellationException {
+ // Error.call(this)
+ // Error.captureStackTrace(this, ParseCancellationException)
+ return new(ParseCancellationException)
+}
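
The concrete exception types above are all exported, so client code can branch on them the same way DefaultErrorStrategy.ReportError does. A small sketch:

```go
package parsing

import (
	"github.com/antlr/antlr4/runtime/Go/antlr/v4"
)

// classify maps a recognition exception to the three error kinds the
// comment at the top of errors.go describes.
func classify(e antlr.RecognitionException) string {
	switch e.(type) {
	case *antlr.NoViableAltException:
		return "prediction error: no viable alternative"
	case *antlr.InputMisMatchException:
		return "mismatched input"
	case *antlr.FailedPredicateException:
		return "failed semantic predicate"
	default:
		return "other: " + e.GetMessage()
	}
}
```
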
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go
new file mode 100644
index 0000000..bd6ad5e
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go
@@ -0,0 +1,49 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "bytes"
+ "io"
+ "os"
+)
+
+// This is an InputStream that is loaded from a file all at once
+// when you construct the object.
+
+type FileStream struct {
+ *InputStream
+
+ filename string
+}
+
+func NewFileStream(fileName string) (*FileStream, error) {
+
+ buf := bytes.NewBuffer(nil)
+
+ f, err := os.Open(fileName)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ _, err = io.Copy(buf, f)
+ if err != nil {
+ return nil, err
+ }
+
+ fs := new(FileStream)
+
+ fs.filename = fileName
+ s := buf.String()
+
+ fs.InputStream = NewInputStream(s)
+
+ return fs, nil
+
+}
+
+func (f *FileStream) GetSourceName() string {
+ return f.filename
+}
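
FileStream only adds eager file loading and a source name on top of InputStream, so the whole stream API is available after construction. A minimal sketch ("input.txt" is a placeholder path):

```go
package main

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr/v4"
)

func main() {
	fs, err := antlr.NewFileStream("input.txt") // placeholder path
	if err != nil {
		fmt.Println("open failed:", err)
		return
	}
	// The embedded InputStream provides Size, LA, GetText, and friends.
	fmt.Printf("%s: %d runes\n", fs.GetSourceName(), fs.Size())
}
```
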
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go
new file mode 100644
index 0000000..a8b889c
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go
@@ -0,0 +1,113 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type InputStream struct {
+ name string
+ index int
+ data []rune
+ size int
+}
+
+func NewInputStream(data string) *InputStream {
+
+ is := new(InputStream)
+
+ is.name = ""
+ is.index = 0
+ is.data = []rune(data)
+ is.size = len(is.data) // number of runes
+
+ return is
+}
+
+func (is *InputStream) reset() {
+ is.index = 0
+}
+
+func (is *InputStream) Consume() {
+ if is.index >= is.size {
+ // assert is.LA(1) == TokenEOF
+ panic("cannot consume EOF")
+ }
+ is.index++
+}
+
+func (is *InputStream) LA(offset int) int {
+
+ if offset == 0 {
+ return 0 // nil
+ }
+ if offset < 0 {
+ offset++ // e.g., translate LA(-1) to use offset=0
+ }
+ pos := is.index + offset - 1
+
+ if pos < 0 || pos >= is.size { // invalid
+ return TokenEOF
+ }
+
+ return int(is.data[pos])
+}
+
+func (is *InputStream) LT(offset int) int {
+ return is.LA(offset)
+}
+
+func (is *InputStream) Index() int {
+ return is.index
+}
+
+func (is *InputStream) Size() int {
+ return is.size
+}
+
+// mark/release do nothing; we have the entire buffer
+func (is *InputStream) Mark() int {
+ return -1
+}
+
+func (is *InputStream) Release(marker int) {
+}
+
+func (is *InputStream) Seek(index int) {
+ if index <= is.index {
+ is.index = index // just jump; don't update stream state (line, ...)
+ return
+ }
+ // seek forward
+ is.index = intMin(index, is.size)
+}
+
+func (is *InputStream) GetText(start int, stop int) string {
+ if stop >= is.size {
+ stop = is.size - 1
+ }
+ if start >= is.size {
+ return ""
+ }
+
+ return string(is.data[start : stop+1])
+}
+
+func (is *InputStream) GetTextFromTokens(start, stop Token) string {
+ if start != nil && stop != nil {
+ return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex()))
+ }
+
+ return ""
+}
+
+func (is *InputStream) GetTextFromInterval(i *Interval) string {
+ return is.GetText(i.Start, i.Stop)
+}
+
+func (*InputStream) GetSourceName() string {
+ return "Obtained from string"
+}
+
+func (is *InputStream) String() string {
+ return string(is.data)
+}
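
The LA offset arithmetic above is easy to misread: offsets are 1-based, negative offsets look backwards, and consuming past the end panics while merely looking past the end returns TokenEOF. A short demonstration built only on the methods defined in this file:

```go
package main

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr/v4"
)

func main() {
	is := antlr.NewInputStream("ab")

	fmt.Println(string(rune(is.LA(1)))) // "a": lookahead never consumes
	is.Consume()
	fmt.Println(string(rune(is.LA(1))))  // "b": Consume advanced the index
	fmt.Println(string(rune(is.LA(-1)))) // "a": negative offsets look back
	is.Consume()

	fmt.Println(is.LA(1) == antlr.TokenEOF) // true: reads past the end are EOF
	fmt.Println(is.GetText(0, 1))           // "ab": GetText's stop is inclusive
}
```
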
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/int_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/int_stream.go
new file mode 100644
index 0000000..4778878
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/int_stream.go
@@ -0,0 +1,16 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type IntStream interface {
+ Consume()
+ LA(int) int
+ Mark() int
+ Release(marker int)
+ Index() int
+ Seek(index int)
+ Size() int
+ GetSourceName() string
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go
new file mode 100644
index 0000000..c1e155e
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go
@@ -0,0 +1,312 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "strconv"
+ "strings"
+)
+
+type Interval struct {
+ Start int
+ Stop int
+}
+
+/* stop is not included! */
+func NewInterval(start, stop int) *Interval {
+ i := new(Interval)
+
+ i.Start = start
+ i.Stop = stop
+ return i
+}
+
+func (i *Interval) Contains(item int) bool {
+ return item >= i.Start && item < i.Stop
+}
+
+func (i *Interval) String() string {
+ if i.Start == i.Stop-1 {
+ return strconv.Itoa(i.Start)
+ }
+
+ return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1)
+}
+
+func (i *Interval) length() int {
+ return i.Stop - i.Start
+}
+
+type IntervalSet struct {
+ intervals []*Interval
+ readOnly bool
+}
+
+func NewIntervalSet() *IntervalSet {
+
+ i := new(IntervalSet)
+
+ i.intervals = nil
+ i.readOnly = false
+
+ return i
+}
+
+func (i *IntervalSet) first() int {
+ if len(i.intervals) == 0 {
+ return TokenInvalidType
+ }
+
+ return i.intervals[0].Start
+}
+
+func (i *IntervalSet) addOne(v int) {
+ i.addInterval(NewInterval(v, v+1))
+}
+
+func (i *IntervalSet) addRange(l, h int) {
+ i.addInterval(NewInterval(l, h+1))
+}
+
+func (i *IntervalSet) addInterval(v *Interval) {
+ if i.intervals == nil {
+ i.intervals = make([]*Interval, 0)
+ i.intervals = append(i.intervals, v)
+ } else {
+ // find insert pos
+ for k, interval := range i.intervals {
+ // distinct range -> insert
+ if v.Stop < interval.Start {
+ i.intervals = append(i.intervals[0:k], append([]*Interval{v}, i.intervals[k:]...)...)
+ return
+ } else if v.Stop == interval.Start {
+ i.intervals[k].Start = v.Start
+ return
+ } else if v.Start <= interval.Stop {
+ i.intervals[k] = NewInterval(intMin(interval.Start, v.Start), intMax(interval.Stop, v.Stop))
+
+ // if not applying to end, merge potential overlaps
+ if k < len(i.intervals)-1 {
+ l := i.intervals[k]
+ r := i.intervals[k+1]
+ // if r contained in l
+ if l.Stop >= r.Stop {
+ i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...)
+ } else if l.Stop >= r.Start { // partial overlap
+ i.intervals[k] = NewInterval(l.Start, r.Stop)
+ i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...)
+ }
+ }
+ return
+ }
+ }
+ // greater than any existing interval
+ i.intervals = append(i.intervals, v)
+ }
+}
+
+func (i *IntervalSet) addSet(other *IntervalSet) *IntervalSet {
+ if other.intervals != nil {
+ for k := 0; k < len(other.intervals); k++ {
+ i2 := other.intervals[k]
+ i.addInterval(NewInterval(i2.Start, i2.Stop))
+ }
+ }
+ return i
+}
+
+func (i *IntervalSet) complement(start int, stop int) *IntervalSet {
+ result := NewIntervalSet()
+ result.addInterval(NewInterval(start, stop+1))
+ for j := 0; j < len(i.intervals); j++ {
+ result.removeRange(i.intervals[j])
+ }
+ return result
+}
+
+func (i *IntervalSet) contains(item int) bool {
+ if i.intervals == nil {
+ return false
+ }
+ for k := 0; k < len(i.intervals); k++ {
+ if i.intervals[k].Contains(item) {
+ return true
+ }
+ }
+ return false
+}
+
+func (i *IntervalSet) length() int {
+ total := 0
+
+ for _, v := range i.intervals {
+ total += v.length()
+ }
+
+ return total
+}
+
+func (i *IntervalSet) removeRange(v *Interval) {
+ if v.Start == v.Stop-1 {
+ i.removeOne(v.Start)
+ } else if i.intervals != nil {
+ k := 0
+ for n := 0; n < len(i.intervals); n++ {
+ ni := i.intervals[k]
+ // intervals are ordered
+ if v.Stop <= ni.Start {
+ return
+ } else if v.Start > ni.Start && v.Stop < ni.Stop {
+ i.intervals[k] = NewInterval(ni.Start, v.Start)
+ x := NewInterval(v.Stop, ni.Stop)
+ // i.intervals.splice(k, 0, x)
+ i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
+ return
+ } else if v.Start <= ni.Start && v.Stop >= ni.Stop {
+ // i.intervals.splice(k, 1)
+ i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
+ k = k - 1 // need another pass
+ } else if v.Start < ni.Stop {
+ i.intervals[k] = NewInterval(ni.Start, v.Start)
+ } else if v.Stop < ni.Stop {
+ i.intervals[k] = NewInterval(v.Stop, ni.Stop)
+ }
+ k++
+ }
+ }
+}
+
+func (i *IntervalSet) removeOne(v int) {
+ if i.intervals != nil {
+ for k := 0; k < len(i.intervals); k++ {
+ ki := i.intervals[k]
+ // intervals are ordered
+ if v < ki.Start {
+ return
+ } else if v == ki.Start && v == ki.Stop-1 {
+ // i.intervals.splice(k, 1)
+ i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
+ return
+ } else if v == ki.Start {
+ i.intervals[k] = NewInterval(ki.Start+1, ki.Stop)
+ return
+ } else if v == ki.Stop-1 {
+ i.intervals[k] = NewInterval(ki.Start, ki.Stop-1)
+ return
+ } else if v < ki.Stop-1 {
+ x := NewInterval(ki.Start, v)
+ ki.Start = v + 1
+ // i.intervals.splice(k, 0, x)
+ i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
+ return
+ }
+ }
+ }
+}
+
+func (i *IntervalSet) String() string {
+ return i.StringVerbose(nil, nil, false)
+}
+
+func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []string, elemsAreChar bool) string {
+
+ if i.intervals == nil {
+ return "{}"
+ } else if literalNames != nil || symbolicNames != nil {
+ return i.toTokenString(literalNames, symbolicNames)
+ } else if elemsAreChar {
+ return i.toCharString()
+ }
+
+ return i.toIndexString()
+}
+
+func (i *IntervalSet) GetIntervals() []*Interval {
+ return i.intervals
+}
+
+func (i *IntervalSet) toCharString() string {
+ names := make([]string, 0, len(i.intervals)) // zero length: entries are appended below
+
+ var sb strings.Builder
+
+ for j := 0; j < len(i.intervals); j++ {
+ v := i.intervals[j]
+ if v.Stop == v.Start+1 {
+ if v.Start == TokenEOF {
+ names = append(names, "")
+ } else {
+ sb.WriteByte('\'')
+ sb.WriteRune(rune(v.Start))
+ sb.WriteByte('\'')
+ names = append(names, sb.String())
+ sb.Reset()
+ }
+ } else {
+ sb.WriteByte('\'')
+ sb.WriteRune(rune(v.Start))
+ sb.WriteString("'..'")
+ sb.WriteRune(rune(v.Stop - 1))
+ sb.WriteByte('\'')
+ names = append(names, sb.String())
+ sb.Reset()
+ }
+ }
+ if len(names) > 1 {
+ return "{" + strings.Join(names, ", ") + "}"
+ }
+
+ return names[0]
+}
+
+func (i *IntervalSet) toIndexString() string {
+
+ names := make([]string, 0)
+ for j := 0; j < len(i.intervals); j++ {
+ v := i.intervals[j]
+ if v.Stop == v.Start+1 {
+ if v.Start == TokenEOF {
+ names = append(names, "")
+ } else {
+ names = append(names, strconv.Itoa(v.Start))
+ }
+ } else {
+ names = append(names, strconv.Itoa(v.Start)+".."+strconv.Itoa(v.Stop-1))
+ }
+ }
+ if len(names) > 1 {
+ return "{" + strings.Join(names, ", ") + "}"
+ }
+
+ return names[0]
+}
+
+func (i *IntervalSet) toTokenString(literalNames []string, symbolicNames []string) string {
+ names := make([]string, 0)
+ for _, v := range i.intervals {
+ for j := v.Start; j < v.Stop; j++ {
+ names = append(names, i.elementName(literalNames, symbolicNames, j))
+ }
+ }
+ if len(names) > 1 {
+ return "{" + strings.Join(names, ", ") + "}"
+ }
+
+ return names[0]
+}
+
+func (i *IntervalSet) elementName(literalNames []string, symbolicNames []string, a int) string {
+ if a == TokenEOF {
+ return "<EOF>"
+ } else if a == TokenEpsilon {
+ return "<EPSILON>"
+ } else {
+ if a < len(literalNames) && literalNames[a] != "" {
+ return literalNames[a]
+ }
+
+ return symbolicNames[a]
+ }
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go
new file mode 100644
index 0000000..e5a74f0
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go
@@ -0,0 +1,198 @@
+package antlr
+
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+import (
+ "sort"
+)
+
+// Collectable is an interface that a struct should implement if it is to be
+// usable as a key in these collections.
+type Collectable[T any] interface {
+ Hash() int
+ Equals(other Collectable[T]) bool
+}
+
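+// Comparator supplies the hash and equality functions that JStore and JMap
+// use in place of Go's built-in map key semantics.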
+type Comparator[T any] interface {
+ Hash1(o T) int
+ Equals2(T, T) bool
+}
+
+// JStore implements a container that allows the use of a struct to calculate the key
+// for a collection of values akin to a map. This is not meant to be a full-blown HashMap, but just
+// to serve the needs of the ANTLR Go runtime.
+//
+// For ease of porting the logic of the runtime from the master target (Java), this collection
+// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals()
+// function as the key. The values are stored in a standard go map, which internally is a form of hashmap
+// itself; the key for the go map is the hash supplied by the key object. The collection is able to deal with
+// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't
+// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and
+// we understand the requirements, this is fine - this is not a general-purpose collection.
+type JStore[T any, C Comparator[T]] struct {
+ store map[int][]T
+ len int
+ comparator Comparator[T]
+}
+
+func NewJStore[T any, C Comparator[T]](comparator Comparator[T]) *JStore[T, C] {
+
+ if comparator == nil {
+ panic("comparator cannot be nil")
+ }
+
+ s := &JStore[T, C]{
+ store: make(map[int][]T, 1),
+ comparator: comparator,
+ }
+ return s
+}
+
+// Put will store given value in the collection. Note that the key for storage is generated from
+// the value itself - this is specifically because that is what ANTLR needs - this would not be useful
+// as any kind of general collection.
+//
+// If the key has a hash conflict, then the value will be added to the slice of values associated with the
+// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is
+// tested by calling the equals() method on the key.
+//
+// If the given value is already present in the store, then the existing value is returned as v, and exists is set to true.
+//
+// If the given value is not present in the store, then the value is added to the store and returned as v, and exists is set to false.
+func (s *JStore[T, C]) Put(value T) (v T, exists bool) { //nolint:ireturn
+
+ kh := s.comparator.Hash1(value)
+
+ for _, v1 := range s.store[kh] {
+ if s.comparator.Equals2(value, v1) {
+ return v1, true
+ }
+ }
+ s.store[kh] = append(s.store[kh], value)
+ s.len++
+ return value, false
+}
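+
+// A minimal usage sketch (illustrative only, not part of the upstream
+// runtime; the intCmp comparator is hypothetical) showing how Put
+// deduplicates values that compare equal under the supplied Comparator:
+//
+//    type intCmp struct{}
+//
+//    func (intCmp) Hash1(o int) int       { return o }
+//    func (intCmp) Equals2(a, b int) bool { return a == b }
+//
+//    s := NewJStore[int, intCmp](intCmp{})
+//    v, exists := s.Put(42) // v == 42, exists == false: newly stored
+//    v, exists = s.Put(42)  // v == 42, exists == true: existing value returned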
+
+// Get will return the value associated with the key - the type of the key is the same type as the value
+// which would not generally be useful, but this is a specific thing for ANTLR where the key is
+// generated using the object we are going to store.
+func (s *JStore[T, C]) Get(key T) (T, bool) { //nolint:ireturn
+
+ kh := s.comparator.Hash1(key)
+
+ for _, v := range s.store[kh] {
+ if s.comparator.Equals2(key, v) {
+ return v, true
+ }
+ }
+ return key, false
+}
+
+// Contains returns true if the given key is present in the store
+func (s *JStore[T, C]) Contains(key T) bool { //nolint:ireturn
+
+ _, present := s.Get(key)
+ return present
+}
+
+func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T {
+ vs := make([]T, 0, len(s.store))
+ for _, v := range s.store {
+ vs = append(vs, v...)
+ }
+ sort.Slice(vs, func(i, j int) bool {
+ return less(vs[i], vs[j])
+ })
+
+ return vs
+}
+
+func (s *JStore[T, C]) Each(f func(T) bool) {
+ for _, e := range s.store {
+ for _, v := range e {
+ f(v)
+ }
+ }
+}
+
+func (s *JStore[T, C]) Len() int {
+ return s.len
+}
+
+func (s *JStore[T, C]) Values() []T {
+ vs := make([]T, 0, len(s.store))
+ for _, e := range s.store {
+ for _, v := range e {
+ vs = append(vs, v)
+ }
+ }
+ return vs
+}
+
+type entry[K, V any] struct {
+ key K
+ val V
+}
+
+type JMap[K, V any, C Comparator[K]] struct {
+ store map[int][]*entry[K, V]
+ len int
+ comparator Comparator[K]
+}
+
+func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K]) *JMap[K, V, C] {
+ return &JMap[K, V, C]{
+ store: make(map[int][]*entry[K, V], 1),
+ comparator: comparator,
+ }
+}
+
+func (m *JMap[K, V, C]) Put(key K, val V) {
+ kh := m.comparator.Hash1(key)
+
+ m.store[kh] = append(m.store[kh], &entry[K, V]{key, val})
+ m.len++
+}
+
+func (m *JMap[K, V, C]) Values() []V {
+ vs := make([]V, 0, len(m.store))
+ for _, e := range m.store {
+ for _, v := range e {
+ vs = append(vs, v.val)
+ }
+ }
+ return vs
+}
+
+func (m *JMap[K, V, C]) Get(key K) (V, bool) {
+
+ var none V
+ kh := m.comparator.Hash1(key)
+ for _, e := range m.store[kh] {
+ if m.comparator.Equals2(e.key, key) {
+ return e.val, true
+ }
+ }
+ return none, false
+}
+
+func (m *JMap[K, V, C]) Len() int {
+ return m.len
+}
+
+func (m *JMap[K, V, C]) Delete(key K) {
+ kh := m.comparator.Hash1(key)
+ for i, e := range m.store[kh] {
+ if m.comparator.Equals2(e.key, key) {
+ m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...)
+ m.len--
+ return
+ }
+ }
+}
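+
+// A minimal usage sketch for JMap (illustrative only; strCmp stands for a
+// hypothetical Comparator[string]). Note that Put always appends and never
+// replaces, so callers are expected to check Get before inserting a key
+// that may already be present:
+//
+//    m := NewJMap[string, int, strCmp](strCmp{})
+//    m.Put("rule", 7)
+//    v, ok := m.Get("rule") // v == 7, ok == true
+//    m.Delete("rule")
+//    _, ok = m.Get("rule") // ok == false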
+
+func (m *JMap[K, V, C]) Clear() {
+ m.store = make(map[int][]*entry[K, V])
+ m.len = 0
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go
new file mode 100644
index 0000000..6533f05
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go
@@ -0,0 +1,416 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// A lexer is a recognizer that draws input symbols from a character stream.
+// Lexer grammars result in a subclass of this object. A Lexer object
+// uses simplified Match() and error recovery mechanisms in the interest
+// of speed.
+
+type Lexer interface {
+ TokenSource
+ Recognizer
+
+ Emit() Token
+
+ SetChannel(int)
+ PushMode(int)
+ PopMode() int
+ SetType(int)
+ SetMode(int)
+}
+
+type BaseLexer struct {
+ *BaseRecognizer
+
+ Interpreter ILexerATNSimulator
+ TokenStartCharIndex int
+ TokenStartLine int
+ TokenStartColumn int
+ ActionType int
+ Virt Lexer // The most derived lexer implementation. Allows virtual method calls.
+
+ input CharStream
+ factory TokenFactory
+ tokenFactorySourcePair *TokenSourceCharStreamPair
+ token Token
+ hitEOF bool
+ channel int
+ thetype int
+ modeStack IntStack
+ mode int
+ text string
+}
+
+func NewBaseLexer(input CharStream) *BaseLexer {
+
+ lexer := new(BaseLexer)
+
+ lexer.BaseRecognizer = NewBaseRecognizer()
+
+ lexer.input = input
+ lexer.factory = CommonTokenFactoryDEFAULT
+ lexer.tokenFactorySourcePair = &TokenSourceCharStreamPair{lexer, input}
+
+ lexer.Virt = lexer
+
+ lexer.Interpreter = nil // child classes must populate it
+
+ // The goal of all lexer rules/methods is to create a token object.
+ // This is an instance variable as multiple rules may collaborate to
+ // create a single token. NextToken will return this object after
+ // Matching lexer rule(s). If you subclass to allow multiple token
+ // emissions, then set it to the last token to be Matched or
+ // something non-nil so that the auto token emit mechanism will not
+ // emit another token.
+ lexer.token = nil
+
+ // What character index in the stream did the current token start at?
+ // Needed, for example, to get the text for current token. Set at
+ // the start of NextToken.
+ lexer.TokenStartCharIndex = -1
+
+ // The line on which the first character of the token resides
+ lexer.TokenStartLine = -1
+
+ // The character position of the first character within the line
+ lexer.TokenStartColumn = -1
+
+ // Once we see EOF on char stream, next token will be EOF.
+ // If you have DONE : EOF then you see DONE EOF.
+ lexer.hitEOF = false
+
+ // The channel number for the current token
+ lexer.channel = TokenDefaultChannel
+
+ // The token type for the current token
+ lexer.thetype = TokenInvalidType
+
+ lexer.modeStack = make([]int, 0)
+ lexer.mode = LexerDefaultMode
+
+ // You can set the text for the current token to override what is in
+ // the input char buffer. Use setText() or set this instance variable
+ // directly.
+ lexer.text = ""
+
+ return lexer
+}
+
+const (
+ LexerDefaultMode = 0
+ LexerMore = -2
+ LexerSkip = -3
+)
+
+const (
+ LexerDefaultTokenChannel = TokenDefaultChannel
+ LexerHidden = TokenHiddenChannel
+ LexerMinCharValue = 0x0000
+ LexerMaxCharValue = 0x10FFFF
+)
+
+func (b *BaseLexer) reset() {
+ // reset all Lexer state variables
+ if b.input != nil {
+ b.input.Seek(0) // rewind the input
+ }
+ b.token = nil
+ b.thetype = TokenInvalidType
+ b.channel = TokenDefaultChannel
+ b.TokenStartCharIndex = -1
+ b.TokenStartColumn = -1
+ b.TokenStartLine = -1
+ b.text = ""
+
+ b.hitEOF = false
+ b.mode = LexerDefaultMode
+ b.modeStack = make([]int, 0)
+
+ b.Interpreter.reset()
+}
+
+func (b *BaseLexer) GetInterpreter() ILexerATNSimulator {
+ return b.Interpreter
+}
+
+func (b *BaseLexer) GetInputStream() CharStream {
+ return b.input
+}
+
+func (b *BaseLexer) GetSourceName() string {
+ return b.GrammarFileName
+}
+
+func (b *BaseLexer) SetChannel(v int) {
+ b.channel = v
+}
+
+func (b *BaseLexer) GetTokenFactory() TokenFactory {
+ return b.factory
+}
+
+func (b *BaseLexer) setTokenFactory(f TokenFactory) {
+ b.factory = f
+}
+
+func (b *BaseLexer) safeMatch() (ret int) {
+ defer func() {
+ if e := recover(); e != nil {
+ if re, ok := e.(RecognitionException); ok {
+ b.notifyListeners(re) // Report error
+ b.Recover(re)
+ ret = LexerSkip // default
+ }
+ }
+ }()
+
+ return b.Interpreter.Match(b.input, b.mode)
+}
+
+// Return a token from this source, i.e., Match a token on the char stream.
+func (b *BaseLexer) NextToken() Token {
+ if b.input == nil {
+ panic("NextToken requires a non-nil input stream.")
+ }
+
+ tokenStartMarker := b.input.Mark()
+
+ // previously in finally block
+ defer func() {
+ // make sure we release marker after Match or
+ // unbuffered char stream will keep buffering
+ b.input.Release(tokenStartMarker)
+ }()
+
+ for {
+ if b.hitEOF {
+ b.EmitEOF()
+ return b.token
+ }
+ b.token = nil
+ b.channel = TokenDefaultChannel
+ b.TokenStartCharIndex = b.input.Index()
+ b.TokenStartColumn = b.Interpreter.GetCharPositionInLine()
+ b.TokenStartLine = b.Interpreter.GetLine()
+ b.text = ""
+ continueOuter := false
+ for {
+ b.thetype = TokenInvalidType
+ ttype := b.safeMatch()
+
+ if b.input.LA(1) == TokenEOF {
+ b.hitEOF = true
+ }
+ if b.thetype == TokenInvalidType {
+ b.thetype = ttype
+ }
+ if b.thetype == LexerSkip {
+ continueOuter = true
+ break
+ }
+ if b.thetype != LexerMore {
+ break
+ }
+ }
+
+ if continueOuter {
+ continue
+ }
+ if b.token == nil {
+ b.Virt.Emit()
+ }
+ return b.token
+ }
+}
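+
+// For example (illustrative only): given a lexer rule such as
+//
+//    WS : [ \t\r\n]+ -> skip ;
+//
+// the Match for WS ends with thetype == LexerSkip, so the inner loop above
+// breaks with continueOuter == true and NextToken immediately starts
+// Matching the next token instead of emitting one for the whitespace.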
+
+// Instruct the lexer to Skip creating a token for the current lexer rule
+// and look for another token. NextToken() knows to keep looking when
+// a lexer rule finishes with token set to SKIPTOKEN. Recall that
+// if token==nil at end of any token rule, it creates one for you
+// and emits it.
+func (b *BaseLexer) Skip() {
+ b.thetype = LexerSkip
+}
+
+func (b *BaseLexer) More() {
+ b.thetype = LexerMore
+}
+
+func (b *BaseLexer) SetMode(m int) {
+ b.mode = m
+}
+
+func (b *BaseLexer) PushMode(m int) {
+ if LexerATNSimulatorDebug {
+ fmt.Println("pushMode " + strconv.Itoa(m))
+ }
+ b.modeStack.Push(b.mode)
+ b.mode = m
+}
+
+func (b *BaseLexer) PopMode() int {
+ if len(b.modeStack) == 0 {
+ panic("Empty Stack")
+ }
+ if LexerATNSimulatorDebug {
+ fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1]))
+ }
+ i, _ := b.modeStack.Pop()
+ b.mode = i
+ return b.mode
+}
+
+func (b *BaseLexer) inputStream() CharStream {
+ return b.input
+}
+
+// SetInputStream resets the lexer input stream and associated lexer state.
+func (b *BaseLexer) SetInputStream(input CharStream) {
+ b.input = nil
+ b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
+ b.reset()
+ b.input = input
+ b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
+}
+
+func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair {
+ return b.tokenFactorySourcePair
+}
+
+// By default does not support multiple emits per NextToken invocation
+// for efficiency reasons. Subclass and override this method, NextToken,
+// and GetToken (to push tokens into a list and pull from that list
+// rather than a single variable as this implementation does).
+func (b *BaseLexer) EmitToken(token Token) {
+ b.token = token
+}
+
+// The standard method called to automatically emit a token at the
+// outermost lexical rule. The token object should point into the
+// char buffer start..stop. If there is a text override in 'text',
+// use that to set the token's text. Override this method to emit
+// custom Token objects or provide a new factory.
+func (b *BaseLexer) Emit() Token {
+ t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn)
+ b.EmitToken(t)
+ return t
+}
+
+func (b *BaseLexer) EmitEOF() Token {
+ cpos := b.GetCharPositionInLine()
+ lpos := b.GetLine()
+ eof := b.factory.Create(b.tokenFactorySourcePair, TokenEOF, "", TokenDefaultChannel, b.input.Index(), b.input.Index()-1, lpos, cpos)
+ b.EmitToken(eof)
+ return eof
+}
+
+func (b *BaseLexer) GetCharPositionInLine() int {
+ return b.Interpreter.GetCharPositionInLine()
+}
+
+func (b *BaseLexer) GetLine() int {
+ return b.Interpreter.GetLine()
+}
+
+func (b *BaseLexer) GetType() int {
+ return b.thetype
+}
+
+func (b *BaseLexer) SetType(t int) {
+ b.thetype = t
+}
+
+// What is the index of the current character of lookahead?
+func (b *BaseLexer) GetCharIndex() int {
+ return b.input.Index()
+}
+
+// Return the text Matched so far for the current token, or any text override.
+// Setting the complete text of this token wipes any previous changes to it.
+func (b *BaseLexer) GetText() string {
+ if b.text != "" {
+ return b.text
+ }
+
+ return b.Interpreter.GetText(b.input)
+}
+
+func (b *BaseLexer) SetText(text string) {
+ b.text = text
+}
+
+func (b *BaseLexer) GetATN() *ATN {
+ return b.Interpreter.ATN()
+}
+
+// Return a list of all Token objects in input char stream.
+// Forces load of all tokens. Does not include EOF token.
+func (b *BaseLexer) GetAllTokens() []Token {
+ vl := b.Virt
+ tokens := make([]Token, 0)
+ t := vl.NextToken()
+ for t.GetTokenType() != TokenEOF {
+ tokens = append(tokens, t)
+ t = vl.NextToken()
+ }
+ return tokens
+}
+
+func (b *BaseLexer) notifyListeners(e RecognitionException) {
+ start := b.TokenStartCharIndex
+ stop := b.input.Index()
+ text := b.input.GetTextFromInterval(NewInterval(start, stop))
+ msg := "token recognition error at: '" + text + "'"
+ listener := b.GetErrorListenerDispatch()
+ listener.SyntaxError(b, nil, b.TokenStartLine, b.TokenStartColumn, msg, e)
+}
+
+func (b *BaseLexer) getErrorDisplayForChar(c rune) string {
+ if c == TokenEOF {
+ return ""
+ } else if c == '\n' {
+ return "\\n"
+ } else if c == '\t' {
+ return "\\t"
+ } else if c == '\r' {
+ return "\\r"
+ } else {
+ return string(c)
+ }
+}
+
+func (b *BaseLexer) getCharErrorDisplay(c rune) string {
+ return "'" + b.getErrorDisplayForChar(c) + "'"
+}
+
+// Lexers can normally Match any char in its vocabulary after Matching
+// a token, so do the easy thing and just kill a character and hope
+// it all works out. You can instead use the rule invocation stack
+// to do sophisticated error recovery if you are in a fragment rule.
+func (b *BaseLexer) Recover(re RecognitionException) {
+ if b.input.LA(1) != TokenEOF {
+ if _, ok := re.(*LexerNoViableAltException); ok {
+ // Skip a char and try again
+ b.Interpreter.Consume(b.input)
+ } else {
+ // TODO: Do we lose character or line position information?
+ b.input.Consume()
+ }
+ }
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go
new file mode 100644
index 0000000..111656c
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go
@@ -0,0 +1,432 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "strconv"
+
+const (
+ LexerActionTypeChannel = 0 // The type of a {@link LexerChannelAction} action.
+ LexerActionTypeCustom = 1 // The type of a {@link LexerCustomAction} action.
+ LexerActionTypeMode = 2 // The type of a {@link LexerModeAction} action.
+ LexerActionTypeMore = 3 // The type of a {@link LexerMoreAction} action.
+ LexerActionTypePopMode = 4 // The type of a {@link LexerPopModeAction} action.
+ LexerActionTypePushMode = 5 // The type of a {@link LexerPushModeAction} action.
+ LexerActionTypeSkip = 6 // The type of a {@link LexerSkipAction} action.
+ LexerActionTypeType = 7 // The type of a {@link LexerTypeAction} action.
+)
+
+type LexerAction interface {
+ getActionType() int
+ getIsPositionDependent() bool
+ execute(lexer Lexer)
+ Hash() int
+ Equals(other LexerAction) bool
+}
+
+type BaseLexerAction struct {
+ actionType int
+ isPositionDependent bool
+}
+
+func NewBaseLexerAction(action int) *BaseLexerAction {
+ la := new(BaseLexerAction)
+
+ la.actionType = action
+ la.isPositionDependent = false
+
+ return la
+}
+
+func (b *BaseLexerAction) execute(lexer Lexer) {
+ panic("Not implemented")
+}
+
+func (b *BaseLexerAction) getActionType() int {
+ return b.actionType
+}
+
+func (b *BaseLexerAction) getIsPositionDependent() bool {
+ return b.isPositionDependent
+}
+
+func (b *BaseLexerAction) Hash() int {
+ return b.actionType
+}
+
+func (b *BaseLexerAction) Equals(other LexerAction) bool {
+ return b == other
+}
+
+// Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}.
+//
+// The {@code Skip} command does not have any parameters, so this action is
+// implemented as a singleton instance exposed by {@link //INSTANCE}.
+type LexerSkipAction struct {
+ *BaseLexerAction
+}
+
+func NewLexerSkipAction() *LexerSkipAction {
+ la := new(LexerSkipAction)
+ la.BaseLexerAction = NewBaseLexerAction(LexerActionTypeSkip)
+ return la
+}
+
+// Provides a singleton instance of this parameterless lexer action.
+var LexerSkipActionINSTANCE = NewLexerSkipAction()
+
+func (l *LexerSkipAction) execute(lexer Lexer) {
+ lexer.Skip()
+}
+
+func (l *LexerSkipAction) String() string {
+ return "skip"
+}
+
+// Implements the {@code type} lexer action by calling {@link Lexer//setType}
+// with the assigned type.
+type LexerTypeAction struct {
+ *BaseLexerAction
+
+ thetype int
+}
+
+func NewLexerTypeAction(thetype int) *LexerTypeAction {
+ l := new(LexerTypeAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeType)
+ l.thetype = thetype
+ return l
+}
+
+func (l *LexerTypeAction) execute(lexer Lexer) {
+ lexer.SetType(l.thetype)
+}
+
+func (l *LexerTypeAction) Hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.thetype)
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerTypeAction) Equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerTypeAction); !ok {
+ return false
+ } else {
+ return l.thetype == other.(*LexerTypeAction).thetype
+ }
+}
+
+func (l *LexerTypeAction) String() string {
+ return "actionType(" + strconv.Itoa(l.thetype) + ")"
+}
+
+// Implements the {@code pushMode} lexer action by calling
+// {@link Lexer//pushMode} with the assigned mode.
+type LexerPushModeAction struct {
+ *BaseLexerAction
+
+ mode int
+}
+
+func NewLexerPushModeAction(mode int) *LexerPushModeAction {
+
+ l := new(LexerPushModeAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePushMode)
+
+ l.mode = mode
+ return l
+}
+
+// This action is implemented by calling {@link Lexer//pushMode} with the
+// value provided by {@link //getMode}.
+func (l *LexerPushModeAction) execute(lexer Lexer) {
+ lexer.PushMode(l.mode)
+}
+
+func (l *LexerPushModeAction) Hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.mode)
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerPushModeAction) Equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerPushModeAction); !ok {
+ return false
+ } else {
+ return l.mode == other.(*LexerPushModeAction).mode
+ }
+}
+
+func (l *LexerPushModeAction) String() string {
+ return "pushMode(" + strconv.Itoa(l.mode) + ")"
+}
+
+// Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}.
+//
+// The {@code popMode} command does not have any parameters, so this action is
+// implemented as a singleton instance exposed by {@link //INSTANCE}.
+type LexerPopModeAction struct {
+ *BaseLexerAction
+}
+
+func NewLexerPopModeAction() *LexerPopModeAction {
+ l := new(LexerPopModeAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePopMode)
+ return l
+}
+
+// Provides a singleton instance of this parameterless lexer action.
+var LexerPopModeActionINSTANCE = NewLexerPopModeAction()
+
+// This action is implemented by calling {@link Lexer//popMode}.
+func (l *LexerPopModeAction) execute(lexer Lexer) {
+ lexer.PopMode()
+}
+
+func (l *LexerPopModeAction) String() string {
+ return "popMode"
+}
+
+// Implements the {@code more} lexer action by calling {@link Lexer//more}.
+//
+// The {@code more} command does not have any parameters, so this action is
+// implemented as a singleton instance exposed by {@link //INSTANCE}.
+type LexerMoreAction struct {
+ *BaseLexerAction
+}
+
+func NewLexerMoreAction() *LexerMoreAction {
+ l := new(LexerMoreAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMore)
+ return l
+}
+
+// Provides a singleton instance of this parameterless lexer action.
+var LexerMoreActionINSTANCE = NewLexerMoreAction()
+
+// This action is implemented by calling {@link Lexer//more}.
+func (l *LexerMoreAction) execute(lexer Lexer) {
+ lexer.More()
+}
+
+func (l *LexerMoreAction) String() string {
+ return "more"
+}
+
+// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with
+// the assigned mode.
+type LexerModeAction struct {
+ *BaseLexerAction
+
+ mode int
+}
+
+func NewLexerModeAction(mode int) *LexerModeAction {
+ l := new(LexerModeAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMode)
+ l.mode = mode
+ return l
+}
+
+// This action is implemented by calling {@link Lexer//mode} with the
+// value provided by {@link //getMode}.
+func (l *LexerModeAction) execute(lexer Lexer) {
+ lexer.SetMode(l.mode)
+}
+
+func (l *LexerModeAction) Hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.mode)
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerModeAction) Equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerModeAction); !ok {
+ return false
+ } else {
+ return l.mode == other.(*LexerModeAction).mode
+ }
+}
+
+func (l *LexerModeAction) String() string {
+ return "mode(" + strconv.Itoa(l.mode) + ")"
+}
+
+// Executes a custom lexer action by calling {@link Recognizer//action} with the
+// rule and action indexes assigned to the custom action. The implementation of
+// a custom action is added to the generated code for the lexer in an override
+// of {@link Recognizer//action} when the grammar is compiled.
+//
+// This class may represent embedded actions created with the {...}
+// syntax in ANTLR 4, as well as actions created for lexer commands where the
+// command argument could not be evaluated when the grammar was compiled.
+
+// Constructs a custom lexer action with the specified rule and action
+// indexes.
+//
+// @param ruleIndex The rule index to use for calls to
+// {@link Recognizer//action}.
+// @param actionIndex The action index to use for calls to
+// {@link Recognizer//action}.
+
+type LexerCustomAction struct {
+ *BaseLexerAction
+ ruleIndex, actionIndex int
+}
+
+func NewLexerCustomAction(ruleIndex, actionIndex int) *LexerCustomAction {
+ l := new(LexerCustomAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeCustom)
+ l.ruleIndex = ruleIndex
+ l.actionIndex = actionIndex
+ l.isPositionDependent = true
+ return l
+}
+
+// Custom actions are implemented by calling {@link Lexer//action} with the
+// appropriate rule and action indexes.
+func (l *LexerCustomAction) execute(lexer Lexer) {
+ lexer.Action(nil, l.ruleIndex, l.actionIndex)
+}
+
+func (l *LexerCustomAction) Hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.ruleIndex)
+ h = murmurUpdate(h, l.actionIndex)
+ return murmurFinish(h, 3)
+}
+
+func (l *LexerCustomAction) Equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerCustomAction); !ok {
+ return false
+ } else {
+ return l.ruleIndex == other.(*LexerCustomAction).ruleIndex &&
+ l.actionIndex == other.(*LexerCustomAction).actionIndex
+ }
+}
+
+// Implements the {@code channel} lexer action by calling
+// {@link Lexer//setChannel} with the assigned channel.
+//
+// Constructs a new {@code channel} action with the specified channel value.
+// @param channel The channel value to pass to {@link Lexer//setChannel}.
+type LexerChannelAction struct {
+ *BaseLexerAction
+
+ channel int
+}
+
+func NewLexerChannelAction(channel int) *LexerChannelAction {
+ l := new(LexerChannelAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel)
+ l.channel = channel
+ return l
+}
+
+// This action is implemented by calling {@link Lexer//setChannel} with the
+// value provided by {@link //getChannel}.
+func (l *LexerChannelAction) execute(lexer Lexer) {
+ lexer.SetChannel(l.channel)
+}
+
+func (l *LexerChannelAction) Hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.channel)
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerChannelAction) Equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerChannelAction); !ok {
+ return false
+ } else {
+ return l.channel == other.(*LexerChannelAction).channel
+ }
+}
+
+func (l *LexerChannelAction) String() string {
+ return "channel(" + strconv.Itoa(l.channel) + ")"
+}
+
+// This implementation of {@link LexerAction} is used for tracking input offsets
+// for position-dependent actions within a {@link LexerActionExecutor}.
+//
+// This action is not serialized as part of the ATN, and is only required for
+// position-dependent lexer actions which appear at a location other than the
+// end of a rule. For more information about DFA optimizations employed for
+// lexer actions, see {@link LexerActionExecutor//append} and
+// {@link LexerActionExecutor//fixOffsetBeforeMatch}.
+
+// Constructs a new indexed custom action by associating a character offset
+// with a {@link LexerAction}.
+//
+// Note: This class is only required for lexer actions for which
+// {@link LexerAction//isPositionDependent} returns {@code true}.
+//
+// @param offset The offset into the input {@link CharStream}, relative to
+// the token start index, at which the specified lexer action should be
+// executed.
+// @param action The lexer action to execute at a particular offset in the
+// input {@link CharStream}.
+type LexerIndexedCustomAction struct {
+ *BaseLexerAction
+
+ offset int
+ lexerAction LexerAction
+ isPositionDependent bool
+}
+
+func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction {
+
+ l := new(LexerIndexedCustomAction)
+ l.BaseLexerAction = NewBaseLexerAction(lexerAction.getActionType())
+
+ l.offset = offset
+ l.lexerAction = lexerAction
+ l.isPositionDependent = true
+
+ return l
+}
+
+// This method calls {@link //execute} on the result of {@link //getAction}
+// using the provided {@code lexer}.
+func (l *LexerIndexedCustomAction) execute(lexer Lexer) {
+ // assume the input stream position was properly set by the calling code
+ l.lexerAction.execute(lexer)
+}
+
+func (l *LexerIndexedCustomAction) Hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.offset)
+ h = murmurUpdate(h, l.lexerAction.Hash())
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerIndexedCustomAction) Equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerIndexedCustomAction); !ok {
+ return false
+ } else {
+ return l.offset == other.(*LexerIndexedCustomAction).offset &&
+ l.lexerAction.Equals(other.(*LexerIndexedCustomAction).lexerAction)
+ }
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go
new file mode 100644
index 0000000..be1ba7a
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go
@@ -0,0 +1,186 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "golang.org/x/exp/slices"
+
+// Represents an executor for a sequence of lexer actions traversed during
+// the Matching operation of a lexer rule (token).
+//
+// The executor tracks position information for position-dependent lexer actions
+// efficiently, ensuring that actions appearing only at the end of the rule do
+// not cause bloating of the {@link DFA} created for the lexer.
+
+type LexerActionExecutor struct {
+ lexerActions []LexerAction
+ cachedHash int
+}
+
+func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor {
+
+ if lexerActions == nil {
+ lexerActions = make([]LexerAction, 0)
+ }
+
+ l := new(LexerActionExecutor)
+
+ l.lexerActions = lexerActions
+
+ // Caches the result of {@link //hashCode} since the hash code is an element
+ // of the performance-critical {@link LexerATNConfig//hashCode} operation.
+ l.cachedHash = murmurInit(57)
+ for _, a := range lexerActions {
+ l.cachedHash = murmurUpdate(l.cachedHash, a.Hash())
+ }
+
+ return l
+}
+
+// Creates a {@link LexerActionExecutor} which executes the actions for
+// the input {@code lexerActionExecutor} followed by a specified
+// {@code lexerAction}.
+//
+// @param lexerActionExecutor The executor for actions already traversed by
+// the lexer while Matching a token within a particular
+// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as
+// though it were an empty executor.
+// @param lexerAction The lexer action to execute after the actions
+// specified in {@code lexerActionExecutor}.
+//
+// @return A {@link LexerActionExecutor} for executing the combined actions
+// of {@code lexerActionExecutor} and {@code lexerAction}.
+func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor {
+ if lexerActionExecutor == nil {
+ return NewLexerActionExecutor([]LexerAction{lexerAction})
+ }
+
+ return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction))
+}
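+
+// For example (illustrative only): starting from a nil executor and
+// appending the skip singleton and then a type action yields an executor
+// whose actions run in that order:
+//
+//    var ex *LexerActionExecutor
+//    ex = LexerActionExecutorappend(ex, LexerSkipActionINSTANCE)
+//    ex = LexerActionExecutorappend(ex, NewLexerTypeAction(5))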
+
+// Creates a {@link LexerActionExecutor} which encodes the current offset
+// for position-dependent lexer actions.
+//
+// Normally, when the executor encounters lexer actions where
+// {@link LexerAction//isPositionDependent} returns {@code true}, it calls
+// {@link IntStream//seek} on the input {@link CharStream} to set the input
+// position to the end of the current token. This behavior provides
+// for efficient DFA representation of lexer actions which appear at the end
+// of a lexer rule, even when the lexer rule Matches a variable number of
+// characters.
+//
+// Prior to traversing a Match transition in the ATN, the current offset
+// from the token start index is assigned to all position-dependent lexer
+// actions which have not already been assigned a fixed offset. By storing
+// the offsets relative to the token start index, the DFA representation of
+// lexer actions which appear in the middle of tokens remains efficient due
+// to sharing among tokens of the same length, regardless of their absolute
+// position in the input stream.
+//
+// If the current executor already has offsets assigned to all
+// position-dependent lexer actions, the method returns {@code this}.
+//
+// @param offset The current offset to assign to all position-dependent
+// lexer actions which do not already have offsets assigned.
+//
+// @return A {@link LexerActionExecutor} which stores input stream offsets
+// for all position-dependent lexer actions.
+func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor {
+ var updatedLexerActions []LexerAction
+ for i := 0; i < len(l.lexerActions); i++ {
+ _, ok := l.lexerActions[i].(*LexerIndexedCustomAction)
+ if l.lexerActions[i].getIsPositionDependent() && !ok {
+ if updatedLexerActions == nil {
+ updatedLexerActions = make([]LexerAction, 0)
+
+ for _, a := range l.lexerActions {
+ updatedLexerActions = append(updatedLexerActions, a)
+ }
+ }
+
+ updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i])
+ }
+ }
+ if updatedLexerActions == nil {
+ return l
+ }
+
+ return NewLexerActionExecutor(updatedLexerActions)
+}
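+
+// For example (illustrative only): suppose a rule runs a custom action two
+// characters into a token. When fixOffsetBeforeMatch(2) is called, the
+// position-dependent LexerCustomAction is replaced in the copied slice by
+// NewLexerIndexedCustomAction(2, action), while position-independent
+// actions such as a LexerModeAction are left untouched.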
+
+// Execute the actions encapsulated by l executor within the context of a
+// particular {@link Lexer}.
+//
+// This method calls {@link IntStream//seek} to set the position of the
+// {@code input} {@link CharStream} prior to calling
+// {@link LexerAction//execute} on a position-dependent action. Before the
+// method returns, the input position will be restored to the same position
+// it was in when the method was invoked.
+//
+// @param lexer The lexer instance.
+// @param input The input stream which is the source for the current token.
+// When this method is called, the current {@link IntStream//index} for
+// {@code input} should be the start of the following token, i.e. 1
+// character past the end of the current token.
+// @param startIndex The token start index. This value may be passed to
+// {@link IntStream//seek} to set the {@code input} position to the beginning
+// of the token.
+func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) {
+ requiresSeek := false
+ stopIndex := input.Index()
+
+ defer func() {
+ if requiresSeek {
+ input.Seek(stopIndex)
+ }
+ }()
+
+ for i := 0; i < len(l.lexerActions); i++ {
+ lexerAction := l.lexerActions[i]
+ if la, ok := lexerAction.(*LexerIndexedCustomAction); ok {
+ offset := la.offset
+ input.Seek(startIndex + offset)
+ lexerAction = la.lexerAction
+ requiresSeek = (startIndex + offset) != stopIndex
+ } else if lexerAction.getIsPositionDependent() {
+ input.Seek(stopIndex)
+ requiresSeek = false
+ }
+ lexerAction.execute(lexer)
+ }
+}
+
+func (l *LexerActionExecutor) Hash() int {
+ if l == nil {
+ // TODO: Why is this here? l should not be nil
+ return 61
+ }
+
+ // TODO: This is created from the action itself when the struct is created - will this be an issue at some point? Java uses the runtime assign hashcode
+ return l.cachedHash
+}
+
+func (l *LexerActionExecutor) Equals(other interface{}) bool {
+ if l == other {
+ return true
+ }
+ othert, ok := other.(*LexerActionExecutor)
+ if !ok {
+ return false
+ }
+ if othert == nil {
+ return false
+ }
+ if l.cachedHash != othert.cachedHash {
+ return false
+ }
+ if len(l.lexerActions) != len(othert.lexerActions) {
+ return false
+ }
+ return slices.EqualFunc(l.lexerActions, othert.lexerActions, func(i, j LexerAction) bool {
+ return i.Equals(j)
+ })
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go
new file mode 100644
index 0000000..c573b75
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go
@@ -0,0 +1,684 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+var (
+ LexerATNSimulatorDebug = false
+ LexerATNSimulatorDFADebug = false
+
+ LexerATNSimulatorMinDFAEdge = 0
+ LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN
+
+ LexerATNSimulatorMatchCalls = 0
+)
+
+type ILexerATNSimulator interface {
+ IATNSimulator
+
+ reset()
+ Match(input CharStream, mode int) int
+ GetCharPositionInLine() int
+ GetLine() int
+ GetText(input CharStream) string
+ Consume(input CharStream)
+}
+
+type LexerATNSimulator struct {
+ *BaseATNSimulator
+
+ recog Lexer
+ predictionMode int
+ mergeCache DoubleDict
+ startIndex int
+ Line int
+ CharPositionInLine int
+ mode int
+ prevAccept *SimState
+ MatchCalls int
+}
+
+func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator {
+ l := new(LexerATNSimulator)
+
+ l.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache)
+
+ l.decisionToDFA = decisionToDFA
+ l.recog = recog
+ // The current token's starting index into the character stream.
+ // Shared across DFA to ATN simulation in case the ATN fails and the
+ // DFA did not have a previous accept state. In this case, we use the
+ // ATN-generated exception object.
+ l.startIndex = -1
+ // line number 1..n within the input
+ l.Line = 1
+ // The index of the character relative to the beginning of the line
+ // 0..n-1
+ l.CharPositionInLine = 0
+ l.mode = LexerDefaultMode
+ // Used during DFA/ATN exec to record the most recent accept configuration
+ // info
+ l.prevAccept = NewSimState()
+ // done
+ return l
+}
+
+func (l *LexerATNSimulator) copyState(simulator *LexerATNSimulator) {
+ l.CharPositionInLine = simulator.CharPositionInLine
+ l.Line = simulator.Line
+ l.mode = simulator.mode
+ l.startIndex = simulator.startIndex
+}
+
+func (l *LexerATNSimulator) Match(input CharStream, mode int) int {
+ l.MatchCalls++
+ l.mode = mode
+ mark := input.Mark()
+
+ defer func() {
+ input.Release(mark)
+ }()
+
+ l.startIndex = input.Index()
+ l.prevAccept.reset()
+
+ dfa := l.decisionToDFA[mode]
+
+ var s0 *DFAState
+ l.atn.stateMu.RLock()
+ s0 = dfa.getS0()
+ l.atn.stateMu.RUnlock()
+
+ if s0 == nil {
+ return l.MatchATN(input)
+ }
+
+ return l.execATN(input, s0)
+}
+
+func (l *LexerATNSimulator) reset() {
+ l.prevAccept.reset()
+ l.startIndex = -1
+ l.Line = 1
+ l.CharPositionInLine = 0
+ l.mode = LexerDefaultMode
+}
+
+func (l *LexerATNSimulator) MatchATN(input CharStream) int {
+ startState := l.atn.modeToStartState[l.mode]
+
+ if LexerATNSimulatorDebug {
+ fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String())
+ }
+ oldMode := l.mode
+ s0Closure := l.computeStartState(input, startState)
+ suppressEdge := s0Closure.hasSemanticContext
+ s0Closure.hasSemanticContext = false
+
+ next := l.addDFAState(s0Closure, suppressEdge)
+
+ predict := l.execATN(input, next)
+
+ if LexerATNSimulatorDebug {
+ fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString())
+ }
+ return predict
+}
+
+func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {
+
+ if LexerATNSimulatorDebug {
+ fmt.Println("start state closure=" + ds0.configs.String())
+ }
+ if ds0.isAcceptState {
+ // allow zero-length tokens
+ l.captureSimState(l.prevAccept, input, ds0)
+ }
+ t := input.LA(1)
+ s := ds0 // s is current/from DFA state
+
+ for { // while more work
+ if LexerATNSimulatorDebug {
+ fmt.Println("execATN loop starting closure: " + s.configs.String())
+ }
+
+ // As we move src->trg, src->trg, we keep track of the previous trg to
+ // avoid looking up the DFA state again, which is expensive.
+ // If the previous target was already part of the DFA, we might
+ // be able to avoid doing a reach operation upon t. If s!=nil,
+ // it means that semantic predicates didn't prevent us from
+ // creating a DFA state. Once we know s!=nil, we check to see if
+ // the DFA state has an edge already for t. If so, we can just reuse
+ // its configuration set; there's no point in re-computing it.
+ // This is kind of like doing DFA simulation within the ATN
+ // simulation because DFA simulation is really just a way to avoid
+ // computing reach/closure sets. Technically, once we know that
+ // we have a previously added DFA state, we could jump over to
+ // the DFA simulator. But, that would mean popping back and forth
+ // a lot and making things more complicated algorithmically.
+ // This optimization makes a lot of sense for loops within DFA.
+ // A character will take us back to an existing DFA state
+ // that already has lots of edges out of it. e.g., .* in comments.
+ target := l.getExistingTargetState(s, t)
+ if target == nil {
+ target = l.computeTargetState(input, s, t)
+ // print("Computed:" + str(target))
+ }
+ if target == ATNSimulatorError {
+ break
+ }
+ // If this is a consumable input element, make sure to consume before
+ // capturing the accept state so the input index, line, and char
+ // position accurately reflect the state of the interpreter at the
+ // end of the token.
+ if t != TokenEOF {
+ l.Consume(input)
+ }
+ if target.isAcceptState {
+ l.captureSimState(l.prevAccept, input, target)
+ if t == TokenEOF {
+ break
+ }
+ }
+ t = input.LA(1)
+ s = target // flip: current DFA target becomes the new src/from state
+ }
+
+ return l.failOrAccept(l.prevAccept, input, s.configs, t)
+}
+
+// Get an existing target state for an edge in the DFA. If the target state
+// for the edge has not yet been computed or is otherwise not available,
+// this method returns {@code nil}.
+//
+// @param s The current DFA state
+// @param t The next input symbol
+// @return The existing target DFA state for the given input symbol
+// {@code t}, or {@code nil} if the target state for this edge is not
+// already cached
+func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState {
+ if t < LexerATNSimulatorMinDFAEdge || t > LexerATNSimulatorMaxDFAEdge {
+ return nil
+ }
+
+ l.atn.edgeMu.RLock()
+ defer l.atn.edgeMu.RUnlock()
+ if s.getEdges() == nil {
+ return nil
+ }
+ target := s.getIthEdge(t - LexerATNSimulatorMinDFAEdge)
+ if LexerATNSimulatorDebug && target != nil {
+ fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber))
+ }
+ return target
+}
+
+// Compute a target state for an edge in the DFA, and attempt to add the
+// computed state and corresponding edge to the DFA.
+//
+// @param input The input stream
+// @param s The current DFA state
+// @param t The next input symbol
+//
+// @return The computed target DFA state for the given input symbol
+// {@code t}. If {@code t} does not lead to a valid DFA state, this method
+// returns {@link //ERROR}.
+func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState {
+ reach := NewOrderedATNConfigSet()
+
+ // if we don't find an existing DFA state
+ // Fill reach starting from closure, following t transitions
+ l.getReachableConfigSet(input, s.configs, reach.BaseATNConfigSet, t)
+
+ if len(reach.configs) == 0 { // we got nowhere on t from s
+ if !reach.hasSemanticContext {
+ // we got nowhere on t, but don't throw out this knowledge; it'd
+ // cause a failover from DFA later.
+ l.addDFAEdge(s, t, ATNSimulatorError, nil)
+ }
+ // stop when we can't Match any more char
+ return ATNSimulatorError
+ }
+ // Add an edge from s to target DFA found/created for reach
+ return l.addDFAEdge(s, t, nil, reach.BaseATNConfigSet)
+}
+
+func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach ATNConfigSet, t int) int {
+ if l.prevAccept.dfaState != nil {
+ lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor
+ l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column)
+ return prevAccept.dfaState.prediction
+ }
+
+ // if no accept and EOF is first char, return EOF
+ if t == TokenEOF && input.Index() == l.startIndex {
+ return TokenEOF
+ }
+
+ panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach))
+}
+
+// Given a starting configuration set, figure out all ATN configurations
+// we can reach upon input {@code t}. Parameter {@code reach} is a return
+// parameter.
+func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNConfigSet, reach ATNConfigSet, t int) {
+ // This is used to Skip processing for configs which have a lower priority
+ // than a config that already reached an accept state for the same rule
+ SkipAlt := ATNInvalidAltNumber
+
+ for _, cfg := range closure.GetItems() {
+ currentAltReachedAcceptState := (cfg.GetAlt() == SkipAlt)
+ if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision {
+ continue
+ }
+
+ if LexerATNSimulatorDebug {
+
+ fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) // l.recog, true))
+ }
+
+ for _, trans := range cfg.GetState().GetTransitions() {
+ target := l.getReachableTarget(trans, t)
+ if target != nil {
+ lexerActionExecutor := cfg.(*LexerATNConfig).lexerActionExecutor
+ if lexerActionExecutor != nil {
+ lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex)
+ }
+ treatEOFAsEpsilon := (t == TokenEOF)
+ config := NewLexerATNConfig3(cfg.(*LexerATNConfig), target, lexerActionExecutor)
+ if l.closure(input, config, reach,
+ currentAltReachedAcceptState, true, treatEOFAsEpsilon) {
+ // any remaining configs for this alt have a lower priority
+ // than the one that just reached an accept state.
+ SkipAlt = cfg.GetAlt()
+ }
+ }
+ }
+ }
+}
+
+func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) {
+ if LexerATNSimulatorDebug {
+ fmt.Printf("ACTION %v\n", lexerActionExecutor)
+ }
+ // seek to after last char in token
+ input.Seek(index)
+ l.Line = line
+ l.CharPositionInLine = charPos
+ if lexerActionExecutor != nil && l.recog != nil {
+ lexerActionExecutor.execute(l.recog, input, startIndex)
+ }
+}
+
+func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState {
+ if trans.Matches(t, 0, LexerMaxCharValue) {
+ return trans.getTarget()
+ }
+
+ return nil
+}
+
+func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *OrderedATNConfigSet {
+ configs := NewOrderedATNConfigSet()
+ for i := 0; i < len(p.GetTransitions()); i++ {
+ target := p.GetTransitions()[i].getTarget()
+ cfg := NewLexerATNConfig6(target, i+1, BasePredictionContextEMPTY)
+ l.closure(input, cfg, configs, false, false, false)
+ }
+
+ return configs
+}
+
+// Since the alternatives within any lexer decision are ordered by
+// preference, this method stops pursuing the closure as soon as an accept
+// state is reached. After the first accept state is reached by depth-first
+// search from {@code config}, all other (potentially reachable) states for
+// this rule would have a lower priority.
+//
+// @return {@code true} if an accept state is reached, otherwise
+// {@code false}.
+func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs ATNConfigSet,
+ currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool {
+
+ if LexerATNSimulatorDebug {
+ fmt.Println("closure(" + config.String() + ")") // config.String(l.recog, true) + ")")
+ }
+
+ _, ok := config.state.(*RuleStopState)
+ if ok {
+
+ if LexerATNSimulatorDebug {
+ if l.recog != nil {
+ fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config)
+ } else {
+ fmt.Printf("closure at rule stop %s\n", config)
+ }
+ }
+
+ if config.context == nil || config.context.hasEmptyPath() {
+ if config.context == nil || config.context.isEmpty() {
+ configs.Add(config, nil)
+ return true
+ }
+
+ configs.Add(NewLexerATNConfig2(config, config.state, BasePredictionContextEMPTY), nil)
+ currentAltReachedAcceptState = true
+ }
+ if config.context != nil && !config.context.isEmpty() {
+ for i := 0; i < config.context.length(); i++ {
+ if config.context.getReturnState(i) != BasePredictionContextEmptyReturnState {
+ newContext := config.context.GetParent(i) // "pop" return state
+ returnState := l.atn.states[config.context.getReturnState(i)]
+ cfg := NewLexerATNConfig2(config, returnState, newContext)
+ currentAltReachedAcceptState = l.closure(input, cfg, configs, currentAltReachedAcceptState, speculative, treatEOFAsEpsilon)
+ }
+ }
+ }
+ return currentAltReachedAcceptState
+ }
+ // optimization
+ if !config.state.GetEpsilonOnlyTransitions() {
+ if !currentAltReachedAcceptState || !config.passedThroughNonGreedyDecision {
+ configs.Add(config, nil)
+ }
+ }
+ for j := 0; j < len(config.state.GetTransitions()); j++ {
+ trans := config.state.GetTransitions()[j]
+ cfg := l.getEpsilonTarget(input, config, trans, configs, speculative, treatEOFAsEpsilon)
+ if cfg != nil {
+ currentAltReachedAcceptState = l.closure(input, cfg, configs,
+ currentAltReachedAcceptState, speculative, treatEOFAsEpsilon)
+ }
+ }
+ return currentAltReachedAcceptState
+}
+
+// side-effect: can alter configs.hasSemanticContext
+func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNConfig, trans Transition,
+ configs ATNConfigSet, speculative, treatEOFAsEpsilon bool) *LexerATNConfig {
+
+ var cfg *LexerATNConfig
+
+ if trans.getSerializationType() == TransitionRULE {
+
+ rt := trans.(*RuleTransition)
+ newContext := SingletonBasePredictionContextCreate(config.context, rt.followState.GetStateNumber())
+ cfg = NewLexerATNConfig2(config, trans.getTarget(), newContext)
+
+ } else if trans.getSerializationType() == TransitionPRECEDENCE {
+ panic("Precedence predicates are not supported in lexers.")
+ } else if trans.getSerializationType() == TransitionPREDICATE {
+ // Track traversing semantic predicates. If we traverse,
+ // we cannot add a DFA state for this "reach" computation
+ // because the DFA would not test the predicate again in the
+ // future. Rather than creating collections of semantic predicates
+ // like v3 and testing them on prediction, v4 will test them on the
+ // fly all the time using the ATN not the DFA. This is slower but
+ // semantically it's not used that often. One of the key elements to
+ // this predicate mechanism is not adding DFA states that see
+ // predicates immediately afterwards in the ATN. For example,
+
+ // a : ID {p1}? | ID {p2}?
+
+ // should create the start state for rule 'a' (to save start state
+ // competition), but should not create target of ID state. The
+ // collection of ATN states the following ID references includes
+ // states reached by traversing predicates. Since this is when we
+ // test them, we cannot cache the DFA state target of ID.
+
+ pt := trans.(*PredicateTransition)
+
+ if LexerATNSimulatorDebug {
+ fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex))
+ }
+ configs.SetHasSemanticContext(true)
+ if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) {
+ cfg = NewLexerATNConfig4(config, trans.getTarget())
+ }
+ } else if trans.getSerializationType() == TransitionACTION {
+ if config.context == nil || config.context.hasEmptyPath() {
+ // execute actions anywhere in the start rule for a token.
+ //
+ // TODO: if the entry rule is invoked recursively, some
+ // actions may be executed during the recursive call. The
+ // problem can appear when hasEmptyPath() is true but
+ // isEmpty() is false. In this case, the config needs to be
+ // split into two contexts - one with just the empty path
+ // and another with everything but the empty path.
+ // Unfortunately, the current algorithm does not allow
+ // getEpsilonTarget to return two configurations, so
+ // additional modifications are needed before we can support
+ // the split operation.
+ lexerActionExecutor := LexerActionExecutorappend(config.lexerActionExecutor, l.atn.lexerActions[trans.(*ActionTransition).actionIndex])
+ cfg = NewLexerATNConfig3(config, trans.getTarget(), lexerActionExecutor)
+ } else {
+ // ignore actions in referenced rules
+ cfg = NewLexerATNConfig4(config, trans.getTarget())
+ }
+ } else if trans.getSerializationType() == TransitionEPSILON {
+ cfg = NewLexerATNConfig4(config, trans.getTarget())
+ } else if trans.getSerializationType() == TransitionATOM ||
+ trans.getSerializationType() == TransitionRANGE ||
+ trans.getSerializationType() == TransitionSET {
+ if treatEOFAsEpsilon {
+ if trans.Matches(TokenEOF, 0, LexerMaxCharValue) {
+ cfg = NewLexerATNConfig4(config, trans.getTarget())
+ }
+ }
+ }
+ return cfg
+}
+
+// Evaluate a predicate specified in the lexer.
+//
+// If {@code speculative} is {@code true}, this method was called before
+// {@link //consume} for the Matched character. This method should call
+// {@link //consume} before evaluating the predicate to ensure position
+// sensitive values, including {@link Lexer//GetText}, {@link Lexer//GetLine},
+// and {@link Lexer//getcolumn}, properly reflect the current
+// lexer state. This method should restore {@code input} and the simulator
+// to the original state before returning (i.e. undo the actions made by the
+// call to {@link //consume}).
+//
+// @param input The input stream.
+// @param ruleIndex The rule containing the predicate.
+// @param predIndex The index of the predicate within the rule.
+// @param speculative {@code true} if the current index in {@code input} is
+// one character before the predicate's location.
+//
+// @return {@code true} if the specified predicate evaluates to
+// {@code true}.
+func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool {
+ // assume true if no recognizer was provided
+ if l.recog == nil {
+ return true
+ }
+ if !speculative {
+ return l.recog.Sempred(nil, ruleIndex, predIndex)
+ }
+ savedcolumn := l.CharPositionInLine
+ savedLine := l.Line
+ index := input.Index()
+ marker := input.Mark()
+
+ defer func() {
+ l.CharPositionInLine = savedcolumn
+ l.Line = savedLine
+ input.Seek(index)
+ input.Release(marker)
+ }()
+
+ l.Consume(input)
+ return l.recog.Sempred(nil, ruleIndex, predIndex)
+}
+
+func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream, dfaState *DFAState) {
+ settings.index = input.Index()
+ settings.line = l.Line
+ settings.column = l.CharPositionInLine
+ settings.dfaState = dfaState
+}
+
+func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs ATNConfigSet) *DFAState {
+ if to == nil && cfgs != nil {
+ // leading to this call, ATNConfigSet.hasSemanticContext is used as a
+ // marker indicating dynamic predicate evaluation makes this edge
+ // dependent on the specific input sequence, so the static edge in the
+ // DFA should be omitted. The target DFAState is still created since
+ // execATN has the ability to reSynchronize with the DFA state cache
+ // following the predicate evaluation step.
+ //
+ // TJP notes: next time through the DFA, we see a pred again and eval.
+ // If that gets us to a previously created (but dangling) DFA
+ // state, we can continue in pure DFA mode from there.
+ suppressEdge := cfgs.HasSemanticContext()
+ cfgs.SetHasSemanticContext(false)
+
+ to = l.addDFAState(cfgs, true)
+
+ if suppressEdge {
+ return to
+ }
+ }
+ // add the edge
+ if tk < LexerATNSimulatorMinDFAEdge || tk > LexerATNSimulatorMaxDFAEdge {
+ // Only track edges within the DFA bounds
+ return to
+ }
+ if LexerATNSimulatorDebug {
+ fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk))
+ }
+ l.atn.edgeMu.Lock()
+ defer l.atn.edgeMu.Unlock()
+ if from.getEdges() == nil {
+ // make room for tokens 1..n and -1 masquerading as index 0
+ from.setEdges(make([]*DFAState, LexerATNSimulatorMaxDFAEdge-LexerATNSimulatorMinDFAEdge+1))
+ }
+ from.setIthEdge(tk-LexerATNSimulatorMinDFAEdge, to) // connect
+
+ return to
+}
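+
+// For example (illustrative only): with the bounds above, an edge for 'a'
+// (code point 97) is stored at index 97-LexerATNSimulatorMinDFAEdge = 97 of
+// the 128-entry edge array, while a character such as 'é' (code point 233)
+// exceeds LexerATNSimulatorMaxDFAEdge, so no edge is cached and the lexer
+// falls back to ATN simulation for that character on every pass.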
+
+// Add a new DFA state if there isn't one with this set of
+// configurations already. This method also detects the first
+// configuration containing an ATN rule stop state. Later, when
+// traversing the DFA, we will know which rule to accept.
+func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool) *DFAState {
+
+ proposed := NewDFAState(-1, configs)
+ var firstConfigWithRuleStopState ATNConfig
+
+ for _, cfg := range configs.GetItems() {
+
+ _, ok := cfg.GetState().(*RuleStopState)
+
+ if ok {
+ firstConfigWithRuleStopState = cfg
+ break
+ }
+ }
+ if firstConfigWithRuleStopState != nil {
+ proposed.isAcceptState = true
+ proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor
+ proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()])
+ }
+ dfa := l.decisionToDFA[l.mode]
+
+ l.atn.stateMu.Lock()
+ defer l.atn.stateMu.Unlock()
+ existing, present := dfa.states.Get(proposed)
+ if present {
+
+ // This state was already present, so just return it.
+ //
+ proposed = existing
+ } else {
+
+ // We need to add the new state
+ //
+ proposed.stateNumber = dfa.states.Len()
+ configs.SetReadOnly(true)
+ proposed.configs = configs
+ dfa.states.Put(proposed)
+ }
+ if !suppressEdge {
+ dfa.setS0(proposed)
+ }
+ return proposed
+}
+
+func (l *LexerATNSimulator) getDFA(mode int) *DFA {
+ return l.decisionToDFA[mode]
+}
+
+// Get the text Matched so far for the current token.
+func (l *LexerATNSimulator) GetText(input CharStream) string {
+ // index is first lookahead char, don't include.
+ return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1))
+}
+
+func (l *LexerATNSimulator) Consume(input CharStream) {
+ curChar := input.LA(1)
+ if curChar == int('\n') {
+ l.Line++
+ l.CharPositionInLine = 0
+ } else {
+ l.CharPositionInLine++
+ }
+ input.Consume()
+}
+
+func (l *LexerATNSimulator) GetCharPositionInLine() int {
+ return l.CharPositionInLine
+}
+
+func (l *LexerATNSimulator) GetLine() int {
+ return l.Line
+}
+
+func (l *LexerATNSimulator) GetTokenName(tt int) string {
+ if tt == -1 {
+ return "EOF"
+ }
+
+ var sb strings.Builder
+ sb.Grow(6)
+ sb.WriteByte('\'')
+ sb.WriteRune(rune(tt))
+ sb.WriteByte('\'')
+
+ return sb.String()
+}
+
+func resetSimState(sim *SimState) {
+ sim.index = -1
+ sim.line = 0
+ sim.column = -1
+ sim.dfaState = nil
+}
+
+type SimState struct {
+ index int
+ line int
+ column int
+ dfaState *DFAState
+}
+
+func NewSimState() *SimState {
+ s := new(SimState)
+ resetSimState(s)
+ return s
+}
+
+func (s *SimState) reset() {
+ resetSimState(s)
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go
new file mode 100644
index 0000000..7668961
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go
@@ -0,0 +1,216 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type LL1Analyzer struct {
+ atn *ATN
+}
+
+func NewLL1Analyzer(atn *ATN) *LL1Analyzer {
+ la := new(LL1Analyzer)
+ la.atn = atn
+ return la
+}
+
+// - Special value added to the lookahead sets to indicate that we hit
+// a predicate during analysis if {@code seeThruPreds==false}.
+//
+// /
+const (
+ LL1AnalyzerHitPred = TokenInvalidType
+)
+
+// *
+// Calculates the SLL(1) expected lookahead set for each outgoing transition
+// of an {@link ATNState}. The returned array has one element for each
+// outgoing transition in {@code s}. If the closure from transition
+// i leads to a semantic predicate before Matching a symbol, the
+// element at index i of the result will be {@code nil}.
+//
+// @param s the ATN state
+// @return the expected symbols for each outgoing transition of {@code s}.
+func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
+ if s == nil {
+ return nil
+ }
+ count := len(s.GetTransitions())
+ look := make([]*IntervalSet, count)
+ for alt := 0; alt < count; alt++ {
+ look[alt] = NewIntervalSet()
+ lookBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst)
+ seeThruPreds := false // fail to get lookahead upon pred
+ la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false)
+ // Wipe out lookahead for this alternative if we found nothing,
+ // or we had a predicate when !seeThruPreds
+ if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) {
+ look[alt] = nil
+ }
+ }
+ return look
+}
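+
+// For example (hypothetical decision state): for a two-alternative decision
+// generated from ('a' | 'b'), the result would be roughly
+//
+//	look[0] -> {'a'}
+//	look[1] -> {'b'}
+//
+// and an alternative guarded by a semantic predicate yields a nil entry,
+// because seeThruPreds is false here.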
+
+// *
+// Compute set of tokens that can follow {@code s} in the ATN in the
+// specified {@code ctx}.
+//
+//
+// If {@code ctx} is {@code nil} and the end of the rule containing
+// {@code s} is reached, {@link Token//EPSILON} is added to the result set.
+// If {@code ctx} is not {@code nil} and the end of the outermost rule is
+// reached, {@link Token//EOF} is added to the result set.
+//
+// @param s the ATN state
+// @param stopState the ATN state to stop at. This can be a
+// {@link BlockEndState} to detect epsilon paths through a closure.
+// @param ctx the complete parser context, or {@code nil} if the context
+// should be ignored
+//
+// @return The set of tokens that can follow {@code s} in the ATN in the
+// specified {@code ctx}.
+// /
+func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet {
+ r := NewIntervalSet()
+ seeThruPreds := true // ignore preds get all lookahead
+ var lookContext PredictionContext
+ if ctx != nil {
+ lookContext = predictionContextFromRuleContext(s.GetATN(), ctx)
+ }
+ la.look1(s, stopState, lookContext, r, NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst), NewBitSet(), seeThruPreds, true)
+ return r
+}
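+
+// A typical call site, as a hedged sketch (state and ctx are whatever ATN
+// state and rule context the caller cares about):
+//
+//	la := NewLL1Analyzer(atn)
+//	following := la.Look(state, nil, ctx) // tokens that can follow state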
+
+//*
+// Compute set of tokens that can follow {@code s} in the ATN in the
+// specified {@code ctx}.
+//
+//
+// If {@code ctx} is {@code nil} and {@code stopState} or the end of the
+// rule containing {@code s} is reached, {@link Token//EPSILON} is added to
+// the result set. If {@code ctx} is not {@code nil} and {@code addEOF} is
+// {@code true} and {@code stopState} or the end of the outermost rule is
+// reached, {@link Token//EOF} is added to the result set.
+//
+// @param s the ATN state.
+// @param stopState the ATN state to stop at. This can be a
+// {@link BlockEndState} to detect epsilon paths through a closure.
+// @param ctx The outer context, or {@code nil} if the outer context should
+// not be used.
+// @param look The result lookahead set.
+// @param lookBusy A set used for preventing epsilon closures in the ATN
+// from causing a stack overflow. Outside code should pass
+// {@code NewSet} for this argument.
+// @param calledRuleStack A set used for preventing left recursion in the
+// ATN from causing a stack overflow. Outside code should pass
+// {@code NewBitSet()} for this argument.
+// @param seeThruPreds {@code true} to treat semantic predicates as
+// implicitly {@code true} and "see through them", otherwise {@code false}
+// to treat semantic predicates as opaque and add {@link //HitPred} to the
+// result if one is encountered.
+// @param addEOF Add {@link Token//EOF} to the result if the end of the
+// outermost context is reached. This parameter has no effect if {@code ctx}
+// is {@code nil}.
+
+func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) {
+
+ returnState := la.atn.states[ctx.getReturnState(i)]
+ la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+
+}
+
+func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) {
+
+ c := NewBaseATNConfig6(s, 0, ctx)
+
+ if lookBusy.Contains(c) {
+ return
+ }
+
+ _, present := lookBusy.Put(c)
+ if present {
+ return
+ }
+ if s == stopState {
+ if ctx == nil {
+ look.addOne(TokenEpsilon)
+ return
+ } else if ctx.isEmpty() && addEOF {
+ look.addOne(TokenEOF)
+ return
+ }
+ }
+
+ _, ok := s.(*RuleStopState)
+
+ if ok {
+ if ctx == nil {
+ look.addOne(TokenEpsilon)
+ return
+ } else if ctx.isEmpty() && addEOF {
+ look.addOne(TokenEOF)
+ return
+ }
+
+ if ctx != BasePredictionContextEMPTY {
+ removed := calledRuleStack.contains(s.GetRuleIndex())
+ defer func() {
+ if removed {
+ calledRuleStack.add(s.GetRuleIndex())
+ }
+ }()
+ calledRuleStack.remove(s.GetRuleIndex())
+ // run thru all possible stack tops in ctx
+ for i := 0; i < ctx.length(); i++ {
+ returnState := la.atn.states[ctx.getReturnState(i)]
+ la.look2(returnState, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, i)
+ }
+ return
+ }
+ }
+
+ n := len(s.GetTransitions())
+
+ for i := 0; i < n; i++ {
+ t := s.GetTransitions()[i]
+
+ if t1, ok := t.(*RuleTransition); ok {
+ if calledRuleStack.contains(t1.getTarget().GetRuleIndex()) {
+ continue
+ }
+
+ newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
+ la.look3(stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, t1)
+ } else if t2, ok := t.(AbstractPredicateTransition); ok {
+ if seeThruPreds {
+ la.look1(t2.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+ } else {
+ look.addOne(LL1AnalyzerHitPred)
+ }
+ } else if t.getIsEpsilon() {
+ la.look1(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+ } else if _, ok := t.(*WildcardTransition); ok {
+ look.addRange(TokenMinUserTokenType, la.atn.maxTokenType)
+ } else {
+ set := t.getLabel()
+ if set != nil {
+ if _, ok := t.(*NotSetTransition); ok {
+ set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType)
+ }
+ look.addSet(set)
+ }
+ }
+ }
+}
+
+func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) {
+
+ newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
+
+ defer func() {
+ calledRuleStack.remove(t1.getTarget().GetRuleIndex())
+ }()
+
+ calledRuleStack.add(t1.getTarget().GetRuleIndex())
+ la.look1(t1.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go
new file mode 100644
index 0000000..d26bf06
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go
@@ -0,0 +1,708 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+)
+
+type Parser interface {
+ Recognizer
+
+ GetInterpreter() *ParserATNSimulator
+
+ GetTokenStream() TokenStream
+ GetTokenFactory() TokenFactory
+ GetParserRuleContext() ParserRuleContext
+ SetParserRuleContext(ParserRuleContext)
+ Consume() Token
+ GetParseListeners() []ParseTreeListener
+
+ GetErrorHandler() ErrorStrategy
+ SetErrorHandler(ErrorStrategy)
+ GetInputStream() IntStream
+ GetCurrentToken() Token
+ GetExpectedTokens() *IntervalSet
+ NotifyErrorListeners(string, Token, RecognitionException)
+ IsExpectedToken(int) bool
+ GetPrecedence() int
+ GetRuleInvocationStack(ParserRuleContext) []string
+}
+
+type BaseParser struct {
+ *BaseRecognizer
+
+ Interpreter *ParserATNSimulator
+ BuildParseTrees bool
+
+ input TokenStream
+ errHandler ErrorStrategy
+ precedenceStack IntStack
+ ctx ParserRuleContext
+
+ tracer *TraceListener
+ parseListeners []ParseTreeListener
+ _SyntaxErrors int
+}
+
+// This is all the parsing support code; essentially most of it is error
+// recovery stuff.
+func NewBaseParser(input TokenStream) *BaseParser {
+
+ p := new(BaseParser)
+
+ p.BaseRecognizer = NewBaseRecognizer()
+
+ // The input stream.
+ p.input = nil
+ // The error handling strategy for the parser. The default value is a new
+ // instance of {@link DefaultErrorStrategy}.
+ p.errHandler = NewDefaultErrorStrategy()
+ p.precedenceStack = make([]int, 0)
+ p.precedenceStack.Push(0)
+ // The {@link ParserRuleContext} object for the currently executing rule.
+ // This is always non-nil during the parsing process.
+ p.ctx = nil
+ // Specifies whether or not the parser should construct a parse tree during
+ // the parsing process. The default value is {@code true}.
+ p.BuildParseTrees = true
+ // When {@link //setTrace}{@code (true)} is called, a reference to the
+ // {@link TraceListener} is stored here so it can be easily removed in a
+ // later call to {@link //setTrace}{@code (false)}. The listener itself is
+ // implemented as a parser listener so this field is not directly used by
+ // other parser methods.
+ p.tracer = nil
+ // The list of {@link ParseTreeListener} listeners registered to receive
+ // events during the parse.
+ p.parseListeners = nil
+ // The number of syntax errors reported during parsing. This value is
+ // incremented each time {@link //NotifyErrorListeners} is called.
+ p._SyntaxErrors = 0
+ p.SetInputStream(input)
+
+ return p
+}
+
+// This field maps from the serialized ATN string to the deserialized
+// {@link ATN} with bypass alternatives.
+//
+// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions()
+var bypassAltsAtnCache = make(map[string]int)
+
+// reset the parser's state
+func (p *BaseParser) reset() {
+ if p.input != nil {
+ p.input.Seek(0)
+ }
+ p.errHandler.reset(p)
+ p.ctx = nil
+ p._SyntaxErrors = 0
+ p.SetTrace(nil)
+ p.precedenceStack = make([]int, 0)
+ p.precedenceStack.Push(0)
+ if p.Interpreter != nil {
+ p.Interpreter.reset()
+ }
+}
+
+func (p *BaseParser) GetErrorHandler() ErrorStrategy {
+ return p.errHandler
+}
+
+func (p *BaseParser) SetErrorHandler(e ErrorStrategy) {
+ p.errHandler = e
+}
+
+// Match current input symbol against {@code ttype}. If the symbol type
+// Matches, {@link ANTLRErrorStrategy//ReportMatch} and {@link //consume} are
+// called to complete the Match process.
+//
+//
+// If the symbol type does not Match,
+// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
+// strategy to attempt recovery. If {@link //getBuildParseTree} is
+// {@code true} and the token index of the symbol returned by
+// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
+// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
+//
+// @param ttype the token type to Match
+// @return the Matched symbol
+// @panics RecognitionException if the current input symbol did not Match
+// {@code ttype} and the error strategy could not recover from the
+// mismatched symbol
+
+func (p *BaseParser) Match(ttype int) Token {
+
+ t := p.GetCurrentToken()
+
+ if t.GetTokenType() == ttype {
+ p.errHandler.ReportMatch(p)
+ p.Consume()
+ } else {
+ t = p.errHandler.RecoverInline(p)
+ if p.BuildParseTrees && t.GetTokenIndex() == -1 {
+ // we must have conjured up a new token during single-token
+ // insertion if it's not the current symbol
+ p.ctx.AddErrorNode(t)
+ }
+ }
+
+ return t
+}
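+
+// In a generated parser, a token reference in the grammar compiles down to a
+// call like the following (MyParserID is a hypothetical generated token
+// constant):
+//
+//	tok := p.Match(MyParserID) // consume the ID token, or recover inline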
+
+// Match current input symbol as a wildcard. If the symbol type Matches
+// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//ReportMatch}
+// and {@link //consume} are called to complete the Match process.
+//
+//
+// If the symbol type does not Match,
+// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
+// strategy to attempt recovery. If {@link //getBuildParseTree} is
+// {@code true} and the token index of the symbol returned by
+// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
+// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
+//
+// @return the Matched symbol
+// @panics RecognitionException if the current input symbol did not Match
+// a wildcard and the error strategy could not recover from the mismatched
+// symbol
+
+func (p *BaseParser) MatchWildcard() Token {
+ t := p.GetCurrentToken()
+ if t.GetTokenType() > 0 {
+ p.errHandler.ReportMatch(p)
+ p.Consume()
+ } else {
+ t = p.errHandler.RecoverInline(p)
+ if p.BuildParseTrees && t.GetTokenIndex() == -1 {
+ // we must have conjured up a new token during single-token
+ // insertion if it's not the current symbol
+ p.ctx.AddErrorNode(t)
+ }
+ }
+ return t
+}
+
+func (p *BaseParser) GetParserRuleContext() ParserRuleContext {
+ return p.ctx
+}
+
+func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) {
+ p.ctx = v
+}
+
+func (p *BaseParser) GetParseListeners() []ParseTreeListener {
+ if p.parseListeners == nil {
+ return make([]ParseTreeListener, 0)
+ }
+ return p.parseListeners
+}
+
+// Registers {@code listener} to receive events during the parsing process.
+//
+//
+// To support output-preserving grammar transformations (including but not
+// limited to left-recursion removal, automated left-factoring, and
+// optimized code generation), calls to listener methods during the parse
+// may differ substantially from calls made by
+// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In
+// particular, rule entry and exit events may occur in a different order
+// during the parse than after the parser. In addition, calls to certain
+// rule entry methods may be omitted.
+//
+//
+// With the following specific exceptions, calls to listener events are
+// deterministic, i.e. for identical input the calls to listener
+// methods will be the same.
+//
+//
+//   - Alterations to the grammar used to generate code may change the
+//     behavior of the listener calls.
+//   - Alterations to the command line options passed to ANTLR 4 when
+//     generating the parser may change the behavior of the listener calls.
+//   - Changing the version of the ANTLR Tool used to generate the parser
+//     may change the behavior of the listener calls.
+//
+//
+// @param listener the listener to add
+//
+// @panics nilPointerException if {@code listener} is {@code nil}
+func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
+ if listener == nil {
+ panic("listener")
+ }
+ if p.parseListeners == nil {
+ p.parseListeners = make([]ParseTreeListener, 0)
+ }
+ p.parseListeners = append(p.parseListeners, listener)
+}
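+
+// Usage sketch (myListener is a hypothetical value implementing
+// ParseTreeListener):
+//
+//	p.AddParseListener(myListener)
+//	// myListener now receives EnterEveryRule/ExitEveryRule events during the parse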
+
+// Remove {@code listener} from the list of parse listeners.
+//
+//
+// If {@code listener} is {@code nil} or has not been added as a parse
+// listener, this method does nothing.
+// @param listener the listener to remove
+func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) {
+
+ if p.parseListeners != nil {
+
+ idx := -1
+ for i, v := range p.parseListeners {
+ if v == listener {
+ idx = i
+ break
+ }
+ }
+
+ if idx == -1 {
+ return
+ }
+
+ // remove the listener from the slice
+ p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...)
+
+ if len(p.parseListeners) == 0 {
+ p.parseListeners = nil
+ }
+ }
+}
+
+// Remove all parse listeners.
+func (p *BaseParser) removeParseListeners() {
+ p.parseListeners = nil
+}
+
+// Notify any parse listeners of an enter rule event.
+func (p *BaseParser) TriggerEnterRuleEvent() {
+ if p.parseListeners != nil {
+ ctx := p.ctx
+ for _, listener := range p.parseListeners {
+ listener.EnterEveryRule(ctx)
+ ctx.EnterRule(listener)
+ }
+ }
+}
+
+// Notify any parse listeners of an exit rule event.
+//
+// @see //addParseListener
+func (p *BaseParser) TriggerExitRuleEvent() {
+ if p.parseListeners != nil {
+ // reverse order walk of listeners
+ ctx := p.ctx
+ l := len(p.parseListeners) - 1
+
+ for i := range p.parseListeners {
+ listener := p.parseListeners[l-i]
+ ctx.ExitRule(listener)
+ listener.ExitEveryRule(ctx)
+ }
+ }
+}
+
+func (p *BaseParser) GetInterpreter() *ParserATNSimulator {
+ return p.Interpreter
+}
+
+func (p *BaseParser) GetATN() *ATN {
+ return p.Interpreter.atn
+}
+
+func (p *BaseParser) GetTokenFactory() TokenFactory {
+ return p.input.GetTokenSource().GetTokenFactory()
+}
+
+// Tell our token source and error strategy about a new way to create tokens.
+func (p *BaseParser) setTokenFactory(factory TokenFactory) {
+ p.input.GetTokenSource().setTokenFactory(factory)
+}
+
+// The ATN with bypass alternatives is expensive to create so we create it
+// lazily.
+//
+// @panics UnsupportedOperationException if the current parser does not
+// implement the {@link //getSerializedATN()} method.
+func (p *BaseParser) GetATNWithBypassAlts() {
+
+ // TODO
+ panic("Not implemented!")
+
+ // serializedAtn := p.getSerializedATN()
+ // if (serializedAtn == nil) {
+ // panic("The current parser does not support an ATN with bypass alternatives.")
+ // }
+ // result := p.bypassAltsAtnCache[serializedAtn]
+ // if (result == nil) {
+ // deserializationOptions := NewATNDeserializationOptions(nil)
+ // deserializationOptions.generateRuleBypassTransitions = true
+ // result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn)
+ // p.bypassAltsAtnCache[serializedAtn] = result
+ // }
+ // return result
+}
+
+// The preferred method of getting a tree pattern. For example, here's a
+// sample use:
+//
+//
+// ParseTree t = parser.expr()
+// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0",
+// MyParser.RULE_expr)
+// ParseTreeMatch m = p.Match(t)
+// String id = m.Get("ID")
+//
+
+func (p *BaseParser) compileParseTreePattern(pattern string, patternRuleIndex int, lexer Lexer) {
+
+ panic("NewParseTreePatternMatcher not implemented!")
+ //
+ // if (lexer == nil) {
+ // if (p.GetTokenStream() != nil) {
+ // tokenSource := p.GetTokenStream().GetTokenSource()
+ // if _, ok := tokenSource.(ILexer); ok {
+ // lexer = tokenSource
+ // }
+ // }
+ // }
+ // if (lexer == nil) {
+ // panic("Parser can't discover a lexer to use")
+ // }
+
+ // m := NewParseTreePatternMatcher(lexer, p)
+ // return m.compile(pattern, patternRuleIndex)
+}
+
+func (p *BaseParser) GetInputStream() IntStream {
+ return p.GetTokenStream()
+}
+
+func (p *BaseParser) SetInputStream(input TokenStream) {
+ p.SetTokenStream(input)
+}
+
+func (p *BaseParser) GetTokenStream() TokenStream {
+ return p.input
+}
+
+// Set the token stream and reset the parser.
+func (p *BaseParser) SetTokenStream(input TokenStream) {
+ p.input = nil
+ p.reset()
+ p.input = input
+}
+
+// Match needs to return the current input symbol, which gets put
+// into the label for the associated token ref e.g., x=ID.
+func (p *BaseParser) GetCurrentToken() Token {
+ return p.input.LT(1)
+}
+
+func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) {
+ if offendingToken == nil {
+ offendingToken = p.GetCurrentToken()
+ }
+ p._SyntaxErrors++
+ line := offendingToken.GetLine()
+ column := offendingToken.GetColumn()
+ listener := p.GetErrorListenerDispatch()
+ listener.SyntaxError(p, offendingToken, line, column, msg, err)
+}
+
+func (p *BaseParser) Consume() Token {
+ o := p.GetCurrentToken()
+ if o.GetTokenType() != TokenEOF {
+ p.GetInputStream().Consume()
+ }
+ hasListener := p.parseListeners != nil && len(p.parseListeners) > 0
+ if p.BuildParseTrees || hasListener {
+ if p.errHandler.InErrorRecoveryMode(p) {
+ node := p.ctx.AddErrorNode(o)
+ if p.parseListeners != nil {
+ for _, l := range p.parseListeners {
+ l.VisitErrorNode(node)
+ }
+ }
+
+ } else {
+ node := p.ctx.AddTokenNode(o)
+ if p.parseListeners != nil {
+ for _, l := range p.parseListeners {
+ l.VisitTerminal(node)
+ }
+ }
+ }
+ // node.invokingState = p.state
+ }
+
+ return o
+}
+
+func (p *BaseParser) addContextToParseTree() {
+ // add current context to parent if we have a parent
+ if p.ctx.GetParent() != nil {
+ p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx)
+ }
+}
+
+func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) {
+ p.SetState(state)
+ p.ctx = localctx
+ p.ctx.SetStart(p.input.LT(1))
+ if p.BuildParseTrees {
+ p.addContextToParseTree()
+ }
+ if p.parseListeners != nil {
+ p.TriggerEnterRuleEvent()
+ }
+}
+
+func (p *BaseParser) ExitRule() {
+ p.ctx.SetStop(p.input.LT(-1))
+ // trigger event on ctx, before it reverts to parent
+ if p.parseListeners != nil {
+ p.TriggerExitRuleEvent()
+ }
+ p.SetState(p.ctx.GetInvokingState())
+ if p.ctx.GetParent() != nil {
+ p.ctx = p.ctx.GetParent().(ParserRuleContext)
+ } else {
+ p.ctx = nil
+ }
+}
+
+func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) {
+ localctx.SetAltNumber(altNum)
+ // if we have a new localctx, make sure we replace the existing ctx
+ // that is the previous child of the parse tree
+ if p.BuildParseTrees && p.ctx != localctx {
+ if p.ctx.GetParent() != nil {
+ p.ctx.GetParent().(ParserRuleContext).RemoveLastChild()
+ p.ctx.GetParent().(ParserRuleContext).AddChild(localctx)
+ }
+ }
+ p.ctx = localctx
+}
+
+// Get the precedence level for the top-most precedence rule.
+//
+// @return The precedence level for the top-most precedence rule, or -1 if
+// the parser context is not nested within a precedence rule.
+
+func (p *BaseParser) GetPrecedence() int {
+ if len(p.precedenceStack) == 0 {
+ return -1
+ }
+
+ return p.precedenceStack[len(p.precedenceStack)-1]
+}
+
+func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int) {
+ p.SetState(state)
+ p.precedenceStack.Push(precedence)
+ p.ctx = localctx
+ p.ctx.SetStart(p.input.LT(1))
+ if p.parseListeners != nil {
+ p.TriggerEnterRuleEvent() // simulates rule entry for
+ // left-recursive rules
+ }
+}
+
+//
+// Like {@link //EnterRule} but for recursive rules.
+
+func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) {
+ previous := p.ctx
+ previous.SetParent(localctx)
+ previous.SetInvokingState(state)
+ previous.SetStop(p.input.LT(-1))
+
+ p.ctx = localctx
+ p.ctx.SetStart(previous.GetStart())
+ if p.BuildParseTrees {
+ p.ctx.AddChild(previous)
+ }
+ if p.parseListeners != nil {
+ p.TriggerEnterRuleEvent() // simulates rule entry for
+ // left-recursive rules
+ }
+}
+
+func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) {
+ p.precedenceStack.Pop()
+ p.ctx.SetStop(p.input.LT(-1))
+ retCtx := p.ctx // save current ctx (return value)
+ // unroll so ctx is as it was before call to recursive method
+ if p.parseListeners != nil {
+ for p.ctx != parentCtx {
+ p.TriggerExitRuleEvent()
+ p.ctx = p.ctx.GetParent().(ParserRuleContext)
+ }
+ } else {
+ p.ctx = parentCtx
+ }
+ // hook into tree
+ retCtx.SetParent(parentCtx)
+ if p.BuildParseTrees && parentCtx != nil {
+ // add return ctx into invoking rule's tree
+ parentCtx.AddChild(retCtx)
+ }
+}
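+
+// EnterRecursionRule, PushNewRecursionContext, and UnrollRecursionContexts
+// together form the loop that generated code uses for a left-recursive rule
+// such as this hypothetical grammar fragment:
+//
+//	expr : expr '*' expr | INT ;
+//
+// Roughly: each operator match pushes a new context wrapping what has been
+// parsed so far, and when the lookahead no longer extends the expression,
+// UnrollRecursionContexts re-attaches the accumulated tree to the invoking
+// rule's context.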
+
+func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext {
+ ctx := p.ctx
+ for ctx != nil {
+ if ctx.GetRuleIndex() == ruleIndex {
+ return ctx
+ }
+ ctx = ctx.GetParent().(ParserRuleContext)
+ }
+ return nil
+}
+
+func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool {
+ return precedence >= p.precedenceStack[len(p.precedenceStack)-1]
+}
+
+func (p *BaseParser) inContext(context ParserRuleContext) bool {
+ // TODO: useful in parser?
+ return false
+}
+
+//
+// Checks whether or not {@code symbol} can follow the current state in the
+// ATN. The behavior of this method is equivalent to the following, but is
+// implemented such that the complete context-sensitive follow set does not
+// need to be explicitly constructed.
+//
+//	atn := p.GetInterpreter().atn
+//	ctx := p.GetParserRuleContext()
+//	following := atn.NextTokens(atn.states[p.GetState()], ctx)
+//	return following.contains(symbol)
+//
+// @param symbol the symbol type to check
+// @return {@code true} if {@code symbol} can follow the current state in
+// the ATN, otherwise {@code false}.
+
+func (p *BaseParser) IsExpectedToken(symbol int) bool {
+ atn := p.Interpreter.atn
+ ctx := p.ctx
+ s := atn.states[p.state]
+ following := atn.NextTokens(s, nil)
+ if following.contains(symbol) {
+ return true
+ }
+ if !following.contains(TokenEpsilon) {
+ return false
+ }
+ for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) {
+ invokingState := atn.states[ctx.GetInvokingState()]
+ rt := invokingState.GetTransitions()[0]
+ following = atn.NextTokens(rt.(*RuleTransition).followState, nil)
+ if following.contains(symbol) {
+ return true
+ }
+ ctx = ctx.GetParent().(ParserRuleContext)
+ }
+ if following.contains(TokenEpsilon) && symbol == TokenEOF {
+ return true
+ }
+
+ return false
+}
+
+// Computes the set of input symbols which could follow the current parser
+// state and context, as given by {@link //GetState} and {@link //GetContext},
+// respectively.
+//
+// @see ATN//getExpectedTokens(int, RuleContext)
+func (p *BaseParser) GetExpectedTokens() *IntervalSet {
+ return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx)
+}
+
+func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet {
+ atn := p.Interpreter.atn
+ s := atn.states[p.state]
+ return atn.NextTokens(s, nil)
+}
+
+// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.
+func (p *BaseParser) GetRuleIndex(ruleName string) int {
+ var ruleIndex, ok = p.GetRuleIndexMap()[ruleName]
+ if ok {
+ return ruleIndex
+ }
+
+ return -1
+}
+
+// Return a []string of the rule names in your parser instance
+// leading up to a call to the current rule. You could override if
+// you want more details such as the file/line info of where
+// in the ATN a rule is invoked.
+//
+// This is very useful for error messages.
+
+func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string {
+ if c == nil {
+ c = p.ctx
+ }
+ stack := make([]string, 0)
+ for c != nil {
+ // compute what follows who invoked us
+ ruleIndex := c.GetRuleIndex()
+ if ruleIndex < 0 {
+ stack = append(stack, "n/a")
+ } else {
+ stack = append(stack, p.GetRuleNames()[ruleIndex])
+ }
+
+ vp := c.GetParent()
+
+ if vp == nil {
+ break
+ }
+
+ c = vp.(ParserRuleContext)
+ }
+ return stack
+}
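+
+// For example (hypothetical rule names): deep inside a parse nested as
+// prog -> expr -> atom, GetRuleInvocationStack(nil) would return roughly
+//
+//	[]string{"atom", "expr", "prog"}
+//
+// i.e. innermost rule first.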
+
+// For debugging and other purposes.
+func (p *BaseParser) GetDFAStrings() string {
+ return fmt.Sprint(p.Interpreter.decisionToDFA)
+}
+
+// For debugging and other purposes.
+func (p *BaseParser) DumpDFA() {
+ seenOne := false
+ for _, dfa := range p.Interpreter.decisionToDFA {
+ if dfa.states.Len() > 0 {
+ if seenOne {
+ fmt.Println()
+ }
+ fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":")
+ fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames))
+ seenOne = true
+ }
+ }
+}
+
+func (p *BaseParser) GetSourceName() string {
+ return p.GrammarFileName
+}
+
+// During a parse it is sometimes useful to listen in on the rule entry and exit
+// events as well as token matches. This is for quick and dirty debugging.
+func (p *BaseParser) SetTrace(trace *TraceListener) {
+ if trace == nil {
+ p.RemoveParseListener(p.tracer)
+ p.tracer = nil
+ } else {
+ if p.tracer != nil {
+ p.RemoveParseListener(p.tracer)
+ }
+ p.tracer = NewTraceListener(p)
+ p.AddParseListener(p.tracer)
+ }
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go
new file mode 100644
index 0000000..8bcc46a
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go
@@ -0,0 +1,1559 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+var (
+ ParserATNSimulatorDebug = false
+ ParserATNSimulatorTraceATNSim = false
+ ParserATNSimulatorDFADebug = false
+ ParserATNSimulatorRetryDebug = false
+ TurnOffLRLoopEntryBranchOpt = false
+)
+
+type ParserATNSimulator struct {
+ *BaseATNSimulator
+
+ parser Parser
+ predictionMode int
+ input TokenStream
+ startIndex int
+ dfa *DFA
+ mergeCache *DoubleDict
+ outerContext ParserRuleContext
+}
+
+func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator {
+
+ p := new(ParserATNSimulator)
+
+ p.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache)
+
+ p.parser = parser
+ p.decisionToDFA = decisionToDFA
+ // SLL, LL, or LL + exact ambig detection?//
+ p.predictionMode = PredictionModeLL
+ // LAME globals to avoid parameters!!!!! I need these down deep in predTransition
+ p.input = nil
+ p.startIndex = 0
+ p.outerContext = nil
+ p.dfa = nil
+ // Each prediction operation uses a cache for merge of prediction contexts.
+ // Don't keep around as it wastes huge amounts of memory. DoubleKeyMap
+ // isn't Synchronized but we're ok since two threads shouldn't reuse same
+ // parser/atnsim object because it can only handle one input at a time.
+ // This maps graphs a and b to merged result c. (a,b)→c. We can avoid
+ // the merge if we ever see a and b again. Note that (b,a)→c should
+ // also be examined during cache lookup.
+ //
+ p.mergeCache = nil
+
+ return p
+}
+
+func (p *ParserATNSimulator) GetPredictionMode() int {
+ return p.predictionMode
+}
+
+func (p *ParserATNSimulator) SetPredictionMode(v int) {
+ p.predictionMode = v
+}
+
+func (p *ParserATNSimulator) reset() {
+}
+
+func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext ParserRuleContext) int {
+ if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim {
+ fmt.Println("adaptivePredict decision " + strconv.Itoa(decision) +
+ " exec LA(1)==" + p.getLookaheadName(input) +
+ " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" +
+ strconv.Itoa(input.LT(1).GetColumn()))
+ }
+
+ p.input = input
+ p.startIndex = input.Index()
+ p.outerContext = outerContext
+
+ dfa := p.decisionToDFA[decision]
+ p.dfa = dfa
+ m := input.Mark()
+ index := input.Index()
+
+ defer func() {
+ p.dfa = nil
+ p.mergeCache = nil // wack cache after each prediction
+ input.Seek(index)
+ input.Release(m)
+ }()
+
+ // Now we are certain to have a specific decision's DFA
+ // But, do we still need an initial state?
+ var s0 *DFAState
+ p.atn.stateMu.RLock()
+ if dfa.getPrecedenceDfa() {
+ p.atn.edgeMu.RLock()
+ // the start state for a precedence DFA depends on the current
+ // parser precedence, and is provided by a DFA method.
+ s0 = dfa.getPrecedenceStartState(p.parser.GetPrecedence())
+ p.atn.edgeMu.RUnlock()
+ } else {
+ // the start state for a "regular" DFA is just s0
+ s0 = dfa.getS0()
+ }
+ p.atn.stateMu.RUnlock()
+
+ if s0 == nil {
+ if outerContext == nil {
+ outerContext = ParserRuleContextEmpty
+ }
+ if ParserATNSimulatorDebug {
+ fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) +
+ " exec LA(1)==" + p.getLookaheadName(input) +
+ ", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil))
+ }
+ fullCtx := false
+ s0Closure := p.computeStartState(dfa.atnStartState, ParserRuleContextEmpty, fullCtx)
+
+ p.atn.stateMu.Lock()
+ if dfa.getPrecedenceDfa() {
+ // If this is a precedence DFA, we use applyPrecedenceFilter
+ // to convert the computed start state to a precedence start
+ // state. We then use DFA.setPrecedenceStartState to set the
+ // appropriate start state for the precedence level rather
+ // than simply setting DFA.s0.
+ //
+ dfa.s0.configs = s0Closure
+ s0Closure = p.applyPrecedenceFilter(s0Closure)
+ s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure))
+ p.atn.edgeMu.Lock()
+ dfa.setPrecedenceStartState(p.parser.GetPrecedence(), s0)
+ p.atn.edgeMu.Unlock()
+ } else {
+ s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure))
+ dfa.setS0(s0)
+ }
+ p.atn.stateMu.Unlock()
+ }
+
+ alt := p.execATN(dfa, s0, input, index, outerContext)
+ if ParserATNSimulatorDebug {
+ fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil))
+ }
+ return alt
+
+}
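+
+// Generated parsers drive prediction from decision points with calls of this
+// shape (the decision number 0 here is hypothetical):
+//
+//	switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 0, p.GetParserRuleContext()) {
+//	case 1:
+//		// code for the first alternative
+//	case 2:
+//		// code for the second alternative
+//	}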
+
+// Performs ATN simulation to compute a predicted alternative based
+// upon the remaining input, but also updates the DFA cache to avoid
+// having to traverse the ATN again for the same input sequence.
+
+// There are some key conditions we're looking for after computing a new
+// set of ATN configs (proposed DFA state):
+//   - if the set is empty, there is no viable alternative for the current symbol
+//   - does the state uniquely predict an alternative?
+//   - does the state have a conflict that would prevent us from
+//     putting it on the work list?
+//
+// We also have some key operations to do:
+//   - add an edge from the previous DFA state to a potentially new DFA state, D,
+//     upon the current symbol, but only if adding to the work list, which means
+//     in all cases except no viable alternative (and possibly non-greedy decisions?)
+//   - collecting predicates and adding semantic context to DFA accept states
+//   - adding rule context to context-sensitive DFA accept states
+//   - consuming an input symbol
+//   - Reporting a conflict
+//   - Reporting an ambiguity
+//   - Reporting a context sensitivity
+//   - Reporting insufficient predicates
+//
+// Cover these cases:
+//   - dead end
+//   - single alt
+//   - single alt + preds
+//   - conflict
+//   - conflict + preds
+func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) int {
+
+ if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim {
+ fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) +
+ ", DFA state " + s0.String() +
+ ", LA(1)==" + p.getLookaheadName(input) +
+ " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn()))
+ }
+
+ previousD := s0
+
+ if ParserATNSimulatorDebug {
+ fmt.Println("s0 = " + s0.String())
+ }
+ t := input.LA(1)
+ for { // for more work
+ D := p.getExistingTargetState(previousD, t)
+ if D == nil {
+ D = p.computeTargetState(dfa, previousD, t)
+ }
+ if D == ATNSimulatorError {
+ // if any configs in previous dipped into outer context, that
+ // means that input up to t actually finished entry rule
+ // at least for SLL decision. Full LL doesn't dip into outer
+ // so don't need special case.
+ // We will get an error no matter what so delay until after
+ // decision better error message. Also, no reachable target
+ // ATN states in SLL implies LL will also get nowhere.
+ // If conflict in states that dip out, choose min since we
+ // will get error no matter what.
+ e := p.noViableAlt(input, outerContext, previousD.configs, startIndex)
+ input.Seek(startIndex)
+ alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext)
+ if alt != ATNInvalidAltNumber {
+ return alt
+ }
+
+ panic(e)
+ }
+ if D.requiresFullContext && p.predictionMode != PredictionModeSLL {
+ // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error)
+ conflictingAlts := D.configs.GetConflictingAlts()
+ if D.predicates != nil {
+ if ParserATNSimulatorDebug {
+ fmt.Println("DFA state has preds in DFA sim LL failover")
+ }
+ conflictIndex := input.Index()
+ if conflictIndex != startIndex {
+ input.Seek(startIndex)
+ }
+ conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true)
+ if conflictingAlts.length() == 1 {
+ if ParserATNSimulatorDebug {
+ fmt.Println("Full LL avoided")
+ }
+ return conflictingAlts.minValue()
+ }
+ if conflictIndex != startIndex {
+ // restore the index so Reporting the fallback to full
+ // context occurs with the index at the correct spot
+ input.Seek(conflictIndex)
+ }
+ }
+ if ParserATNSimulatorDFADebug {
+ fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String())
+ }
+ fullCtx := true
+ s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx)
+ p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index())
+ alt := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext)
+ return alt
+ }
+ if D.isAcceptState {
+ if D.predicates == nil {
+ return D.prediction
+ }
+ stopIndex := input.Index()
+ input.Seek(startIndex)
+ alts := p.evalSemanticContext(D.predicates, outerContext, true)
+
+ switch alts.length() {
+ case 0:
+ panic(p.noViableAlt(input, outerContext, D.configs, startIndex))
+ case 1:
+ return alts.minValue()
+ default:
+ // Report ambiguity after predicate evaluation to make sure the correct set of ambig alts is Reported.
+ p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs)
+ return alts.minValue()
+ }
+ }
+ previousD = D
+
+ if t != TokenEOF {
+ input.Consume()
+ t = input.LA(1)
+ }
+ }
+}
+
+// Get an existing target state for an edge in the DFA. If the target state
+// for the edge has not yet been computed or is otherwise not available,
+// this method returns {@code nil}.
+//
+// @param previousD The current DFA state
+// @param t The next input symbol
+// @return The existing target DFA state for the given input symbol
+// {@code t}, or {@code nil} if the target state for this edge is not
+// already cached
+
+func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) *DFAState {
+ if t+1 < 0 {
+ return nil
+ }
+
+ p.atn.edgeMu.RLock()
+ defer p.atn.edgeMu.RUnlock()
+ edges := previousD.getEdges()
+ if edges == nil || t+1 >= len(edges) {
+ return nil
+ }
+ return previousD.getIthEdge(t + 1)
+}
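+
+// The t+1 offset above lets TokenEOF (-1) masquerade as edge index 0, so an
+// edge slice of length n addresses symbols -1..n-2:
+//
+//	edges[t+1] // EOF (-1) -> slot 0, symbol 0 -> slot 1, ...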
+
+// Compute a target state for an edge in the DFA, and attempt to add the
+// computed state and corresponding edge to the DFA.
+//
+// @param dfa The DFA
+// @param previousD The current DFA state
+// @param t The next input symbol
+//
+// @return The computed target DFA state for the given input symbol
+// {@code t}. If {@code t} does not lead to a valid DFA state, this method
+// returns {@link //ERROR}.
+
+func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState {
+ reach := p.computeReachSet(previousD.configs, t, false)
+
+ if reach == nil {
+ p.addDFAEdge(dfa, previousD, t, ATNSimulatorError)
+ return ATNSimulatorError
+ }
+ // create new target state we'll add to DFA after it's complete
+ D := NewDFAState(-1, reach)
+
+ predictedAlt := p.getUniqueAlt(reach)
+
+ if ParserATNSimulatorDebug {
+ altSubSets := PredictionModegetConflictingAltSubsets(reach)
+ fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) +
+ ", previous=" + previousD.configs.String() +
+ ", configs=" + reach.String() +
+ ", predict=" + strconv.Itoa(predictedAlt) +
+ ", allSubsetsConflict=" +
+ fmt.Sprint(PredictionModeallSubsetsConflict(altSubSets)) +
+ ", conflictingAlts=" + p.getConflictingAlts(reach).String())
+ }
+ if predictedAlt != ATNInvalidAltNumber {
+ // NO CONFLICT, UNIQUELY PREDICTED ALT
+ D.isAcceptState = true
+ D.configs.SetUniqueAlt(predictedAlt)
+ D.setPrediction(predictedAlt)
+ } else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) {
+ // MORE THAN ONE VIABLE ALTERNATIVE
+ D.configs.SetConflictingAlts(p.getConflictingAlts(reach))
+ D.requiresFullContext = true
+ // in SLL-only mode, we will stop at this state and return the minimum alt
+ D.isAcceptState = true
+ D.setPrediction(D.configs.GetConflictingAlts().minValue())
+ }
+ if D.isAcceptState && D.configs.HasSemanticContext() {
+ p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision))
+ if D.predicates != nil {
+ D.setPrediction(ATNInvalidAltNumber)
+ }
+ }
+ // all adds to dfa are done after we've created full D state
+ D = p.addDFAEdge(dfa, previousD, t, D)
+ return D
+}
+
+func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState DecisionState) {
+ // We need to test all predicates, even in DFA states that
+ // uniquely predict alternative.
+ nalts := len(decisionState.GetTransitions())
+ // Update DFA so reach becomes accept state with (predicate,alt)
+ // pairs if preds found for conflicting alts
+ altsToCollectPredsFrom := p.getConflictingAltsOrUniqueAlt(dfaState.configs)
+ altToPred := p.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts)
+ if altToPred != nil {
+ dfaState.predicates = p.getPredicatePredictions(altsToCollectPredsFrom, altToPred)
+ dfaState.setPrediction(ATNInvalidAltNumber) // make sure we use preds
+ } else {
+ // There are preds in configs but they might go away
+ // when OR'd together like {p}? || NONE == NONE. If neither
+ // alt has preds, resolve to min alt
+ dfaState.setPrediction(altsToCollectPredsFrom.minValue())
+ }
+}
+
+// comes back with reach.uniqueAlt set to a valid alt
+func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) int {
+
+ if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim {
+ fmt.Println("execATNWithFullContext " + s0.String())
+ }
+
+ fullCtx := true
+ foundExactAmbig := false
+ var reach ATNConfigSet
+ previous := s0
+ input.Seek(startIndex)
+ t := input.LA(1)
+ predictedAlt := -1
+
+ for { // for more work
+ reach = p.computeReachSet(previous, t, fullCtx)
+ if reach == nil {
+ // if any configs in previous dipped into outer context, that
+ // means that input up to t actually finished entry rule
+ // at least for LL decision. Full LL doesn't dip into outer
+ // so don't need special case.
+ // We will get an error no matter what so delay until after
+ // decision better error message. Also, no reachable target
+ // ATN states in SLL implies LL will also get nowhere.
+ // If conflict in states that dip out, choose min since we
+ // will get error no matter what.
+ e := p.noViableAlt(input, outerContext, previous, startIndex)
+ input.Seek(startIndex)
+ alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext)
+ if alt != ATNInvalidAltNumber {
+ return alt
+ }
+
+ panic(e)
+ }
+ altSubSets := PredictionModegetConflictingAltSubsets(reach)
+ if ParserATNSimulatorDebug {
+ fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" +
+ strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" +
+ fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets)))
+ }
+ reach.SetUniqueAlt(p.getUniqueAlt(reach))
+ // unique prediction?
+ if reach.GetUniqueAlt() != ATNInvalidAltNumber {
+ predictedAlt = reach.GetUniqueAlt()
+ break
+ }
+ if p.predictionMode != PredictionModeLLExactAmbigDetection {
+ predictedAlt = PredictionModeresolvesToJustOneViableAlt(altSubSets)
+ if predictedAlt != ATNInvalidAltNumber {
+ break
+ }
+ } else {
+ // In exact ambiguity mode, we never try to terminate early.
+ // Just keeps scarfing until we know what the conflict is
+ if PredictionModeallSubsetsConflict(altSubSets) && PredictionModeallSubsetsEqual(altSubSets) {
+ foundExactAmbig = true
+ predictedAlt = PredictionModegetSingleViableAlt(altSubSets)
+ break
+ }
+ // else there are multiple non-conflicting subsets or
+ // we're not sure what the ambiguity is yet.
+ // So, keep going.
+ }
+ previous = reach
+ if t != TokenEOF {
+ input.Consume()
+ t = input.LA(1)
+ }
+ }
+ // If the configuration set uniquely predicts an alternative,
+ // without conflict, then we know that it's a full LL decision
+ // not SLL.
+ if reach.GetUniqueAlt() != ATNInvalidAltNumber {
+ p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index())
+ return predictedAlt
+ }
+ // We do not check predicates here because we have checked them
+ // on-the-fly when doing full context prediction.
+
+ //
+ // In non-exact ambiguity detection mode, we might actually be able to
+ // detect an exact ambiguity, but I'm not going to spend the cycles
+ // needed to check. We only emit ambiguity warnings in exact ambiguity
+ // mode.
+ //
+ // For example, we might know that we have conflicting configurations.
+ // But, that does not mean that there is no way forward without a
+ // conflict. It's possible to have nonconflicting alt subsets as in:
+
+ // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}]
+
+ // from
+ //
+ // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]),
+ // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])]
+ //
+ // In this case, (17,1,[5 $]) indicates there is some next sequence that
+ // would resolve this without conflict to alternative 1. Any other viable
+ // next sequence, however, is associated with a conflict. We stop
+ // looking for input because no amount of further lookahead will alter
+ // the fact that we should predict alternative 1. We just can't say for
+ // sure that there is an ambiguity without looking further.
+
+ p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, reach.Alts(), reach)
+
+ return predictedAlt
+}
+
+func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCtx bool) ATNConfigSet {
+ if p.mergeCache == nil {
+ p.mergeCache = NewDoubleDict()
+ }
+ intermediate := NewBaseATNConfigSet(fullCtx)
+
+ // Configurations already in a rule stop state indicate reaching the end
+ // of the decision rule (local context) or end of the start rule (full
+ // context). Once reached, these configurations are never updated by a
+ // closure operation, so they are handled separately for the performance
+ // advantage of having a smaller intermediate set when calling closure.
+ //
+ // For full-context reach operations, separate handling is required to
+ // ensure that the alternative Matching the longest overall sequence is
+ // chosen when multiple such configurations can Match the input.
+
+ var skippedStopStates []*BaseATNConfig
+
+ // First figure out where we can reach on input t
+ for _, c := range closure.GetItems() {
+ if ParserATNSimulatorDebug {
+ fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String())
+ }
+
+ if _, ok := c.GetState().(*RuleStopState); ok {
+ if fullCtx || t == TokenEOF {
+ skippedStopStates = append(skippedStopStates, c.(*BaseATNConfig))
+ if ParserATNSimulatorDebug {
+ fmt.Println("added " + c.String() + " to SkippedStopStates")
+ }
+ }
+ continue
+ }
+
+ for _, trans := range c.GetState().GetTransitions() {
+ target := p.getReachableTarget(trans, t)
+ if target != nil {
+ cfg := NewBaseATNConfig4(c, target)
+ intermediate.Add(cfg, p.mergeCache)
+ if ParserATNSimulatorDebug {
+ fmt.Println("added " + cfg.String() + " to intermediate")
+ }
+ }
+ }
+ }
+
+ // Now figure out where the reach operation can take us...
+ var reach ATNConfigSet
+
+ // This block optimizes the reach operation for intermediate sets which
+ // trivially indicate a termination state for the overall
+ // AdaptivePredict operation.
+ //
+ // The conditions assume that intermediate
+ // contains all configurations relevant to the reach set, but this
+ // condition is not true when one or more configurations have been
+ // withheld in SkippedStopStates, or when the current symbol is EOF.
+ //
+ if skippedStopStates == nil && t != TokenEOF {
+ if len(intermediate.configs) == 1 {
+ // Don't pursue the closure if there is just one state.
+ // It can only have one alternative; just add to result.
+ reach = intermediate
+ } else if p.getUniqueAlt(intermediate) != ATNInvalidAltNumber {
+ // Also don't pursue the closure if there is unique alternative
+ // among the configurations.
+ reach = intermediate
+ }
+ }
+ // If the reach set could not be trivially determined, perform a closure
+ // operation on the intermediate set to compute its initial value.
+ //
+ if reach == nil {
+ reach = NewBaseATNConfigSet(fullCtx)
+ closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst)
+ treatEOFAsEpsilon := t == TokenEOF
+ amount := len(intermediate.configs)
+ for k := 0; k < amount; k++ {
+ p.closure(intermediate.configs[k], reach, closureBusy, false, fullCtx, treatEOFAsEpsilon)
+ }
+ }
+ if t == TokenEOF {
+ // After consuming EOF no additional input is possible, so we are
+ // only interested in configurations which reached the end of the
+ // decision rule (local context) or end of the start rule (full
+ // context). Update reach to contain only these configurations. This
+ // handles both explicit EOF transitions in the grammar and implicit
+ // EOF transitions following the end of the decision or start rule.
+ //
+ // When reach==intermediate, no closure operation was performed. In
+ // this case, removeAllConfigsNotInRuleStopState needs to check for
+ // reachable rule stop states as well as configurations already in
+ // a rule stop state.
+ //
+ // This is handled before the configurations in SkippedStopStates,
+ // because any configurations potentially added from that list are
+ // already guaranteed to meet this condition whether or not it's
+ // required.
+ //
+ reach = p.removeAllConfigsNotInRuleStopState(reach, reach == intermediate)
+ }
+ // If SkippedStopStates!=nil, then it contains at least one
+ // configuration. For full-context reach operations, these
+ // configurations reached the end of the start rule, in which case we
+ // only add them back to reach if no configuration during the current
+ // closure operation reached such a state. This ensures AdaptivePredict
+ // chooses an alternative Matching the longest overall sequence when
+ // multiple alternatives are viable.
+ //
+ if skippedStopStates != nil && ((!fullCtx) || (!PredictionModehasConfigInRuleStopState(reach))) {
+ for l := 0; l < len(skippedStopStates); l++ {
+ reach.Add(skippedStopStates[l], p.mergeCache)
+ }
+ }
+
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("computeReachSet " + closure.String() + " -> " + reach.String())
+ }
+
+ if len(reach.GetItems()) == 0 {
+ return nil
+ }
+
+ return reach
+}
+
+// Return a configuration set containing only the configurations from
+// {@code configs} which are in a {@link RuleStopState}. If all
+// configurations in {@code configs} are already in a rule stop state, this
+// method simply returns {@code configs}.
+//
+//
+// When {@code lookToEndOfRule} is true, this method uses
+// {@link ATN//NextTokens} for each configuration in {@code configs} which is
+// not already in a rule stop state to see if a rule stop state is reachable
+// from the configuration via epsilon-only transitions.
+//
+// @param configs the configuration set to update
+// @param lookToEndOfRule when true, this method checks for rule stop states
+// reachable by epsilon-only transitions from each configuration in
+// {@code configs}.
+//
+// @return {@code configs} if all configurations in {@code configs} are in a
+// rule stop state, otherwise return a new configuration set containing only
+// the configurations from {@code configs} which are in a rule stop state
+func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs ATNConfigSet, lookToEndOfRule bool) ATNConfigSet {
+ if PredictionModeallConfigsInRuleStopStates(configs) {
+ return configs
+ }
+ result := NewBaseATNConfigSet(configs.FullContext())
+ for _, config := range configs.GetItems() {
+ if _, ok := config.GetState().(*RuleStopState); ok {
+ result.Add(config, p.mergeCache)
+ continue
+ }
+ if lookToEndOfRule && config.GetState().GetEpsilonOnlyTransitions() {
+ NextTokens := p.atn.NextTokens(config.GetState(), nil)
+ if NextTokens.contains(TokenEpsilon) {
+ endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()]
+ result.Add(NewBaseATNConfig4(config, endOfRuleState), p.mergeCache)
+ }
+ }
+ }
+ return result
+}
+
+func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) ATNConfigSet {
+ // always at least the implicit call to start rule
+ initialContext := predictionContextFromRuleContext(p.atn, ctx)
+ configs := NewBaseATNConfigSet(fullCtx)
+ if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim {
+ fmt.Println("computeStartState from ATN state " + a.String() +
+ " initialContext=" + initialContext.String())
+ }
+
+ for i := 0; i < len(a.GetTransitions()); i++ {
+ target := a.GetTransitions()[i].getTarget()
+ c := NewBaseATNConfig6(target, i+1, initialContext)
+ closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst)
+ p.closure(c, configs, closureBusy, true, fullCtx, false)
+ }
+ return configs
+}
+
+// This method transforms the start state computed by
+// {@link //computeStartState} to the special start state used by a
+// precedence DFA for a particular precedence value. The transformation
+// process applies the following changes to the start state's configuration
+// set.
+//
+//
+//  1. Evaluate the precedence predicates for each configuration using
+//     {@link SemanticContext//evalPrecedence}.
+//  2. Remove all configurations which predict an alternative greater than
+//     1, for which another configuration that predicts alternative 1 is in the
+//     same ATN state with the same prediction context. This transformation is
+//     valid for the following reasons:
+//       - The closure block cannot contain any epsilon transitions which bypass
+//         the body of the closure, so all states reachable via alternative 1 are
+//         part of the precedence alternatives of the transformed left-recursive
+//         rule.
+//       - The "primary" portion of a left recursive rule cannot contain an
+//         epsilon transition, so the only way an alternative other than 1 can
+//         exist in a state that is also reachable via alternative 1 is by
+//         nesting calls to the left-recursive rule, with the outer calls not
+//         being at the preferred precedence level.
+//
+// The prediction context must be considered by this filter to address
+// situations like the following:
+//
+//	grammar TA;
+//	prog: statement* EOF;
+//	statement: letterA | statement letterA 'b' ;
+//	letterA: 'a';
+//
+// In the above grammar, the ATN state immediately before the token
+// reference {@code 'a'} in {@code letterA} is reachable from the left edge
+// of both the primary and closure blocks of the left-recursive rule
+// {@code statement}. The prediction context associated with each of these
+// configurations distinguishes between them, and prevents the alternative
+// which stepped out to {@code prog} (and then back in to {@code statement}
+// from being eliminated by the filter.
+//
+//
+// @param configs The configuration set computed by
+// {@link //computeStartState} as the start state for the DFA.
+// @return The transformed configuration set representing the start state
+// for a precedence DFA at a particular precedence level (determined by
+// calling {@link Parser//getPrecedence}).
+func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConfigSet {
+
+ statesFromAlt1 := make(map[int]PredictionContext)
+ configSet := NewBaseATNConfigSet(configs.FullContext())
+
+ for _, config := range configs.GetItems() {
+ // handle alt 1 first
+ if config.GetAlt() != 1 {
+ continue
+ }
+ updatedContext := config.GetSemanticContext().evalPrecedence(p.parser, p.outerContext)
+ if updatedContext == nil {
+ // the configuration was eliminated
+ continue
+ }
+ statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext()
+ if updatedContext != config.GetSemanticContext() {
+ configSet.Add(NewBaseATNConfig2(config, updatedContext), p.mergeCache)
+ } else {
+ configSet.Add(config, p.mergeCache)
+ }
+ }
+ for _, config := range configs.GetItems() {
+
+ if config.GetAlt() == 1 {
+ // already handled
+ continue
+ }
+		// In the future, this elimination step could be updated to also
+ // filter the prediction context for alternatives predicting alt>1
+ // (basically a graph subtraction algorithm).
+ if !config.getPrecedenceFilterSuppressed() {
+ context := statesFromAlt1[config.GetState().GetStateNumber()]
+ if context != nil && context.Equals(config.GetContext()) {
+ // eliminated
+ continue
+ }
+ }
+ configSet.Add(config, p.mergeCache)
+ }
+ return configSet
+}
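+
+// For illustration (an editor's sketch with hypothetical configurations):
+// given a start state containing (s, alt 1, ctx x) and (s, alt 2, ctx x),
+// the first pass records s -> x in statesFromAlt1, and the second pass
+// drops the alt-2 entry because an alt-1 configuration already covers the
+// same ATN state with an equal context; an entry like (s', alt 2, ctx y)
+// with no alt-1 counterpart at s' would be kept.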
+
+func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATNState {
+ if trans.Matches(ttype, 0, p.atn.maxTokenType) {
+ return trans.getTarget()
+ }
+
+ return nil
+}
+
+func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs ATNConfigSet, nalts int) []SemanticContext {
+
+ altToPred := make([]SemanticContext, nalts+1)
+ for _, c := range configs.GetItems() {
+ if ambigAlts.contains(c.GetAlt()) {
+ altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext())
+ }
+ }
+ nPredAlts := 0
+ for i := 1; i <= nalts; i++ {
+ pred := altToPred[i]
+ if pred == nil {
+ altToPred[i] = SemanticContextNone
+ } else if pred != SemanticContextNone {
+ nPredAlts++
+ }
+ }
+ // nonambig alts are nil in altToPred
+ if nPredAlts == 0 {
+ altToPred = nil
+ }
+ if ParserATNSimulatorDebug {
+ fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred))
+ }
+ return altToPred
+}
+
+func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPred []SemanticContext) []*PredPrediction {
+ pairs := make([]*PredPrediction, 0)
+ containsPredicate := false
+ for i := 1; i < len(altToPred); i++ {
+ pred := altToPred[i]
+ // unpredicated is indicated by SemanticContextNONE
+ if ambigAlts != nil && ambigAlts.contains(i) {
+ pairs = append(pairs, NewPredPrediction(pred, i))
+ }
+ if pred != SemanticContextNone {
+ containsPredicate = true
+ }
+ }
+ if !containsPredicate {
+ return nil
+ }
+ return pairs
+}
+
+// This method is used to improve the localization of error messages by
+// choosing an alternative rather than panicking with a
+// {@link NoViableAltException} in particular prediction scenarios where the
+// {@link //ERROR} state was reached during ATN simulation.
+//
+//
+// The default implementation of this method uses the following
+// algorithm to identify an ATN configuration which successfully parsed the
+// decision entry rule. Choosing such an alternative ensures that the
+// {@link ParserRuleContext} returned by the calling rule will be complete
+// and valid, and the syntax error will be Reported later at a more
+// localized location.
+//
+//
+//
+// 1. If a syntactically valid path or paths reach the end of the decision rule,
+// and they are semantically valid if predicated, return the minimum associated alt.
+//
+// 2. Else, if a semantically invalid but syntactically valid path
+// or paths exist, return the minimum associated alt.
+//
+//
+// In some scenarios, the algorithm described above could predict an
+// alternative which will result in a {@link FailedPredicateException} in
+// the parser. Specifically, this could occur if the only configuration
+// capable of successfully parsing to the end of the decision rule is
+// blocked by a semantic predicate. By choosing this alternative within
+// {@link //AdaptivePredict} instead of panicking with a
+// {@link NoViableAltException}, the resulting
+// {@link FailedPredicateException} in the parser will identify the specific
+// predicate which is preventing the parser from successfully parsing the
+// decision rule, which helps developers identify and correct logic errors
+// in semantic predicates.
+//
+//
+// @param configs The ATN configurations which were valid immediately before
+// the {@link //ERROR} state was reached
+// @param outerContext The \gamma_0 initial parser context from the paper,
+// or the parser stack at the instant before prediction commences.
+//
+// @return The value to return from {@link //AdaptivePredict}, or
+// {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not
+// identified and {@link //AdaptivePredict} should Report an error instead.
+func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs ATNConfigSet, outerContext ParserRuleContext) int {
+ cfgs := p.splitAccordingToSemanticValidity(configs, outerContext)
+ semValidConfigs := cfgs[0]
+ semInvalidConfigs := cfgs[1]
+ alt := p.GetAltThatFinishedDecisionEntryRule(semValidConfigs)
+ if alt != ATNInvalidAltNumber { // semantically/syntactically viable path exists
+ return alt
+ }
+ // Is there a syntactically valid path with a failed pred?
+ if len(semInvalidConfigs.GetItems()) > 0 {
+ alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs)
+ if alt != ATNInvalidAltNumber { // syntactically viable path exists
+ return alt
+ }
+ }
+ return ATNInvalidAltNumber
+}
+
+func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs ATNConfigSet) int {
+ alts := NewIntervalSet()
+
+ for _, c := range configs.GetItems() {
+ _, ok := c.GetState().(*RuleStopState)
+
+ if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) {
+ alts.addOne(c.GetAlt())
+ }
+ }
+ if alts.length() == 0 {
+ return ATNInvalidAltNumber
+ }
+
+ return alts.first()
+}
+
+// Walk the list of configurations and split them according to
+// those that have preds evaluating to true/false. If no pred, assume
+// true pred and include in succeeded set. Returns Pair of sets.
+//
+// Create a new set so as not to alter the incoming parameter.
+//
+// Assumption: the input stream has been restored to the starting point for
+// prediction, which is where predicates need to evaluate.
+
+type ATNConfigSetPair struct {
+ item0, item1 ATNConfigSet
+}
+
+func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs ATNConfigSet, outerContext ParserRuleContext) []ATNConfigSet {
+ succeeded := NewBaseATNConfigSet(configs.FullContext())
+ failed := NewBaseATNConfigSet(configs.FullContext())
+
+ for _, c := range configs.GetItems() {
+ if c.GetSemanticContext() != SemanticContextNone {
+ predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext)
+ if predicateEvaluationResult {
+ succeeded.Add(c, nil)
+ } else {
+ failed.Add(c, nil)
+ }
+ } else {
+ succeeded.Add(c, nil)
+ }
+ }
+ return []ATNConfigSet{succeeded, failed}
+}
+
+// Look through a list of predicate/alt pairs, returning alts for the
+// pairs that win. A {@code NONE} predicate indicates an alt containing an
+// unpredicated config which behaves as "always true." If !complete
+// then we stop at the first predicate that evaluates to true. This
+// includes pairs with nil predicates.
+func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet {
+ predictions := NewBitSet()
+ for i := 0; i < len(predPredictions); i++ {
+ pair := predPredictions[i]
+ if pair.pred == SemanticContextNone {
+ predictions.add(pair.alt)
+ if !complete {
+ break
+ }
+ continue
+ }
+
+ predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext)
+ if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug {
+ fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult))
+ }
+ if predicateEvaluationResult {
+ if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug {
+ fmt.Println("PREDICT " + fmt.Sprint(pair.alt))
+ }
+ predictions.add(pair.alt)
+ if !complete {
+ break
+ }
+ }
+ }
+ return predictions
+}
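+
+// Example (editor's sketch): with pairs [(pred1, 1), (NONE, 2)] and
+// complete == false, a true pred1 yields {1} and stops; a failing pred1
+// falls through to the NONE pair, which is unconditionally admitted, so
+// the result is {2}. With complete == true, every pair is evaluated and
+// all winning alts are collected into the returned BitSet.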
+
+func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx, treatEOFAsEpsilon bool) {
+ initialDepth := 0
+ p.closureCheckingStopState(config, configs, closureBusy, collectPredicates,
+ fullCtx, initialDepth, treatEOFAsEpsilon)
+}
+
+func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("closure(" + config.String() + ")")
+ //fmt.Println("configs(" + configs.String() + ")")
+ if config.GetReachesIntoOuterContext() > 50 {
+ panic("problem")
+ }
+ }
+
+ if _, ok := config.GetState().(*RuleStopState); ok {
+ // We hit rule end. If we have context info, use it
+ // run thru all possible stack tops in ctx
+ if !config.GetContext().isEmpty() {
+ for i := 0; i < config.GetContext().length(); i++ {
+ if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState {
+ if fullCtx {
+ configs.Add(NewBaseATNConfig1(config, config.GetState(), BasePredictionContextEMPTY), p.mergeCache)
+ continue
+ } else {
+ // we have no context info, just chase follow links (if greedy)
+ if ParserATNSimulatorDebug {
+ fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex()))
+ }
+ p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
+ }
+ continue
+ }
+ returnState := p.atn.states[config.GetContext().getReturnState(i)]
+ newContext := config.GetContext().GetParent(i) // "pop" return state
+
+ c := NewBaseATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext())
+ // While we have context to pop back from, we may have
+			// gotten that context AFTER having fallen off a rule.
+ // Make sure we track that we are now out of context.
+ c.SetReachesIntoOuterContext(config.GetReachesIntoOuterContext())
+ p.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth-1, treatEOFAsEpsilon)
+ }
+ return
+ } else if fullCtx {
+ // reached end of start rule
+ configs.Add(config, p.mergeCache)
+ return
+ } else {
+ // else if we have no context info, just chase follow links (if greedy)
+ if ParserATNSimulatorDebug {
+ fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex()))
+ }
+ }
+ }
+ p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
+}
+
+// Do the actual work of walking epsilon edges//
+func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+ state := config.GetState()
+ // optimization
+ if !state.GetEpsilonOnlyTransitions() {
+ configs.Add(config, p.mergeCache)
+ // make sure to not return here, because EOF transitions can act as
+ // both epsilon transitions and non-epsilon transitions.
+ }
+ for i := 0; i < len(state.GetTransitions()); i++ {
+ if i == 0 && p.canDropLoopEntryEdgeInLeftRecursiveRule(config) {
+ continue
+ }
+
+ t := state.GetTransitions()[i]
+ _, ok := t.(*ActionTransition)
+ continueCollecting := collectPredicates && !ok
+ c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon)
+ if ci, ok := c.(*BaseATNConfig); ok && ci != nil {
+ newDepth := depth
+
+ if _, ok := config.GetState().(*RuleStopState); ok {
+				// target fell off end of rule; mark resulting c as having dipped into outer context.
+				// We can't get here if incoming config was rule stop and we had context.
+				// Track how far we dip into outer context. Might
+				// come in handy and we avoid evaluating context dependent
+				// preds if this count is > 0.
+
+ if p.dfa != nil && p.dfa.getPrecedenceDfa() {
+ if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() {
+ c.setPrecedenceFilterSuppressed(true)
+ }
+ }
+
+ c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1)
+
+ _, present := closureBusy.Put(c)
+ if present {
+ // avoid infinite recursion for right-recursive rules
+ continue
+ }
+
+				configs.SetDipsIntoOuterContext(true) // TODO: can remove? only care when we add to set per middle of this method
+ newDepth--
+ if ParserATNSimulatorDebug {
+ fmt.Println("dips into outer ctx: " + c.String())
+ }
+ } else {
+
+ if !t.getIsEpsilon() {
+ _, present := closureBusy.Put(c)
+ if present {
+ // avoid infinite recursion for EOF* and EOF+
+ continue
+ }
+ }
+ if _, ok := t.(*RuleTransition); ok {
+ // latch when newDepth goes negative - once we step out of the entry context we can't return
+ if newDepth >= 0 {
+ newDepth++
+ }
+ }
+ }
+ p.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEOFAsEpsilon)
+ }
+ }
+}
+
+func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config ATNConfig) bool {
+ if TurnOffLRLoopEntryBranchOpt {
+ return false
+ }
+
+ _p := config.GetState()
+
+ // First check to see if we are in StarLoopEntryState generated during
+ // left-recursion elimination. For efficiency, also check if
+ // the context has an empty stack case. If so, it would mean
+ // global FOLLOW so we can't perform optimization
+ if _p.GetStateType() != ATNStateStarLoopEntry {
+ return false
+ }
+ startLoop, ok := _p.(*StarLoopEntryState)
+ if !ok {
+ return false
+ }
+ if !startLoop.precedenceRuleDecision ||
+ config.GetContext().isEmpty() ||
+ config.GetContext().hasEmptyPath() {
+ return false
+ }
+
+ // Require all return states to return back to the same rule
+	// that _p is in.
+ numCtxs := config.GetContext().length()
+ for i := 0; i < numCtxs; i++ {
+ returnState := p.atn.states[config.GetContext().getReturnState(i)]
+ if returnState.GetRuleIndex() != _p.GetRuleIndex() {
+ return false
+ }
+ }
+ x := _p.GetTransitions()[0].getTarget()
+ decisionStartState := x.(BlockStartState)
+ blockEndStateNum := decisionStartState.getEndState().stateNumber
+ blockEndState := p.atn.states[blockEndStateNum].(*BlockEndState)
+
+ // Verify that the top of each stack context leads to loop entry/exit
+ // state through epsilon edges and w/o leaving rule.
+
+ for i := 0; i < numCtxs; i++ { // for each stack context
+ returnStateNumber := config.GetContext().getReturnState(i)
+ returnState := p.atn.states[returnStateNumber]
+
+ // all states must have single outgoing epsilon edge
+ if len(returnState.GetTransitions()) != 1 || !returnState.GetTransitions()[0].getIsEpsilon() {
+ return false
+ }
+
+ // Look for prefix op case like 'not expr', (' type ')' expr
+ returnStateTarget := returnState.GetTransitions()[0].getTarget()
+ if returnState.GetStateType() == ATNStateBlockEnd && returnStateTarget == _p {
+ continue
+ }
+
+ // Look for 'expr op expr' or case where expr's return state is block end
+		// of (...)* internal block; the block end points to loop back,
+		// which points to _p, but we don't need to check that
+ if returnState == blockEndState {
+ continue
+ }
+
+ // Look for ternary expr ? expr : expr. The return state points at block end,
+ // which points at loop entry state
+ if returnStateTarget == blockEndState {
+ continue
+ }
+
+ // Look for complex prefix 'between expr and expr' case where 2nd expr's
+ // return state points at block end state of (...)* internal block
+ if returnStateTarget.GetStateType() == ATNStateBlockEnd &&
+ len(returnStateTarget.GetTransitions()) == 1 &&
+ returnStateTarget.GetTransitions()[0].getIsEpsilon() &&
+ returnStateTarget.GetTransitions()[0].getTarget() == _p {
+ continue
+ }
+
+ // anything else ain't conforming
+ return false
+ }
+
+ return true
+}
+
+func (p *ParserATNSimulator) getRuleName(index int) string {
+ if p.parser != nil && index >= 0 {
+ return p.parser.GetRuleNames()[index]
+ }
+ var sb strings.Builder
+ sb.Grow(32)
+
+	sb.WriteString("<rule ")
+	sb.WriteString(strconv.Itoa(index))
+	sb.WriteString(">")
+ return sb.String()
+}
+
+func (p *ParserATNSimulator) getEpsilonTarget(config ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) ATNConfig {
+
+ switch t.getSerializationType() {
+ case TransitionRULE:
+ return p.ruleTransition(config, t.(*RuleTransition))
+ case TransitionPRECEDENCE:
+ return p.precedenceTransition(config, t.(*PrecedencePredicateTransition), collectPredicates, inContext, fullCtx)
+ case TransitionPREDICATE:
+ return p.predTransition(config, t.(*PredicateTransition), collectPredicates, inContext, fullCtx)
+ case TransitionACTION:
+ return p.actionTransition(config, t.(*ActionTransition))
+ case TransitionEPSILON:
+ return NewBaseATNConfig4(config, t.getTarget())
+ case TransitionATOM, TransitionRANGE, TransitionSET:
+ // EOF transitions act like epsilon transitions after the first EOF
+ // transition is traversed
+ if treatEOFAsEpsilon {
+ if t.Matches(TokenEOF, 0, 1) {
+ return NewBaseATNConfig4(config, t.getTarget())
+ }
+ }
+ return nil
+ default:
+ return nil
+ }
+}
+
+func (p *ParserATNSimulator) actionTransition(config ATNConfig, t *ActionTransition) *BaseATNConfig {
+ if ParserATNSimulatorDebug {
+ fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex))
+ }
+ return NewBaseATNConfig4(config, t.getTarget())
+}
+
+func (p *ParserATNSimulator) precedenceTransition(config ATNConfig,
+ pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig {
+
+ if ParserATNSimulatorDebug {
+ fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " +
+ strconv.Itoa(pt.precedence) + ">=_p, ctx dependent=true")
+ if p.parser != nil {
+ fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil)))
+ }
+ }
+ var c *BaseATNConfig
+ if collectPredicates && inContext {
+ if fullCtx {
+ // In full context mode, we can evaluate predicates on-the-fly
+ // during closure, which dramatically reduces the size of
+ // the config sets. It also obviates the need to test predicates
+ // later during conflict resolution.
+ currentPosition := p.input.Index()
+ p.input.Seek(p.startIndex)
+ predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext)
+ p.input.Seek(currentPosition)
+ if predSucceeds {
+ c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context
+ }
+ } else {
+ newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate())
+ c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx)
+ }
+ } else {
+ c = NewBaseATNConfig4(config, pt.getTarget())
+ }
+ if ParserATNSimulatorDebug {
+ fmt.Println("config from pred transition=" + c.String())
+ }
+ return c
+}
+
+func (p *ParserATNSimulator) predTransition(config ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig {
+
+ if ParserATNSimulatorDebug {
+ fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.ruleIndex) +
+ ":" + strconv.Itoa(pt.predIndex) + ", ctx dependent=" + fmt.Sprint(pt.isCtxDependent))
+ if p.parser != nil {
+ fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil)))
+ }
+ }
+ var c *BaseATNConfig
+ if collectPredicates && (!pt.isCtxDependent || inContext) {
+ if fullCtx {
+ // In full context mode, we can evaluate predicates on-the-fly
+ // during closure, which dramatically reduces the size of
+ // the config sets. It also obviates the need to test predicates
+ // later during conflict resolution.
+ currentPosition := p.input.Index()
+ p.input.Seek(p.startIndex)
+ predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext)
+ p.input.Seek(currentPosition)
+ if predSucceeds {
+ c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context
+ }
+ } else {
+ newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate())
+ c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx)
+ }
+ } else {
+ c = NewBaseATNConfig4(config, pt.getTarget())
+ }
+ if ParserATNSimulatorDebug {
+ fmt.Println("config from pred transition=" + c.String())
+ }
+ return c
+}
+
+func (p *ParserATNSimulator) ruleTransition(config ATNConfig, t *RuleTransition) *BaseATNConfig {
+ if ParserATNSimulatorDebug {
+ fmt.Println("CALL rule " + p.getRuleName(t.getTarget().GetRuleIndex()) + ", ctx=" + config.GetContext().String())
+ }
+ returnState := t.followState
+ newContext := SingletonBasePredictionContextCreate(config.GetContext(), returnState.GetStateNumber())
+ return NewBaseATNConfig1(config, t.getTarget(), newContext)
+}
+
+func (p *ParserATNSimulator) getConflictingAlts(configs ATNConfigSet) *BitSet {
+ altsets := PredictionModegetConflictingAltSubsets(configs)
+ return PredictionModeGetAlts(altsets)
+}
+
+// Sam pointed out a problem with the previous definition, v3, of
+// ambiguous states. If we have another state associated with conflicting
+// alternatives, we should keep going. For example, the following grammar
+//
+// s : (ID | ID ID?) ';' ;
+//
+// When the ATN simulation reaches the state before ';', it has a DFA
+// state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally
+// 12|1|[] and 12|2|[] conflict, but we cannot stop processing this node
+// because alternative two has another way to continue, via [6|2|[]].
+// The key is that we have a single state that has configs only associated
+// with a single alternative, 2, and crucially the state transitions
+// among the configurations are all non-epsilon transitions. That means
+// we don't consider any conflicts that include alternative 2. So, we
+// ignore the conflict between alts 1 and 2. We ignore a set of
+// conflicting alts when there is an intersection with an alternative
+// associated with a single alt state in the state→config-list map.
+//
+// It's also the case that we might have two conflicting configurations but
+// also a 3rd nonconflicting configuration for a different alternative:
+// [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar:
+//
+// a : A | A | A B
+//
+// After Matching input A, we reach the stop state for rule A, state 1.
+// State 8 is the state right before B. Clearly alternatives 1 and 2
+// conflict and no amount of further lookahead will separate the two.
+// However, alternative 3 will be able to continue and so we do not
+// stop working on this state. In the previous example, we're concerned
+// with states associated with the conflicting alternatives. Here alt
+// 3 is not associated with the conflicting configs, but since we can continue
+// looking for input reasonably, I don't declare the state done. We
+// ignore a set of conflicting alts when we have an alternative
+// that we still need to pursue.
+//
+
+func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs ATNConfigSet) *BitSet {
+ var conflictingAlts *BitSet
+ if configs.GetUniqueAlt() != ATNInvalidAltNumber {
+ conflictingAlts = NewBitSet()
+ conflictingAlts.add(configs.GetUniqueAlt())
+ } else {
+ conflictingAlts = configs.GetConflictingAlts()
+ }
+ return conflictingAlts
+}
+
+func (p *ParserATNSimulator) GetTokenName(t int) string {
+ if t == TokenEOF {
+ return "EOF"
+ }
+
+ if p.parser != nil && p.parser.GetLiteralNames() != nil && t < len(p.parser.GetLiteralNames()) {
+ return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">"
+ }
+
+	if p.parser != nil && p.parser.GetSymbolicNames() != nil && t < len(p.parser.GetSymbolicNames()) {
+ return p.parser.GetSymbolicNames()[t] + "<" + strconv.Itoa(t) + ">"
+ }
+
+ return strconv.Itoa(t)
+}
+
+func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string {
+ return p.GetTokenName(input.LA(1))
+}
+
+// Used for debugging in AdaptivePredict around execATN, but I cut
+// it out for clarity now that the algorithm works well. We can leave this
+// "dead" code for a bit.
+func (p *ParserATNSimulator) dumpDeadEndConfigs(nvae *NoViableAltException) {
+
+ panic("Not implemented")
+
+ // fmt.Println("dead end configs: ")
+ // var decs = nvae.deadEndConfigs
+ //
+	// for i := 0; i < len(decs); i++ {
+	//	c := decs[i]
+	//	var trans = "no edges"
+	//	if len(c.state.GetTransitions()) > 0 {
+ // var t = c.state.GetTransitions()[0]
+ // if t2, ok := t.(*AtomTransition); ok {
+ // trans = "Atom "+ p.GetTokenName(t2.label)
+ // } else if t3, ok := t.(SetTransition); ok {
+ // _, ok := t.(*NotSetTransition)
+ //
+ // var s string
+ // if (ok){
+ // s = "~"
+ // }
+ //
+ // trans = s + "Set " + t3.set
+ // }
+ // }
+ // fmt.Errorf(c.String(p.parser, true) + ":" + trans)
+ // }
+}
+
+func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs ATNConfigSet, startIndex int) *NoViableAltException {
+ return NewNoViableAltException(p.parser, input, input.Get(startIndex), input.LT(1), configs, outerContext)
+}
+
+func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int {
+ alt := ATNInvalidAltNumber
+ for _, c := range configs.GetItems() {
+ if alt == ATNInvalidAltNumber {
+ alt = c.GetAlt() // found first alt
+ } else if c.GetAlt() != alt {
+ return ATNInvalidAltNumber
+ }
+ }
+ return alt
+}
+
+// Add an edge to the DFA, if possible. This method calls
+// {@link //addDFAState} to ensure the {@code to} state is present in the
+// DFA. If {@code from} is {@code nil}, or if {@code t} is outside the
+// range of edges that can be represented in the DFA tables, this method
+// returns without adding the edge to the DFA.
+//
+//
+// If {@code to} is {@code nil}, this method returns {@code nil}.
+// Otherwise, this method returns the {@link DFAState} returned by calling
+// {@link //addDFAState} for the {@code to} state.
+//
+// @param dfa The DFA
+// @param from The source state for the edge
+// @param t The input symbol
+// @param to The target state for the edge
+//
+// @return If {@code to} is {@code nil}, this method returns {@code nil};
+// otherwise this method returns the result of calling {@link //addDFAState}
+// on {@code to}
+func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState {
+ if ParserATNSimulatorDebug {
+ fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t))
+ }
+ if to == nil {
+ return nil
+ }
+ p.atn.stateMu.Lock()
+ to = p.addDFAState(dfa, to) // used existing if possible not incoming
+ p.atn.stateMu.Unlock()
+ if from == nil || t < -1 || t > p.atn.maxTokenType {
+ return to
+ }
+ p.atn.edgeMu.Lock()
+ if from.getEdges() == nil {
+ from.setEdges(make([]*DFAState, p.atn.maxTokenType+1+1))
+ }
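+	// Edges are indexed at t+1 so that t == -1 (EOF) lands in slot 0 and the
+	// largest token type lands in slot maxTokenType+1, which is why the edge
+	// slice above is sized maxTokenType + 1 + 1.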
+ from.setIthEdge(t+1, to) // connect
+ p.atn.edgeMu.Unlock()
+
+ if ParserATNSimulatorDebug {
+ var names []string
+ if p.parser != nil {
+ names = p.parser.GetLiteralNames()
+ }
+
+ fmt.Println("DFA=\n" + dfa.String(names, nil))
+ }
+ return to
+}
+
+// Add state {@code D} to the DFA if it is not already present, and return
+// the actual instance stored in the DFA. If a state equivalent to {@code D}
+// is already in the DFA, the existing state is returned. Otherwise this
+// method returns {@code D} after adding it to the DFA.
+//
+//
+// If {@code D} is {@link //ERROR}, this method returns {@link //ERROR} and
+// does not change the DFA.
+//
+// @param dfa The dfa
+// @param D The DFA state to add
+// @return The state stored in the DFA. This will be either the existing
+// state if {@code D} is already in the DFA, or {@code D} itself if the
+// state was not already present.
+func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState {
+ if d == ATNSimulatorError {
+ return d
+ }
+ existing, present := dfa.states.Get(d)
+ if present {
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Print("addDFAState " + d.String() + " exists")
+ }
+ return existing
+ }
+
+ // The state was not present, so update it with configs
+ //
+ d.stateNumber = dfa.states.Len()
+ if !d.configs.ReadOnly() {
+ d.configs.OptimizeConfigs(p.BaseATNSimulator)
+ d.configs.SetReadOnly(true)
+ }
+ dfa.states.Put(d)
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("addDFAState new " + d.String())
+ }
+
+ return d
+}
+
+func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs ATNConfigSet, startIndex, stopIndex int) {
+ if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug {
+ interval := NewInterval(startIndex, stopIndex+1)
+ fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
+ ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
+ }
+ if p.parser != nil {
+ p.parser.GetErrorListenerDispatch().ReportAttemptingFullContext(p.parser, dfa, startIndex, stopIndex, conflictingAlts, configs)
+ }
+}
+
+func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs ATNConfigSet, startIndex, stopIndex int) {
+ if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug {
+ interval := NewInterval(startIndex, stopIndex+1)
+ fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
+ ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
+ }
+ if p.parser != nil {
+ p.parser.GetErrorListenerDispatch().ReportContextSensitivity(p.parser, dfa, startIndex, stopIndex, prediction, configs)
+ }
+}
+
+// If context sensitive parsing, we know it's ambiguity not conflict//
+func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, D *DFAState, startIndex, stopIndex int,
+ exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
+ if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug {
+ interval := NewInterval(startIndex, stopIndex+1)
+ fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() +
+ ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
+ }
+ if p.parser != nil {
+ p.parser.GetErrorListenerDispatch().ReportAmbiguity(p.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
+ }
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go
new file mode 100644
index 0000000..1c8cee7
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go
@@ -0,0 +1,362 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "reflect"
+ "strconv"
+)
+
+type ParserRuleContext interface {
+ RuleContext
+
+ SetException(RecognitionException)
+
+ AddTokenNode(token Token) *TerminalNodeImpl
+ AddErrorNode(badToken Token) *ErrorNodeImpl
+
+ EnterRule(listener ParseTreeListener)
+ ExitRule(listener ParseTreeListener)
+
+ SetStart(Token)
+ GetStart() Token
+
+ SetStop(Token)
+ GetStop() Token
+
+ AddChild(child RuleContext) RuleContext
+ RemoveLastChild()
+}
+
+type BaseParserRuleContext struct {
+ *BaseRuleContext
+
+ start, stop Token
+ exception RecognitionException
+ children []Tree
+}
+
+func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext {
+ prc := new(BaseParserRuleContext)
+
+ prc.BaseRuleContext = NewBaseRuleContext(parent, invokingStateNumber)
+
+ prc.RuleIndex = -1
+	// * If we are debugging or building a parse tree for a Visitor,
+	// we need to track all of the tokens and rule invocations associated
+	// with this rule's context. This is empty when parsing without tree
+	// construction, because we don't need to track the details of how we
+	// parse this rule.
+	// /
+ prc.children = nil
+ prc.start = nil
+ prc.stop = nil
+	// The exception that forced this rule to return. If the rule successfully
+	// completed, this is {@code nil}.
+ prc.exception = nil
+
+ return prc
+}
+
+func (prc *BaseParserRuleContext) SetException(e RecognitionException) {
+ prc.exception = e
+}
+
+func (prc *BaseParserRuleContext) GetChildren() []Tree {
+ return prc.children
+}
+
+func (prc *BaseParserRuleContext) CopyFrom(ctx *BaseParserRuleContext) {
+ // from RuleContext
+ prc.parentCtx = ctx.parentCtx
+ prc.invokingState = ctx.invokingState
+ prc.children = nil
+ prc.start = ctx.start
+ prc.stop = ctx.stop
+}
+
+func (prc *BaseParserRuleContext) GetText() string {
+ if prc.GetChildCount() == 0 {
+ return ""
+ }
+
+ var s string
+ for _, child := range prc.children {
+ s += child.(ParseTree).GetText()
+ }
+
+ return s
+}
+
+// Double dispatch methods for listeners
+func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener) {
+}
+
+func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener) {
+}
+
+// Does not set parent link; other add methods do that.
+func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode {
+ if prc.children == nil {
+ prc.children = make([]Tree, 0)
+ }
+ if child == nil {
+ panic("Child may not be null")
+ }
+ prc.children = append(prc.children, child)
+ return child
+}
+
+func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext {
+ if prc.children == nil {
+ prc.children = make([]Tree, 0)
+ }
+ if child == nil {
+ panic("Child may not be null")
+ }
+ prc.children = append(prc.children, child)
+ return child
+}
+
+// * Used by EnterOuterAlt to toss out a RuleContext previously added as
+// we entered a rule. If we have a label, we will need to remove the
+// generic ruleContext object.
+// /
+func (prc *BaseParserRuleContext) RemoveLastChild() {
+ if prc.children != nil && len(prc.children) > 0 {
+ prc.children = prc.children[0 : len(prc.children)-1]
+ }
+}
+
+func (prc *BaseParserRuleContext) AddTokenNode(token Token) *TerminalNodeImpl {
+
+ node := NewTerminalNodeImpl(token)
+ prc.addTerminalNodeChild(node)
+ node.parentCtx = prc
+ return node
+
+}
+
+func (prc *BaseParserRuleContext) AddErrorNode(badToken Token) *ErrorNodeImpl {
+ node := NewErrorNodeImpl(badToken)
+ prc.addTerminalNodeChild(node)
+ node.parentCtx = prc
+ return node
+}
+
+func (prc *BaseParserRuleContext) GetChild(i int) Tree {
+	if prc.children != nil && len(prc.children) > i {
+ return prc.children[i]
+ }
+
+ return nil
+}
+
+func (prc *BaseParserRuleContext) GetChildOfType(i int, childType reflect.Type) RuleContext {
+ if childType == nil {
+ return prc.GetChild(i).(RuleContext)
+ }
+
+ for j := 0; j < len(prc.children); j++ {
+ child := prc.children[j]
+ if reflect.TypeOf(child) == childType {
+ if i == 0 {
+ return child.(RuleContext)
+ }
+
+ i--
+ }
+ }
+
+ return nil
+}
+
+func (prc *BaseParserRuleContext) ToStringTree(ruleNames []string, recog Recognizer) string {
+ return TreesStringTree(prc, ruleNames, recog)
+}
+
+func (prc *BaseParserRuleContext) GetRuleContext() RuleContext {
+ return prc
+}
+
+func (prc *BaseParserRuleContext) Accept(visitor ParseTreeVisitor) interface{} {
+ return visitor.VisitChildren(prc)
+}
+
+func (prc *BaseParserRuleContext) SetStart(t Token) {
+ prc.start = t
+}
+
+func (prc *BaseParserRuleContext) GetStart() Token {
+ return prc.start
+}
+
+func (prc *BaseParserRuleContext) SetStop(t Token) {
+ prc.stop = t
+}
+
+func (prc *BaseParserRuleContext) GetStop() Token {
+ return prc.stop
+}
+
+func (prc *BaseParserRuleContext) GetToken(ttype int, i int) TerminalNode {
+
+ for j := 0; j < len(prc.children); j++ {
+ child := prc.children[j]
+ if c2, ok := child.(TerminalNode); ok {
+ if c2.GetSymbol().GetTokenType() == ttype {
+ if i == 0 {
+ return c2
+ }
+
+ i--
+ }
+ }
+ }
+ return nil
+}
+
+func (prc *BaseParserRuleContext) GetTokens(ttype int) []TerminalNode {
+ if prc.children == nil {
+ return make([]TerminalNode, 0)
+ }
+
+ tokens := make([]TerminalNode, 0)
+
+ for j := 0; j < len(prc.children); j++ {
+ child := prc.children[j]
+ if tchild, ok := child.(TerminalNode); ok {
+ if tchild.GetSymbol().GetTokenType() == ttype {
+ tokens = append(tokens, tchild)
+ }
+ }
+ }
+
+ return tokens
+}
+
+func (prc *BaseParserRuleContext) GetPayload() interface{} {
+ return prc
+}
+
+func (prc *BaseParserRuleContext) getChild(ctxType reflect.Type, i int) RuleContext {
+ if prc.children == nil || i < 0 || i >= len(prc.children) {
+ return nil
+ }
+
+ j := -1 // what element have we found with ctxType?
+ for _, o := range prc.children {
+
+ childType := reflect.TypeOf(o)
+
+ if childType.Implements(ctxType) {
+ j++
+ if j == i {
+ return o.(RuleContext)
+ }
+ }
+ }
+ return nil
+}
+
+// Go lacks generics, so it's not possible for us to return the child with the correct type, but we do
+// check for convertibility
+
+func (prc *BaseParserRuleContext) GetTypedRuleContext(ctxType reflect.Type, i int) RuleContext {
+ return prc.getChild(ctxType, i)
+}
+
+func (prc *BaseParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []RuleContext {
+ if prc.children == nil {
+ return make([]RuleContext, 0)
+ }
+
+ contexts := make([]RuleContext, 0)
+
+ for _, child := range prc.children {
+ childType := reflect.TypeOf(child)
+
+ if childType.ConvertibleTo(ctxType) {
+ contexts = append(contexts, child.(RuleContext))
+ }
+ }
+ return contexts
+}
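+
+// A typical call site in generated parsers looks like the following sketch
+// (IExprContext is a hypothetical generated context interface):
+//
+//	// exprs := ctx.GetTypedRuleContexts(reflect.TypeOf((*IExprContext)(nil)).Elem())
+//
+// which collects, in order, every child whose type is convertible to
+// IExprContext.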
+
+func (prc *BaseParserRuleContext) GetChildCount() int {
+ if prc.children == nil {
+ return 0
+ }
+
+ return len(prc.children)
+}
+
+func (prc *BaseParserRuleContext) GetSourceInterval() *Interval {
+ if prc.start == nil || prc.stop == nil {
+ return TreeInvalidInterval
+ }
+
+ return NewInterval(prc.start.GetTokenIndex(), prc.stop.GetTokenIndex())
+}
+
+// need to manage circular dependencies, so export now
+
+// Print out a whole tree, not just a node, in LISP format
+// (root child1 .. childN). Print just a node if this is a leaf.
+//
+
+func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) string {
+
+ var p ParserRuleContext = prc
+ s := "["
+ for p != nil && p != stop {
+ if ruleNames == nil {
+ if !p.IsEmpty() {
+ s += strconv.Itoa(p.GetInvokingState())
+ }
+ } else {
+ ri := p.GetRuleIndex()
+ var ruleName string
+ if ri >= 0 && ri < len(ruleNames) {
+ ruleName = ruleNames[ri]
+ } else {
+ ruleName = strconv.Itoa(ri)
+ }
+ s += ruleName
+ }
+ if p.GetParent() != nil && (ruleNames != nil || !p.GetParent().(ParserRuleContext).IsEmpty()) {
+ s += " "
+ }
+ pi := p.GetParent()
+ if pi != nil {
+ p = pi.(ParserRuleContext)
+ } else {
+ p = nil
+ }
+ }
+ s += "]"
+ return s
+}
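+
+// Sketch (hypothetical rule names): for an invocation chain a -> b -> c,
+// c.String(ruleNames, nil) renders "[c b a]": the current rule first,
+// walking parent links toward the root.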
+
+var ParserRuleContextEmpty = NewBaseParserRuleContext(nil, -1)
+
+type InterpreterRuleContext interface {
+ ParserRuleContext
+}
+
+type BaseInterpreterRuleContext struct {
+ *BaseParserRuleContext
+}
+
+func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext {
+
+ prc := new(BaseInterpreterRuleContext)
+
+ prc.BaseParserRuleContext = NewBaseParserRuleContext(parent, invokingStateNumber)
+
+ prc.RuleIndex = ruleIndex
+
+ return prc
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go
new file mode 100644
index 0000000..ba62af3
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go
@@ -0,0 +1,806 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "golang.org/x/exp/slices"
+ "strconv"
+)
+
+// Represents {@code $} in local context prediction, which means wildcard:
+// {@code *+x = *}.
+// /
+const (
+ BasePredictionContextEmptyReturnState = 0x7FFFFFFF
+)
+
+// Represents {@code $} in an array in full context mode, when {@code $}
+// doesn't mean wildcard: {@code $ + x = [$,x]}. Here,
+// {@code $} = {@link //EmptyReturnState}.
+// /
+
+var (
+ BasePredictionContextglobalNodeCount = 1
+ BasePredictionContextid = BasePredictionContextglobalNodeCount
+)
+
+type PredictionContext interface {
+ Hash() int
+ Equals(interface{}) bool
+ GetParent(int) PredictionContext
+ getReturnState(int) int
+ length() int
+ isEmpty() bool
+ hasEmptyPath() bool
+ String() string
+}
+
+type BasePredictionContext struct {
+ cachedHash int
+}
+
+func NewBasePredictionContext(cachedHash int) *BasePredictionContext {
+ pc := new(BasePredictionContext)
+ pc.cachedHash = cachedHash
+
+ return pc
+}
+
+func (b *BasePredictionContext) isEmpty() bool {
+ return false
+}
+
+func calculateHash(parent PredictionContext, returnState int) int {
+ h := murmurInit(1)
+ h = murmurUpdate(h, parent.Hash())
+ h = murmurUpdate(h, returnState)
+ return murmurFinish(h, 2)
+}
+
+var _emptyPredictionContextHash int
+
+func init() {
+ _emptyPredictionContextHash = murmurInit(1)
+ _emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0)
+}
+
+func calculateEmptyHash() int {
+ return _emptyPredictionContextHash
+}
+
+// Used to cache {@link BasePredictionContext} objects. It's used for the shared
+// context cache associated with contexts in DFA states. This cache
+// can be used for both lexers and parsers.
+
+type PredictionContextCache struct {
+ cache map[PredictionContext]PredictionContext
+}
+
+func NewPredictionContextCache() *PredictionContextCache {
+ t := new(PredictionContextCache)
+ t.cache = make(map[PredictionContext]PredictionContext)
+ return t
+}
+
+// Add a context to the cache and return it. If the context already exists,
+// return that one instead and do not add a new context to the cache.
+// Protect shared cache from unsafe thread access.
+func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext {
+ if ctx == BasePredictionContextEMPTY {
+ return BasePredictionContextEMPTY
+ }
+ existing := p.cache[ctx]
+ if existing != nil {
+ return existing
+ }
+ p.cache[ctx] = ctx
+ return ctx
+}
+
+func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext {
+ return p.cache[ctx]
+}
+
+func (p *PredictionContextCache) length() int {
+ return len(p.cache)
+}
+
+type SingletonPredictionContext interface {
+ PredictionContext
+}
+
+type BaseSingletonPredictionContext struct {
+ *BasePredictionContext
+
+ parentCtx PredictionContext
+ returnState int
+}
+
+func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext {
+ var cachedHash int
+ if parent != nil {
+ cachedHash = calculateHash(parent, returnState)
+ } else {
+ cachedHash = calculateEmptyHash()
+ }
+
+ s := new(BaseSingletonPredictionContext)
+ s.BasePredictionContext = NewBasePredictionContext(cachedHash)
+
+ s.parentCtx = parent
+ s.returnState = returnState
+
+ return s
+}
+
+func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext {
+ if returnState == BasePredictionContextEmptyReturnState && parent == nil {
+ // someone can pass in the bits of an array ctx that mean $
+ return BasePredictionContextEMPTY
+ }
+
+ return NewBaseSingletonPredictionContext(parent, returnState)
+}
+
+func (b *BaseSingletonPredictionContext) length() int {
+ return 1
+}
+
+func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext {
+ return b.parentCtx
+}
+
+func (b *BaseSingletonPredictionContext) getReturnState(index int) int {
+ return b.returnState
+}
+
+func (b *BaseSingletonPredictionContext) hasEmptyPath() bool {
+ return b.returnState == BasePredictionContextEmptyReturnState
+}
+
+func (b *BaseSingletonPredictionContext) Hash() int {
+ return b.cachedHash
+}
+
+func (b *BaseSingletonPredictionContext) Equals(other interface{}) bool {
+ if b == other {
+ return true
+ }
+ if _, ok := other.(*BaseSingletonPredictionContext); !ok {
+ return false
+ }
+
+ otherP := other.(*BaseSingletonPredictionContext)
+
+ if b.returnState != otherP.getReturnState(0) {
+ return false
+ }
+ if b.parentCtx == nil {
+ return otherP.parentCtx == nil
+ }
+
+ return b.parentCtx.Equals(otherP.parentCtx)
+}
+
+func (b *BaseSingletonPredictionContext) String() string {
+ var up string
+
+ if b.parentCtx == nil {
+ up = ""
+ } else {
+ up = b.parentCtx.String()
+ }
+
+ if len(up) == 0 {
+ if b.returnState == BasePredictionContextEmptyReturnState {
+ return "$"
+ }
+
+ return strconv.Itoa(b.returnState)
+ }
+
+ return strconv.Itoa(b.returnState) + " " + up
+}
+
+var BasePredictionContextEMPTY = NewEmptyPredictionContext()
+
+type EmptyPredictionContext struct {
+ *BaseSingletonPredictionContext
+}
+
+func NewEmptyPredictionContext() *EmptyPredictionContext {
+
+ p := new(EmptyPredictionContext)
+
+ p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState)
+ p.cachedHash = calculateEmptyHash()
+ return p
+}
+
+func (e *EmptyPredictionContext) isEmpty() bool {
+ return true
+}
+
+func (e *EmptyPredictionContext) GetParent(index int) PredictionContext {
+ return nil
+}
+
+func (e *EmptyPredictionContext) getReturnState(index int) int {
+ return e.returnState
+}
+
+func (e *EmptyPredictionContext) Hash() int {
+ return e.cachedHash
+}
+
+func (e *EmptyPredictionContext) Equals(other interface{}) bool {
+ return e == other
+}
+
+func (e *EmptyPredictionContext) String() string {
+ return "$"
+}
+
+type ArrayPredictionContext struct {
+ *BasePredictionContext
+
+ parents []PredictionContext
+ returnStates []int
+}
+
+func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext {
+ // Parent can be nil only if full ctx mode and we make an array
+ // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using
+ // nil parent and
+ // returnState == {@link //EmptyReturnState}.
+ hash := murmurInit(1)
+
+ for _, parent := range parents {
+ hash = murmurUpdate(hash, parent.Hash())
+ }
+
+ for _, returnState := range returnStates {
+ hash = murmurUpdate(hash, returnState)
+ }
+
+ hash = murmurFinish(hash, len(parents)<<1)
+
+ c := new(ArrayPredictionContext)
+ c.BasePredictionContext = NewBasePredictionContext(hash)
+
+ c.parents = parents
+ c.returnStates = returnStates
+
+ return c
+}
+
+func (a *ArrayPredictionContext) GetReturnStates() []int {
+ return a.returnStates
+}
+
+func (a *ArrayPredictionContext) hasEmptyPath() bool {
+ return a.getReturnState(a.length()-1) == BasePredictionContextEmptyReturnState
+}
+
+func (a *ArrayPredictionContext) isEmpty() bool {
+ // since EmptyReturnState can only appear in the last position, we
+ // don't need to verify that size==1
+ return a.returnStates[0] == BasePredictionContextEmptyReturnState
+}
+
+func (a *ArrayPredictionContext) length() int {
+ return len(a.returnStates)
+}
+
+func (a *ArrayPredictionContext) GetParent(index int) PredictionContext {
+ return a.parents[index]
+}
+
+func (a *ArrayPredictionContext) getReturnState(index int) int {
+ return a.returnStates[index]
+}
+
+// Equals is the default comparison function for ArrayPredictionContext when no specialized
+// implementation is needed for a collection
+func (a *ArrayPredictionContext) Equals(o interface{}) bool {
+ if a == o {
+ return true
+ }
+ other, ok := o.(*ArrayPredictionContext)
+ if !ok {
+ return false
+ }
+ if a.cachedHash != other.Hash() {
+ return false // can't be same if hash is different
+ }
+
+ // Must compare the actual array elements and not just the array address
+ //
+ return slices.Equal(a.returnStates, other.returnStates) &&
+ slices.EqualFunc(a.parents, other.parents, func(x, y PredictionContext) bool {
+ return x.Equals(y)
+ })
+}
+
+// Hash is the default hash function for ArrayPredictionContext when no specialized
+// implementation is needed for a collection
+func (a *ArrayPredictionContext) Hash() int {
+ return a.BasePredictionContext.cachedHash
+}
+
+func (a *ArrayPredictionContext) String() string {
+ if a.isEmpty() {
+ return "[]"
+ }
+
+ s := "["
+ for i := 0; i < len(a.returnStates); i++ {
+ if i > 0 {
+ s = s + ", "
+ }
+ if a.returnStates[i] == BasePredictionContextEmptyReturnState {
+ s = s + "$"
+ continue
+ }
+ s = s + strconv.Itoa(a.returnStates[i])
+ if a.parents[i] != nil {
+ s = s + " " + a.parents[i].String()
+ } else {
+ s = s + "nil"
+ }
+ }
+
+ return s + "]"
+}
+
+// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph.
+// Return {@link //EMPTY} if {@code outerContext} is empty or nil.
+// /
+func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext {
+ if outerContext == nil {
+ outerContext = ParserRuleContextEmpty
+ }
+ // if we are in RuleContext of start rule, s, then BasePredictionContext
+ // is EMPTY. Nobody called us. (if we are empty, return empty)
+ if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty {
+ return BasePredictionContextEMPTY
+ }
+ // If we have a parent, convert it to a BasePredictionContext graph
+ parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext))
+ state := a.states[outerContext.GetInvokingState()]
+ transition := state.GetTransitions()[0]
+
+ return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber())
+}
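+
+// Sketch of the recursion above (hypothetical rule names): for an invocation
+// chain prog -> stat -> expr, the expr context becomes a singleton whose
+// return state is the follow state of the RuleTransition that invoked expr,
+// whose parent is the (recursively converted) stat context, bottoming out at
+// BasePredictionContextEMPTY for the start rule.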
+
+func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
+
+ // Share same graph if both same
+ //
+ if a == b || a.Equals(b) {
+ return a
+ }
+
+	// In Java, EmptyPredictionContext inherits from SingletonPredictionContext, and so the test
+	// in Java for SingletonPredictionContext will succeed and a new ArrayPredictionContext will be created
+	// from it.
+	// In Go, EmptyPredictionContext does not equate to SingletonPredictionContext, and so that conversion
+	// will fail. We need to test for both Empty and Singleton and create an ArrayPredictionContext from
+	// either of them.
+
+ ac, ok1 := a.(*BaseSingletonPredictionContext)
+ bc, ok2 := b.(*BaseSingletonPredictionContext)
+
+ if ok1 && ok2 {
+ return mergeSingletons(ac, bc, rootIsWildcard, mergeCache)
+ }
+	// At least one of a or b is an array.
+	// If one is $ and rootIsWildcard, return $ as the wildcard.
+ if rootIsWildcard {
+ if _, ok := a.(*EmptyPredictionContext); ok {
+ return a
+ }
+ if _, ok := b.(*EmptyPredictionContext); ok {
+ return b
+ }
+ }
+
+ // Convert Singleton or Empty so both are arrays to normalize - We should not use the existing parameters
+ // here.
+ //
+ // TODO: I think that maybe the Prediction Context structs should be redone as there is a chance we will see this mess again - maybe redo the logic here
+
+ var arp, arb *ArrayPredictionContext
+ var ok bool
+ if arp, ok = a.(*ArrayPredictionContext); ok {
+ } else if _, ok = a.(*BaseSingletonPredictionContext); ok {
+ arp = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)})
+ } else if _, ok = a.(*EmptyPredictionContext); ok {
+ arp = NewArrayPredictionContext([]PredictionContext{}, []int{})
+ }
+
+ if arb, ok = b.(*ArrayPredictionContext); ok {
+ } else if _, ok = b.(*BaseSingletonPredictionContext); ok {
+ arb = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)})
+ } else if _, ok = b.(*EmptyPredictionContext); ok {
+ arb = NewArrayPredictionContext([]PredictionContext{}, []int{})
+ }
+
+	// Both arp and arb are now ArrayPredictionContexts
+ return mergeArrays(arp, arb, rootIsWildcard, mergeCache)
+}
+
+// Merge two {@link SingletonBasePredictionContext} instances.
+//
+//   - Stack tops equal, parents merge is same: return left graph.
+//
+//   - Same stack top, parents differ: merge parents giving array node, then
+//     remainders of those graphs. A new root node is created to point to the
+//     merged parents.
+//
+//   - Different stack tops pointing to same parent: make array node for the
+//     root where both elements in the root point to the same (original)
+//     parent.
+//
+//   - Different stack tops pointing to different parents: make array node for
+//     the root where each element points to the corresponding original
+//     parent.
+//
+// @param a the first {@link SingletonBasePredictionContext}
+// @param b the second {@link SingletonBasePredictionContext}
+// @param rootIsWildcard {@code true} if this is a local-context merge,
+// otherwise false to indicate a full-context merge
+// @param mergeCache
+// /
+func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
+ if mergeCache != nil {
+ previous := mergeCache.Get(a.Hash(), b.Hash())
+ if previous != nil {
+ return previous.(PredictionContext)
+ }
+ previous = mergeCache.Get(b.Hash(), a.Hash())
+ if previous != nil {
+ return previous.(PredictionContext)
+ }
+ }
+
+ rootMerge := mergeRoot(a, b, rootIsWildcard)
+ if rootMerge != nil {
+ if mergeCache != nil {
+ mergeCache.set(a.Hash(), b.Hash(), rootMerge)
+ }
+ return rootMerge
+ }
+ if a.returnState == b.returnState {
+ parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
+ // if parent is same as existing a or b parent or reduced to a parent,
+ // return it
+ if parent == a.parentCtx {
+ return a // ax + bx = ax, if a=b
+ }
+ if parent == b.parentCtx {
+ return b // ax + bx = bx, if a=b
+ }
+ // else: ax + ay = a'[x,y]
+ // merge parents x and y, giving array node with x,y then remainders
+		// of those graphs. dup a, a' points at merged array.
+		// New joined parent, so create a new singleton pointing to it, a'
+ spc := SingletonBasePredictionContextCreate(parent, a.returnState)
+ if mergeCache != nil {
+ mergeCache.set(a.Hash(), b.Hash(), spc)
+ }
+ return spc
+ }
+ // a != b payloads differ
+ // see if we can collapse parents due to $+x parents if local ctx
+ var singleParent PredictionContext
+ if a == b || (a.parentCtx != nil && a.parentCtx == b.parentCtx) { // ax +
+ // bx =
+ // [a,b]x
+ singleParent = a.parentCtx
+ }
+ if singleParent != nil { // parents are same
+ // sort payloads and use same parent
+ payloads := []int{a.returnState, b.returnState}
+ if a.returnState > b.returnState {
+ payloads[0] = b.returnState
+ payloads[1] = a.returnState
+ }
+ parents := []PredictionContext{singleParent, singleParent}
+ apc := NewArrayPredictionContext(parents, payloads)
+ if mergeCache != nil {
+ mergeCache.set(a.Hash(), b.Hash(), apc)
+ }
+ return apc
+ }
+ // parents differ and can't merge them. Just pack together
+ // into array can't merge.
+ // ax + by = [ax,by]
+ payloads := []int{a.returnState, b.returnState}
+ parents := []PredictionContext{a.parentCtx, b.parentCtx}
+ if a.returnState > b.returnState { // sort by payload
+ payloads[0] = b.returnState
+ payloads[1] = a.returnState
+ parents = []PredictionContext{b.parentCtx, a.parentCtx}
+ }
+ apc := NewArrayPredictionContext(parents, payloads)
+ if mergeCache != nil {
+ mergeCache.set(a.Hash(), b.Hash(), apc)
+ }
+ return apc
+}
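+
+// Worked example (editor's sketch): merging a = (parent x, returnState 9)
+// with b = (parent x, returnState 7) takes the shared-parent branch above:
+// payloads are sorted to [7, 9] and an ArrayPredictionContext with parents
+// [x, x] is returned, i.e. ax + bx = [b,a]x.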
+
+// Handle case where at least one of {@code a} or {@code b} is
+// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used
+// to represent {@link //EMPTY}.
+//
+// Local-Context Merges
+//
+// These local-context merge operations are used when {@code rootIsWildcard}
+// is true.
+//
+//   - {@link //EMPTY} is superset of any graph; return {@link //EMPTY}.
+//
+//   - {@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
+//     {@code //EMPTY}; return left graph.
+//
+//   - Special case of last merge if local context.
+//
+// Full-Context Merges
+//
+// These full-context merge operations are used when {@code rootIsWildcard}
+// is false.
+//
+//   - Must keep all contexts; {@link //EMPTY} in array is a special value (and
+//     nil parent).
+//
+// @param a the first {@link SingletonBasePredictionContext}
+// @param b the second {@link SingletonBasePredictionContext}
+// @param rootIsWildcard {@code true} if this is a local-context merge,
+// otherwise false to indicate a full-context merge
+// /
+func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionContext {
+ if rootIsWildcard {
+ if a == BasePredictionContextEMPTY {
+			return BasePredictionContextEMPTY // *+b = *
+ }
+ if b == BasePredictionContextEMPTY {
+			return BasePredictionContextEMPTY // a+* = *
+ }
+ } else {
+ if a == BasePredictionContextEMPTY && b == BasePredictionContextEMPTY {
+ return BasePredictionContextEMPTY // $ + $ = $
+ } else if a == BasePredictionContextEMPTY { // $ + x = [$,x]
+ payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState}
+ parents := []PredictionContext{b.GetParent(-1), nil}
+ return NewArrayPredictionContext(parents, payloads)
+ } else if b == BasePredictionContextEMPTY { // x + $ = [x,$] ($ is always last if present)
+ payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState}
+ parents := []PredictionContext{a.GetParent(-1), nil}
+ return NewArrayPredictionContext(parents, payloads)
+ }
+ }
+ return nil
+}
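+
+// Illustrative usage sketch (not part of the upstream runtime): given a
+// non-empty singleton context a, the wildcard rules above behave as follows.
+//
+//	mergeRoot(a, BasePredictionContextEMPTY, true)  // local ctx: EMPTY swallows a
+//	mergeRoot(a, BasePredictionContextEMPTY, false) // full ctx: [x,$] array result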
+
+// Merge two {@link ArrayBasePredictionContext} instances.
+//
+//
+// Different tops, different parents.
+//
+//
+//
+// Shared top, same parents.
+//
+//
+//
+// Shared top, different parents.
+//
+//
+//
+// Shared top, all shared parents.
+//
+//
+//
+// Equal tops, merge parents and reduce top to
+// {@link SingletonBasePredictionContext}.
+//
+// /
+func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
+ if mergeCache != nil {
+ previous := mergeCache.Get(a.Hash(), b.Hash())
+ if previous != nil {
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
+ }
+ return previous.(PredictionContext)
+ }
+ previous = mergeCache.Get(b.Hash(), a.Hash())
+ if previous != nil {
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
+ }
+ return previous.(PredictionContext)
+ }
+ }
+ // merge sorted payloads a + b => M
+ i := 0 // walks a
+ j := 0 // walks b
+ k := 0 // walks target M array
+
+ mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates))
+ mergedParents := make([]PredictionContext, len(a.returnStates)+len(b.returnStates))
+ // walk and merge to yield mergedParents, mergedReturnStates
+ for i < len(a.returnStates) && j < len(b.returnStates) {
+ aParent := a.parents[i]
+ bParent := b.parents[j]
+ if a.returnStates[i] == b.returnStates[j] {
+ // same payload (stack tops are equal), must yield merged singleton
+ payload := a.returnStates[i]
+ // $+$ = $
+ bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil
+ axAX := aParent != nil && bParent != nil && aParent == bParent // ax+ax
+ // ->
+ // ax
+ if bothDollars || axAX {
+ mergedParents[k] = aParent // choose left
+ mergedReturnStates[k] = payload
+ } else { // ax+ay -> a'[x,y]
+ mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache)
+ mergedParents[k] = mergedParent
+ mergedReturnStates[k] = payload
+ }
+ i++ // hop over left one as usual
+ j++ // but also Skip one in right side since we merge
+ } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M
+ mergedParents[k] = aParent
+ mergedReturnStates[k] = a.returnStates[i]
+ i++
+ } else { // b > a, copy b[j] to M
+ mergedParents[k] = bParent
+ mergedReturnStates[k] = b.returnStates[j]
+ j++
+ }
+ k++
+ }
+ // copy over any payloads remaining in either array
+ if i < len(a.returnStates) {
+ for p := i; p < len(a.returnStates); p++ {
+ mergedParents[k] = a.parents[p]
+ mergedReturnStates[k] = a.returnStates[p]
+ k++
+ }
+ } else {
+ for p := j; p < len(b.returnStates); p++ {
+ mergedParents[k] = b.parents[p]
+ mergedReturnStates[k] = b.returnStates[p]
+ k++
+ }
+ }
+ // trim merged if we combined a few that had same stack tops
+ if k < len(mergedParents) { // write index < last position trim
+ if k == 1 { // for just one merged element, return singleton top
+ pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0])
+ if mergeCache != nil {
+ mergeCache.set(a.Hash(), b.Hash(), pc)
+ }
+ return pc
+ }
+ mergedParents = mergedParents[0:k]
+ mergedReturnStates = mergedReturnStates[0:k]
+ }
+
+ M := NewArrayPredictionContext(mergedParents, mergedReturnStates)
+
+ // if we created same array as a or b, return that instead
+ // TODO: track whether this is possible above during merge sort for speed
+ // TODO: In go, I do not think we can just do M == xx as M is a brand new allocation. This could be causing allocation problems
+ if M == a {
+ if mergeCache != nil {
+ mergeCache.set(a.Hash(), b.Hash(), a)
+ }
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> a")
+ }
+ return a
+ }
+ if M == b {
+ if mergeCache != nil {
+ mergeCache.set(a.Hash(), b.Hash(), b)
+ }
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> b")
+ }
+ return b
+ }
+ combineCommonParents(mergedParents)
+
+ if mergeCache != nil {
+ mergeCache.set(a.Hash(), b.Hash(), M)
+ }
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> " + M.String())
+ }
+ return M
+}
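+
+// Illustrative sketch (assumed inputs, not part of the upstream runtime):
+// mergeArrays is a classic sorted-list merge keyed on returnStates.
+//
+//	// a.returnStates = [1,3], b.returnStates = [2,3]
+//	M := mergeArrays(a, b, false, nil)
+//	// M.returnStates = [1,2,3]; the parents for payload 3 are merged recursively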
+
+// Make pass over all M {@code parents} merge any {@code equals()}
+// ones.
+// /
+func combineCommonParents(parents []PredictionContext) {
+ uniqueParents := make(map[PredictionContext]PredictionContext)
+
+ for p := 0; p < len(parents); p++ {
+ parent := parents[p]
+ if uniqueParents[parent] == nil {
+ uniqueParents[parent] = parent
+ }
+ }
+ for q := 0; q < len(parents); q++ {
+ parents[q] = uniqueParents[parents[q]]
+ }
+}
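+
+// Illustrative sketch (not part of the upstream runtime): duplicate parent
+// entries collapse to one canonical value after the pass.
+//
+//	parents := []PredictionContext{p, q, p}
+//	combineCommonParents(parents)
+//	// all slots equal to p now reference the same canonical entry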
+
+func getCachedBasePredictionContext(context PredictionContext, contextCache *PredictionContextCache, visited map[PredictionContext]PredictionContext) PredictionContext {
+
+ if context.isEmpty() {
+ return context
+ }
+ existing := visited[context]
+ if existing != nil {
+ return existing
+ }
+ existing = contextCache.Get(context)
+ if existing != nil {
+ visited[context] = existing
+ return existing
+ }
+ changed := false
+ parents := make([]PredictionContext, context.length())
+ for i := 0; i < len(parents); i++ {
+ parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited)
+ if changed || parent != context.GetParent(i) {
+ if !changed {
+ parents = make([]PredictionContext, context.length())
+ for j := 0; j < context.length(); j++ {
+ parents[j] = context.GetParent(j)
+ }
+ changed = true
+ }
+ parents[i] = parent
+ }
+ }
+ if !changed {
+ contextCache.add(context)
+ visited[context] = context
+ return context
+ }
+ var updated PredictionContext
+ if len(parents) == 0 {
+ updated = BasePredictionContextEMPTY
+ } else if len(parents) == 1 {
+ updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0))
+ } else {
+ updated = NewArrayPredictionContext(parents, context.(*ArrayPredictionContext).GetReturnStates())
+ }
+ contextCache.add(updated)
+ visited[updated] = updated
+ visited[context] = updated
+
+ return updated
+}
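+
+// Illustrative usage sketch (not part of the upstream runtime; cache is an
+// assumed *PredictionContextCache): callers pass a shared cache plus a
+// per-operation visited map, and get back either the original context or its
+// canonical, cached equivalent.
+//
+//	visited := make(map[PredictionContext]PredictionContext)
+//	canon := getCachedBasePredictionContext(ctx, cache, visited)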
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go
new file mode 100644
index 0000000..7b9b72f
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go
@@ -0,0 +1,529 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// This enumeration defines the prediction modes available in ANTLR 4 along with
+// utility methods for analyzing configuration sets for conflicts and/or
+// ambiguities.
+
+const (
+ //
+ // The SLL(*) prediction mode. This prediction mode ignores the current
+ // parser context when making predictions. This is the fastest prediction
+ // mode, and provides correct results for many grammars. This prediction
+ // mode is more powerful than the prediction mode provided by ANTLR 3, but
+ // may result in syntax errors for grammar and input combinations which are
+ // not SLL.
+ //
+ //
+ // When using this prediction mode, the parser will either return a correct
+ // parse tree (i.e. the same parse tree that would be returned with the
+ // {@link //LL} prediction mode), or it will Report a syntax error. If a
+ // syntax error is encountered when using the {@link //SLL} prediction mode,
+ // it may be due to either an actual syntax error in the input or indicate
+ // that the particular combination of grammar and input requires the more
+ // powerful {@link //LL} prediction abilities to complete successfully.
+ //
+ //
+ // This prediction mode does not provide any guarantees for prediction
+ // behavior for syntactically-incorrect inputs.
+ //
+ PredictionModeSLL = 0
+ //
+ // The LL(*) prediction mode. This prediction mode allows the current parser
+ // context to be used for resolving SLL conflicts that occur during
+ // prediction. This is the fastest prediction mode that guarantees correct
+ // parse results for all combinations of grammars with syntactically correct
+ // inputs.
+ //
+ //
+ // When using this prediction mode, the parser will make correct decisions
+ // for all syntactically-correct grammar and input combinations. However, in
+ // cases where the grammar is truly ambiguous this prediction mode might not
+ // Report a precise answer for exactly which alternatives are
+ // ambiguous.
+ //
+ //
+ // This prediction mode does not provide any guarantees for prediction
+ // behavior for syntactically-incorrect inputs.
+ //
+ PredictionModeLL = 1
+ //
+ // The LL(*) prediction mode with exact ambiguity detection. In addition to
+ // the correctness guarantees provided by the {@link //LL} prediction mode,
+ // this prediction mode instructs the prediction algorithm to determine the
+ // complete and exact set of ambiguous alternatives for every ambiguous
+ // decision encountered while parsing.
+ //
+ //
+ // This prediction mode may be used for diagnosing ambiguities during
+ // grammar development. Due to the performance overhead of calculating sets
+ // of ambiguous alternatives, this prediction mode should be avoided when
+ // the exact results are not necessary.
+ //
+ //
+ // This prediction mode does not provide any guarantees for prediction
+ // behavior for syntactically-incorrect inputs.
+ //
+ PredictionModeLLExactAmbigDetection = 2
+)
+
+// This method computes the SLL prediction termination condition for both of
+// the following cases.
+//
+//
+//
+// The usual SLL+LL fallback upon SLL conflict
+//
+// Pure SLL without LL fallback
+//
+//
+//
+// COMBINED SLL+LL PARSING
+//
+//
+// When LL-fallback is enabled upon SLL conflict, correct predictions are
+// ensured regardless of how the termination condition is computed by this
+// method. Due to the substantially higher cost of LL prediction, the
+// prediction should only fall back to LL when the additional lookahead
+// cannot lead to a unique SLL prediction.
+//
+//
+// Assuming combined SLL+LL parsing, an SLL configuration set with only
+// conflicting subsets should fall back to full LL, even if the
+// configuration sets don't resolve to the same alternative (e.g.
+// {@code {1,2}} and {@code {3,4}}). If there is at least one non-conflicting
+// configuration, SLL could continue with the hopes that more lookahead will
+// resolve via one of those non-conflicting configurations.
+//
+//
+// Here's the prediction termination rule then: SLL (for SLL+LL parsing)
+// stops when it sees only conflicting configuration subsets. In contrast,
+// full LL keeps going when there is uncertainty.
+//
+//
+// HEURISTIC
+//
+//
+// As a heuristic, we stop prediction when we see any conflicting subset
+// unless we see a state that only has one alternative associated with it.
+// The single-alt-state thing lets prediction continue upon rules like
+// (otherwise, it would admit defeat too soon):
+//
+//
+// {@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' }
+//
+//
+// When the ATN simulation reaches the state before {@code ';'}, it has a
+// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
+// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
+// processing this node because alternative two has another way to continue,
+// via {@code [6|2|[]]}.
+//
+//
+// It also lets us continue for this rule:
+//
+//
+// {@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }
+//
+//
+// After Matching input A, we reach the stop state for rule A, state 1.
+// State 8 is the state right before B. Clearly alternatives 1 and 2
+// conflict and no amount of further lookahead will separate the two.
+// However, alternative 3 will be able to continue and so we do not stop
+// working on this state. In the previous example, we're concerned with
+// states associated with the conflicting alternatives. Here alt 3 is not
+// associated with the conflicting configs, but since we can continue
+// looking for input reasonably, don't declare the state done.
+//
+//
+// PURE SLL PARSING
+//
+//
+// To handle pure SLL parsing, all we have to do is make sure that we
+// combine stack contexts for configurations that differ only by semantic
+// predicate. From there, we can do the usual SLL termination heuristic.
+//
+//
+// PREDICATES IN SLL+LL PARSING
+//
+//
+// SLL decisions don't evaluate predicates until after they reach DFA stop
+// states because they need to create the DFA cache that works in all
+// semantic situations. In contrast, full LL evaluates predicates collected
+// during start state computation so it can ignore predicates thereafter.
+// This means that SLL termination detection can totally ignore semantic
+// predicates.
+//
+//
+// Implementation-wise, {@link ATNConfigSet} combines stack contexts but not
+// semantic predicate contexts so we might see two configurations like the
+// following.
+//
+//
+// {@code (s, 1, x, {}), (s, 1, x', {p})}
+//
+//
+// Before testing these configurations against others, we have to merge
+// {@code x} and {@code x'} (without modifying the existing configurations).
+// For example, we test {@code (x+x')==x”} when looking for conflicts in
+// the following configurations.
+//
+// {@code (s, 1, x, {}), (s, 1, x', {p}), (s, 1, x”, {})}
+//
+// If the configuration set has predicates (as indicated by
+// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of
+// the configurations to strip out all of the predicates so that a standard
+// {@link ATNConfigSet} will merge everything ignoring predicates.
+func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool {
+ // Configs in rule stop states indicate reaching the end of the decision
+ // rule (local context) or end of start rule (full context). If all
+ // configs meet this condition, then none of the configurations is able
+ // to Match additional input so we terminate prediction.
+ //
+ if PredictionModeallConfigsInRuleStopStates(configs) {
+ return true
+ }
+ // pure SLL mode parsing
+ if mode == PredictionModeSLL {
+ // Don't bother with combining configs from different semantic
+ // contexts if we can fail over to full LL costs more time
+ // since we'll often fail over anyway.
+ if configs.HasSemanticContext() {
+ // dup configs, tossing out semantic predicates
+ dup := NewBaseATNConfigSet(false)
+ for _, c := range configs.GetItems() {
+
+ // NewBaseATNConfig({semanticContext:}, c)
+ c = NewBaseATNConfig2(c, SemanticContextNone)
+ dup.Add(c, nil)
+ }
+ configs = dup
+ }
+ // now we have combined contexts for configs with dissimilar preds
+ }
+ // pure SLL or combined SLL+LL mode parsing
+ altsets := PredictionModegetConflictingAltSubsets(configs)
+ return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs)
+}
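+
+// Illustrative usage sketch (not part of the upstream runtime): an ATN
+// simulator would consult this predicate after each closure step.
+//
+//	if PredictionModehasSLLConflictTerminatingPrediction(PredictionModeSLL, configs) {
+//		// stop SLL prediction here: report the conflict or fail over to full LL
+//	}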
+
+// Checks if any configuration in {@code configs} is in a
+// {@link RuleStopState}. Configurations meeting this condition have reached
+// the end of the decision rule (local context) or end of start rule (full
+// context).
+//
+// @param configs the configuration set to test
+// @return {@code true} if any configuration in {@code configs} is in a
+// {@link RuleStopState}, otherwise {@code false}
+func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool {
+ for _, c := range configs.GetItems() {
+ if _, ok := c.GetState().(*RuleStopState); ok {
+ return true
+ }
+ }
+ return false
+}
+
+// Checks if all configurations in {@code configs} are in a
+// {@link RuleStopState}. Configurations meeting this condition have reached
+// the end of the decision rule (local context) or end of start rule (full
+// context).
+//
+// @param configs the configuration set to test
+// @return {@code true} if all configurations in {@code configs} are in a
+// {@link RuleStopState}, otherwise {@code false}
+func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool {
+
+ for _, c := range configs.GetItems() {
+ if _, ok := c.GetState().(*RuleStopState); !ok {
+ return false
+ }
+ }
+ return true
+}
+
+// Full LL prediction termination.
+//
+//
+// Can we stop looking ahead during ATN simulation or is there some
+// uncertainty as to which alternative we will ultimately pick, after
+// consuming more input? Even if there are partial conflicts, we might know
+// that everything is going to resolve to the same minimum alternative. That
+// means we can stop since no more lookahead will change that fact. On the
+// other hand, there might be multiple conflicts that resolve to different
+// minimums. That means we need more look ahead to decide which of those
+// alternatives we should predict.
+//
+//
+// The basic idea is to split the set of configurations {@code C}, into
+// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with
+// non-conflicting configurations. Two configurations conflict if they have
+// identical {@link ATNConfig//state} and {@link ATNConfig//context} values
+// but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)}
+// and {@code (s, j, ctx, _)} for {@code i!=j}.
+//
+//
+// Reduce these configuration subsets to the set of possible alternatives.
+// You can compute the alternative subsets in one pass as follows:
+//
+//
+// {@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in
+// {@code C} holding {@code s} and {@code ctx} fixed.
+//
+//
+// Or in pseudo-code, for each configuration {@code c} in {@code C}:
+//
+//
+// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
+// alt and not pred
+//
+//
+//
+// The values in {@code map} are the set of {@code A_s,ctx} sets.
+//
+//
+// If {@code |A_s,ctx|=1} then there is no conflict associated with
+// {@code s} and {@code ctx}.
+//
+//
+// Reduce the subsets to singletons by choosing a minimum of each subset. If
+// the union of these alternative subsets is a singleton, then no amount of
+// more lookahead will help us. We will always pick that alternative. If,
+// however, there is more than one alternative, then we are uncertain which
+// alternative to predict and must continue looking for resolution. We may
+// or may not discover an ambiguity in the future, even if there are no
+// conflicting subsets this round.
+//
+//
+// The biggest sin is to terminate early because it means we've made a
+// decision but were uncertain as to the eventual outcome. We haven't used
+// enough lookahead. On the other hand, announcing a conflict too late is no
+// big deal; you will still have the conflict. It's just inefficient. It
+// might even look until the end of file.
+//
+//
+// No special consideration for semantic predicates is required because
+// predicates are evaluated on-the-fly for full LL prediction, ensuring that
+// no configuration contains a semantic context during the termination
+// check.
+//
+//
+// CONFLICTING CONFIGS
+//
+//
+// Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict
+// when {@code i!=j} but {@code x=x'}. Because we merge all
+// {@code (s, i, _)} configurations together, that means that there are at
+// most {@code n} configurations associated with state {@code s} for
+// {@code n} possible alternatives in the decision. The merged stacks
+// complicate the comparison of configuration contexts {@code x} and
+// {@code x'}. Sam checks to see if one is a subset of the other by calling
+// merge and checking to see if the merged result is either {@code x} or
+// {@code x'}. If the {@code x} associated with lowest alternative {@code i}
+// is the superset, then {@code i} is the only possible prediction since the
+// others resolve to {@code min(i)} as well. However, if {@code x} is
+// associated with {@code j>i} then at least one stack configuration for
+// {@code j} is not in conflict with alternative {@code i}. The algorithm
+// should keep going, looking for more lookahead due to the uncertainty.
+//
+//
+// For simplicity, I'm doing an equality check between {@code x} and
+// {@code x'} that lets the algorithm continue to consume lookahead longer
+// than necessary. The reason I like the equality is of course the
+// simplicity but also because that is the test you need to detect the
+// alternatives that are actually in conflict.
+//
+//
+// CONTINUE/STOP RULE
+//
+//
+// Continue if union of resolved alternative sets from non-conflicting and
+// conflicting alternative subsets has more than one alternative. We are
+// uncertain about which alternative to predict.
+//
+//
+// The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which
+// alternatives are still in the running for the amount of input we've
+// consumed at this point. The conflicting sets let us strip away
+// configurations that won't lead to more states because we resolve
+// conflicts to the configuration with a minimum alternate for the
+// conflicting set.
+//
+//
+// CASES
+//
+//
+//
+//
+// no conflicts and more than 1 alternative in set => continue
+//
+// If all states Report the same conflicting set of alternatives, then we
+// know we have the exact ambiguity set.
+//
+//
+// |A_i|>1 and
+// A_i = A_j for all i, j.
+//
+//
+// In other words, we continue examining lookahead until all {@code A_i}
+// have more than one alternative and all {@code A_i} are the same. If
+// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate
+// because the resolved set is {@code {1}}. To determine what the real
+// ambiguity is, we have to know whether the ambiguity is between one and
+// two or one and three so we keep going. We can only stop prediction when
+// we need exact ambiguity detection when the sets look like
+// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...
+func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int {
+ return PredictionModegetSingleViableAlt(altsets)
+}
+
+// Determines if every alternative subset in {@code altsets} contains more
+// than one alternative.
+//
+// @param altsets a collection of alternative subsets
+// @return {@code true} if every {@link BitSet} in {@code altsets} has
+// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
+func PredictionModeallSubsetsConflict(altsets []*BitSet) bool {
+ return !PredictionModehasNonConflictingAltSet(altsets)
+}
+
+// Determines if any single alternative subset in {@code altsets} contains
+// exactly one alternative.
+//
+// @param altsets a collection of alternative subsets
+// @return {@code true} if {@code altsets} contains a {@link BitSet} with
+// {@link BitSet//cardinality cardinality} 1, otherwise {@code false}
+func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool {
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ if alts.length() == 1 {
+ return true
+ }
+ }
+ return false
+}
+
+// Determines if any single alternative subset in {@code altsets} contains
+// more than one alternative.
+//
+// @param altsets a collection of alternative subsets
+// @return {@code true} if {@code altsets} contains a {@link BitSet} with
+// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
+func PredictionModehasConflictingAltSet(altsets []*BitSet) bool {
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ if alts.length() > 1 {
+ return true
+ }
+ }
+ return false
+}
+
+// Determines if every alternative subset in {@code altsets} is equivalent.
+//
+// @param altsets a collection of alternative subsets
+// @return {@code true} if every member of {@code altsets} is equal to the
+// others, otherwise {@code false}
+func PredictionModeallSubsetsEqual(altsets []*BitSet) bool {
+ var first *BitSet
+
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ if first == nil {
+ first = alts
+ } else if alts != first {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Returns the unique alternative predicted by all alternative subsets in
+// {@code altsets}. If no such alternative exists, this method returns
+// {@link ATN//INVALID_ALT_NUMBER}.
+//
+// @param altsets a collection of alternative subsets
+func PredictionModegetUniqueAlt(altsets []*BitSet) int {
+ all := PredictionModeGetAlts(altsets)
+ if all.length() == 1 {
+ return all.minValue()
+ }
+
+ return ATNInvalidAltNumber
+}
+
+// Gets the complete set of represented alternatives for a collection of
+// alternative subsets. This method returns the union of each {@link BitSet}
+// in {@code altsets}.
+//
+// @param altsets a collection of alternative subsets
+// @return the set of represented alternatives in {@code altsets}
+func PredictionModeGetAlts(altsets []*BitSet) *BitSet {
+ all := NewBitSet()
+ for _, alts := range altsets {
+ all.or(alts)
+ }
+ return all
+}
+
+// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set.
+// For each configuration {@code c} in {@code configs}:
+//
+//
+// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
+// alt and not pred
+//
+func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet {
+ configToAlts := NewJMap[ATNConfig, *BitSet, *ATNAltConfigComparator[ATNConfig]](atnAltCfgEqInst)
+
+ for _, c := range configs.GetItems() {
+
+ alts, ok := configToAlts.Get(c)
+ if !ok {
+ alts = NewBitSet()
+ configToAlts.Put(c, alts)
+ }
+ alts.add(c.GetAlt())
+ }
+
+ return configToAlts.Values()
+}
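+
+// Illustrative sketch (assumed configurations, not part of the upstream
+// runtime): for configs {(s,1,x), (s,2,x), (t,2,y)} the (state, context)
+// comparator groups the first two together, yielding subsets {1,2} and {2}.
+//
+//	altsets := PredictionModegetConflictingAltSubsets(configs)
+//	PredictionModehasConflictingAltSet(altsets) // true, because of {1,2}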
+
+// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set. For each
+// configuration {@code c} in {@code configs}:
+//
+// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
+//
+func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict {
+ m := NewAltDict()
+
+ for _, c := range configs.GetItems() {
+ alts := m.Get(c.GetState().String())
+ if alts == nil {
+ alts = NewBitSet()
+ m.put(c.GetState().String(), alts)
+ }
+ alts.(*BitSet).add(c.GetAlt())
+ }
+ return m
+}
+
+func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool {
+ values := PredictionModeGetStateToAltMap(configs).values()
+ for i := 0; i < len(values); i++ {
+ if values[i].(*BitSet).length() == 1 {
+ return true
+ }
+ }
+ return false
+}
+
+func PredictionModegetSingleViableAlt(altsets []*BitSet) int {
+ result := ATNInvalidAltNumber
+
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ minAlt := alts.minValue()
+ if result == ATNInvalidAltNumber {
+ result = minAlt
+ } else if result != minAlt { // more than 1 viable alt
+ return ATNInvalidAltNumber
+ }
+ }
+ return result
+}
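+
+// Illustrative sketch (not part of the upstream runtime): the minima of all
+// subsets must agree for a single viable alternative to exist.
+//
+//	// altsets {{1,2},{1,3}} -> 1 (both minima are 1)
+//	// altsets {{1,2},{2,3}} -> ATNInvalidAltNumber (minima 1 and 2 differ)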
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go
new file mode 100644
index 0000000..bfe542d
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go
@@ -0,0 +1,216 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strings"
+
+ "strconv"
+)
+
+type Recognizer interface {
+ GetLiteralNames() []string
+ GetSymbolicNames() []string
+ GetRuleNames() []string
+
+ Sempred(RuleContext, int, int) bool
+ Precpred(RuleContext, int) bool
+
+ GetState() int
+ SetState(int)
+ Action(RuleContext, int, int)
+ AddErrorListener(ErrorListener)
+ RemoveErrorListeners()
+ GetATN() *ATN
+ GetErrorListenerDispatch() ErrorListener
+}
+
+type BaseRecognizer struct {
+ listeners []ErrorListener
+ state int
+
+ RuleNames []string
+ LiteralNames []string
+ SymbolicNames []string
+ GrammarFileName string
+}
+
+func NewBaseRecognizer() *BaseRecognizer {
+ rec := new(BaseRecognizer)
+ rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE}
+ rec.state = -1
+ return rec
+}
+
+var tokenTypeMapCache = make(map[string]int)
+var ruleIndexMapCache = make(map[string]int)
+
+func (b *BaseRecognizer) checkVersion(toolVersion string) {
+ runtimeVersion := "4.12.0"
+ if runtimeVersion != toolVersion {
+ fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion)
+ }
+}
+
+func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int) {
+ panic("action not implemented on Recognizer!")
+}
+
+func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) {
+ b.listeners = append(b.listeners, listener)
+}
+
+func (b *BaseRecognizer) RemoveErrorListeners() {
+ b.listeners = make([]ErrorListener, 0)
+}
+
+func (b *BaseRecognizer) GetRuleNames() []string {
+ return b.RuleNames
+}
+
+func (b *BaseRecognizer) GetTokenNames() []string {
+ return b.LiteralNames
+}
+
+func (b *BaseRecognizer) GetSymbolicNames() []string {
+ return b.SymbolicNames
+}
+
+func (b *BaseRecognizer) GetLiteralNames() []string {
+ return b.LiteralNames
+}
+
+func (b *BaseRecognizer) GetState() int {
+ return b.state
+}
+
+func (b *BaseRecognizer) SetState(v int) {
+ b.state = v
+}
+
+//func (b *Recognizer) GetTokenTypeMap() {
+// var tokenNames = b.GetTokenNames()
+// if (tokenNames==nil) {
+// panic("The current recognizer does not provide a list of token names.")
+// }
+// var result = tokenTypeMapCache[tokenNames]
+// if(result==nil) {
+// result = tokenNames.reduce(function(o, k, i) { o[k] = i })
+// result.EOF = TokenEOF
+// tokenTypeMapCache[tokenNames] = result
+// }
+// return result
+//}
+
+// Get a map from rule names to rule indexes.
+//
+//
+// Used for XPath and tree pattern compilation.
+func (b *BaseRecognizer) GetRuleIndexMap() map[string]int {
+
+ panic("Method not defined!")
+ // var ruleNames = b.GetRuleNames()
+ // if (ruleNames==nil) {
+ // panic("The current recognizer does not provide a list of rule names.")
+ // }
+ //
+ // var result = ruleIndexMapCache[ruleNames]
+ // if(result==nil) {
+ // result = ruleNames.reduce(function(o, k, i) { o[k] = i })
+ // ruleIndexMapCache[ruleNames] = result
+ // }
+ // return result
+}
+
+func (b *BaseRecognizer) GetTokenType(tokenName string) int {
+ panic("Method not defined!")
+ // var ttype = b.GetTokenTypeMap()[tokenName]
+ // if (ttype !=nil) {
+ // return ttype
+ // } else {
+ // return TokenInvalidType
+ // }
+}
+
+//func (b *Recognizer) GetTokenTypeMap() map[string]int {
+// Vocabulary vocabulary = getVocabulary()
+//
+// Synchronized (tokenTypeMapCache) {
+// Map result = tokenTypeMapCache.Get(vocabulary)
+// if (result == null) {
+// result = new HashMap()
+// for (int i = 0; i < GetATN().maxTokenType; i++) {
+// String literalName = vocabulary.getLiteralName(i)
+// if (literalName != null) {
+// result.put(literalName, i)
+// }
+//
+// String symbolicName = vocabulary.GetSymbolicName(i)
+// if (symbolicName != null) {
+// result.put(symbolicName, i)
+// }
+// }
+//
+// result.put("EOF", Token.EOF)
+// result = Collections.unmodifiableMap(result)
+// tokenTypeMapCache.put(vocabulary, result)
+// }
+//
+// return result
+// }
+//}
+
+// What is the error header, normally line/character position information?
+func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string {
+ line := e.GetOffendingToken().GetLine()
+ column := e.GetOffendingToken().GetColumn()
+ return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column)
+}
+
+// How should a token be displayed in an error message? The default
+// is to display just the text, but during development you might
+// want to have a lot of information spit out. Override in that case
+// to use t.String() (which, for CommonToken, dumps everything about
+// the token). This is better than forcing you to override a method in
+// your token objects because you don't have to go modify your lexer
+// so that it creates a new Java type.
+//
+// @deprecated This method is not called by the ANTLR 4 Runtime. Specific
+// implementations of {@link ANTLRErrorStrategy} may provide a similar
+// feature when necessary. For example, see
+// {@link DefaultErrorStrategy//GetTokenErrorDisplay}.
+func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string {
+ if t == nil {
+ return ""
+ }
+ s := t.GetText()
+ if s == "" {
+ if t.GetTokenType() == TokenEOF {
+ s = ""
+ } else {
+ s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
+ }
+ }
+ s = strings.Replace(s, "\t", "\\t", -1)
+ s = strings.Replace(s, "\n", "\\n", -1)
+ s = strings.Replace(s, "\r", "\\r", -1)
+
+ return "'" + s + "'"
+}
+
+func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener {
+ return NewProxyErrorListener(b.listeners)
+}
+
+// subclass needs to override these if there are sempreds or actions
+// that the ATN interp needs to execute
+func (b *BaseRecognizer) Sempred(localctx RuleContext, ruleIndex int, actionIndex int) bool {
+ return true
+}
+
+func (b *BaseRecognizer) Precpred(localctx RuleContext, precedence int) bool {
+ return true
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go
new file mode 100644
index 0000000..210699b
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go
@@ -0,0 +1,114 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// A rule context is a record of a single rule invocation. It knows
+// which context invoked it, if any. If there is no parent context, then
+// naturally the invoking state is not valid. The parent link
+// provides a chain upwards from the current rule invocation to the root
+// of the invocation tree, forming a stack. We actually carry no
+// information about the rule associated with b context (except
+// when parsing). We keep only the state number of the invoking state from
+// the ATN submachine that invoked b. Contrast b with the s
+// pointer inside ParserRuleContext that tracks the current state
+// being "executed" for the current rule.
+//
+// The parent contexts are useful for computing lookahead sets and
+// getting error information.
+//
+// These objects are used during parsing and prediction.
+// For the special case of parsers, we use the subclass
+// ParserRuleContext.
+//
+// @see ParserRuleContext
+//
+
+type RuleContext interface {
+ RuleNode
+
+ GetInvokingState() int
+ SetInvokingState(int)
+
+ GetRuleIndex() int
+ IsEmpty() bool
+
+ GetAltNumber() int
+ SetAltNumber(altNumber int)
+
+ String([]string, RuleContext) string
+}
+
+type BaseRuleContext struct {
+ parentCtx RuleContext
+ invokingState int
+ RuleIndex int
+}
+
+func NewBaseRuleContext(parent RuleContext, invokingState int) *BaseRuleContext {
+
+ rn := new(BaseRuleContext)
+
+ // What context invoked b rule?
+ rn.parentCtx = parent
+
+ // What state invoked the rule associated with b context?
+ // The "return address" is the followState of invokingState
+ // If parent is nil, b should be -1.
+ if parent == nil {
+ rn.invokingState = -1
+ } else {
+ rn.invokingState = invokingState
+ }
+
+ return rn
+}
+
+func (b *BaseRuleContext) GetBaseRuleContext() *BaseRuleContext {
+ return b
+}
+
+func (b *BaseRuleContext) SetParent(v Tree) {
+ if v == nil {
+ b.parentCtx = nil
+ } else {
+ b.parentCtx = v.(RuleContext)
+ }
+}
+
+func (b *BaseRuleContext) GetInvokingState() int {
+ return b.invokingState
+}
+
+func (b *BaseRuleContext) SetInvokingState(t int) {
+ b.invokingState = t
+}
+
+func (b *BaseRuleContext) GetRuleIndex() int {
+ return b.RuleIndex
+}
+
+func (b *BaseRuleContext) GetAltNumber() int {
+ return ATNInvalidAltNumber
+}
+
+func (b *BaseRuleContext) SetAltNumber(altNumber int) {}
+
+// A context is empty if there is no invoking state, meaning nobody called
+// the current context.
+func (b *BaseRuleContext) IsEmpty() bool {
+ return b.invokingState == -1
+}
+
+// Return the combined text of all child nodes. This method only considers
+// tokens which have been added to the parse tree.
+//
+// Since tokens on hidden channels (e.g. whitespace or comments) are not
+// added to the parse trees, they will not appear in the output of b
+// method.
+//
+
+func (b *BaseRuleContext) GetParent() Tree {
+ return b.parentCtx
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go
new file mode 100644
index 0000000..a702e99
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go
@@ -0,0 +1,469 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// A tree structure used to record the semantic context in which
+// an ATN configuration is valid. It's either a single predicate,
+// a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}.
+//
+//
+// I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of
+// {@link SemanticContext} within the scope of this outer class.
+//
+
+type SemanticContext interface {
+ Equals(other Collectable[SemanticContext]) bool
+ Hash() int
+
+ evaluate(parser Recognizer, outerContext RuleContext) bool
+ evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext
+
+ String() string
+}
+
+func SemanticContextandContext(a, b SemanticContext) SemanticContext {
+ if a == nil || a == SemanticContextNone {
+ return b
+ }
+ if b == nil || b == SemanticContextNone {
+ return a
+ }
+ result := NewAND(a, b)
+ if len(result.opnds) == 1 {
+ return result.opnds[0]
+ }
+
+ return result
+}
+
+func SemanticContextorContext(a, b SemanticContext) SemanticContext {
+ if a == nil {
+ return b
+ }
+ if b == nil {
+ return a
+ }
+ if a == SemanticContextNone || b == SemanticContextNone {
+ return SemanticContextNone
+ }
+ result := NewOR(a, b)
+ if len(result.opnds) == 1 {
+ return result.opnds[0]
+ }
+
+ return result
+}
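+
+// Illustrative sketch (not part of the upstream runtime): the combinators
+// apply the usual boolean identities, with SemanticContextNone acting as the
+// constant "true" predicate.
+//
+//	SemanticContextandContext(p, SemanticContextNone) // p (p && true == p)
+//	SemanticContextorContext(p, SemanticContextNone)  // SemanticContextNone (p || true == true)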
+
+type Predicate struct {
+ ruleIndex int
+ predIndex int
+ isCtxDependent bool
+}
+
+func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate {
+ p := new(Predicate)
+
+ p.ruleIndex = ruleIndex
+ p.predIndex = predIndex
+ p.isCtxDependent = isCtxDependent // e.g., $i ref in pred
+ return p
+}
+
+//The default {@link SemanticContext}, which is semantically equivalent to
+//a predicate of the form {@code {true}?}.
+
+var SemanticContextNone = NewPredicate(-1, -1, false)
+
+func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
+ return p
+}
+
+func (p *Predicate) evaluate(parser Recognizer, outerContext RuleContext) bool {
+
+ var localctx RuleContext
+
+ if p.isCtxDependent {
+ localctx = outerContext
+ }
+
+ return parser.Sempred(localctx, p.ruleIndex, p.predIndex)
+}
+
+func (p *Predicate) Equals(other Collectable[SemanticContext]) bool {
+ if p == other {
+ return true
+ } else if _, ok := other.(*Predicate); !ok {
+ return false
+ } else {
+ return p.ruleIndex == other.(*Predicate).ruleIndex &&
+ p.predIndex == other.(*Predicate).predIndex &&
+ p.isCtxDependent == other.(*Predicate).isCtxDependent
+ }
+}
+
+func (p *Predicate) Hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, p.ruleIndex)
+ h = murmurUpdate(h, p.predIndex)
+ if p.isCtxDependent {
+ h = murmurUpdate(h, 1)
+ } else {
+ h = murmurUpdate(h, 0)
+ }
+ return murmurFinish(h, 3)
+}
+
+func (p *Predicate) String() string {
+ return "{" + strconv.Itoa(p.ruleIndex) + ":" + strconv.Itoa(p.predIndex) + "}?"
+}
+
+type PrecedencePredicate struct {
+ precedence int
+}
+
+func NewPrecedencePredicate(precedence int) *PrecedencePredicate {
+
+ p := new(PrecedencePredicate)
+ p.precedence = precedence
+
+ return p
+}
+
+func (p *PrecedencePredicate) evaluate(parser Recognizer, outerContext RuleContext) bool {
+ return parser.Precpred(outerContext, p.precedence)
+}
+
+func (p *PrecedencePredicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
+ if parser.Precpred(outerContext, p.precedence) {
+ return SemanticContextNone
+ }
+
+ return nil
+}
+
+func (p *PrecedencePredicate) compareTo(other *PrecedencePredicate) int {
+ return p.precedence - other.precedence
+}
+
+func (p *PrecedencePredicate) Equals(other Collectable[SemanticContext]) bool {
+
+ var op *PrecedencePredicate
+ var ok bool
+ if op, ok = other.(*PrecedencePredicate); !ok {
+ return false
+ }
+
+ if p == op {
+ return true
+ }
+
+ return p.precedence == other.(*PrecedencePredicate).precedence
+}
+
+func (p *PrecedencePredicate) Hash() int {
+ h := uint32(1)
+ h = 31*h + uint32(p.precedence)
+ return int(h)
+}
+
+func (p *PrecedencePredicate) String() string {
+ return "{" + strconv.Itoa(p.precedence) + ">=prec}?"
+}
+
+func PrecedencePredicatefilterPrecedencePredicates(set *JStore[SemanticContext, Comparator[SemanticContext]]) []*PrecedencePredicate {
+ result := make([]*PrecedencePredicate, 0)
+
+ set.Each(func(v SemanticContext) bool {
+ if c2, ok := v.(*PrecedencePredicate); ok {
+ result = append(result, c2)
+ }
+ return true
+ })
+
+ return result
+}
+
+// A semantic context which is true whenever none of the contained contexts
+// is false.
+
+type AND struct {
+ opnds []SemanticContext
+}
+
+func NewAND(a, b SemanticContext) *AND {
+
+ operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst)
+ if aa, ok := a.(*AND); ok {
+ for _, o := range aa.opnds {
+ operands.Put(o)
+ }
+ } else {
+ operands.Put(a)
+ }
+
+ if ba, ok := b.(*AND); ok {
+ for _, o := range ba.opnds {
+ operands.Put(o)
+ }
+ } else {
+ operands.Put(b)
+ }
+ precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
+ if len(precedencePredicates) > 0 {
+ // interested in the transition with the lowest precedence
+ var reduced *PrecedencePredicate
+
+ for _, p := range precedencePredicates {
+ if reduced == nil || p.precedence < reduced.precedence {
+ reduced = p
+ }
+ }
+
+ operands.Put(reduced)
+ }
+
+ vs := operands.Values()
+ opnds := make([]SemanticContext, len(vs))
+ for i, v := range vs {
+ opnds[i] = v.(SemanticContext)
+ }
+
+ and := new(AND)
+ and.opnds = opnds
+
+ return and
+}
+
+func (a *AND) Equals(other Collectable[SemanticContext]) bool {
+ if a == other {
+ return true
+ }
+ if _, ok := other.(*AND); !ok {
+ return false
+ } else {
+ for i, v := range other.(*AND).opnds {
+ if !a.opnds[i].Equals(v) {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+// {@inheritDoc}
+//
+//
+// The evaluation of predicates by a context is short-circuiting, but
+// unordered.
+func (a *AND) evaluate(parser Recognizer, outerContext RuleContext) bool {
+ for i := 0; i < len(a.opnds); i++ {
+ if !a.opnds[i].evaluate(parser, outerContext) {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *AND) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
+ differs := false
+ operands := make([]SemanticContext, 0)
+
+ for i := 0; i < len(a.opnds); i++ {
+ context := a.opnds[i]
+ evaluated := context.evalPrecedence(parser, outerContext)
+ differs = differs || (evaluated != context)
+ if evaluated == nil {
+ // The AND context is false if any element is false
+ return nil
+ } else if evaluated != SemanticContextNone {
+ // Reduce the result by Skipping true elements
+ operands = append(operands, evaluated)
+ }
+ }
+ if !differs {
+ return a
+ }
+
+ if len(operands) == 0 {
+ // all elements were true, so the AND context is true
+ return SemanticContextNone
+ }
+
+ var result SemanticContext
+
+ for _, o := range operands {
+ if result == nil {
+ result = o
+ } else {
+ result = SemanticContextandContext(result, o)
+ }
+ }
+
+ return result
+}
+
+func (a *AND) Hash() int {
+ h := murmurInit(37) // Init with a value different from OR
+ for _, op := range a.opnds {
+ h = murmurUpdate(h, op.Hash())
+ }
+ return murmurFinish(h, len(a.opnds))
+}
+
+func (a *OR) Hash() int {
+ h := murmurInit(41) // Init with a value different from AND
+ for _, op := range a.opnds {
+ h = murmurUpdate(h, op.Hash())
+ }
+ return murmurFinish(h, len(a.opnds))
+}
+
+func (a *AND) String() string {
+ s := ""
+
+ for _, o := range a.opnds {
+ s += "&& " + fmt.Sprint(o)
+ }
+
+ if len(s) > 3 {
+ return s[3:] // strip the leading "&& " separator
+ }
+
+ return s
+}
+
+//
+// A semantic context which is true whenever at least one of the contained
+// contexts is true.
+//
+
+type OR struct {
+ opnds []SemanticContext
+}
+
+func NewOR(a, b SemanticContext) *OR {
+
+ operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst)
+ if aa, ok := a.(*OR); ok {
+ for _, o := range aa.opnds {
+ operands.Put(o)
+ }
+ } else {
+ operands.Put(a)
+ }
+
+ if ba, ok := b.(*OR); ok {
+ for _, o := range ba.opnds {
+ operands.Put(o)
+ }
+ } else {
+ operands.Put(b)
+ }
+ precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
+ if len(precedencePredicates) > 0 {
+ // interested in the transition with the lowest precedence
+ var reduced *PrecedencePredicate
+
+ for _, p := range precedencePredicates {
+ if reduced == nil || p.precedence > reduced.precedence {
+ reduced = p
+ }
+ }
+
+ operands.Put(reduced)
+ }
+
+ vs := operands.Values()
+
+ opnds := make([]SemanticContext, len(vs))
+ for i, v := range vs {
+ opnds[i] = v.(SemanticContext)
+ }
+
+ o := new(OR)
+ o.opnds = opnds
+
+ return o
+}
+
+func (o *OR) Equals(other Collectable[SemanticContext]) bool {
+ if o == other {
+ return true
+ } else if _, ok := other.(*OR); !ok {
+ return false
+ } else {
+ for i, v := range other.(*OR).opnds {
+ if !o.opnds[i].Equals(v) {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+//
+// The evaluation of predicates by o context is short-circuiting, but
+// unordered.
+func (o *OR) evaluate(parser Recognizer, outerContext RuleContext) bool {
+ for i := 0; i < len(o.opnds); i++ {
+ if o.opnds[i].evaluate(parser, outerContext) {
+ return true
+ }
+ }
+ return false
+}
+
+func (o *OR) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
+ differs := false
+ operands := make([]SemanticContext, 0)
+ for i := 0; i < len(o.opnds); i++ {
+ context := o.opnds[i]
+ evaluated := context.evalPrecedence(parser, outerContext)
+ differs = differs || (evaluated != context)
+ if evaluated == SemanticContextNone {
+ // The OR context is true if any element is true
+ return SemanticContextNone
+ } else if evaluated != nil {
+ // Reduce the result by Skipping false elements
+ operands = append(operands, evaluated)
+ }
+ }
+ if !differs {
+ return o
+ }
+ if len(operands) == 0 {
+ // all elements were false, so the OR context is false
+ return nil
+ }
+ var result SemanticContext
+
+ for _, o := range operands {
+ if result == nil {
+ result = o
+ } else {
+ result = SemanticContextorContext(result, o)
+ }
+ }
+
+ return result
+}
+
+func (o *OR) String() string {
+ s := ""
+
+ for _, o := range o.opnds {
+ s += "|| " + fmt.Sprint(o)
+ }
+
+ if len(s) > 3 {
+ return s[3:] // strip the leading "|| " separator
+ }
+
+ return s
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go
new file mode 100644
index 0000000..f73b06b
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go
@@ -0,0 +1,209 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "strconv"
+ "strings"
+)
+
+type TokenSourceCharStreamPair struct {
+ tokenSource TokenSource
+ charStream CharStream
+}
+
+// A token has properties: text, type, line, character position in the line
+// (so we can ignore tabs), token channel, index, and source from which
+// we obtained this token.
+
+type Token interface {
+ GetSource() *TokenSourceCharStreamPair
+ GetTokenType() int
+ GetChannel() int
+ GetStart() int
+ GetStop() int
+ GetLine() int
+ GetColumn() int
+
+ GetText() string
+ SetText(s string)
+
+ GetTokenIndex() int
+ SetTokenIndex(v int)
+
+ GetTokenSource() TokenSource
+ GetInputStream() CharStream
+}
+
+type BaseToken struct {
+ source *TokenSourceCharStreamPair
+ tokenType int // token type of the token
+ channel int // The parser ignores everything not on DEFAULT_CHANNEL
+ start int // optional return -1 if not implemented.
+ stop int // optional return -1 if not implemented.
+ tokenIndex int // from 0..n-1 of the token object in the input stream
+ line int // line=1..n of the 1st character
+ column int // beginning of the line at which it occurs, 0..n-1
+ text string // text of the token.
+ readOnly bool
+}
+
+const (
+ TokenInvalidType = 0
+
+ // During lookahead operations, this "token" signifies we hit rule end ATN state
+ // and did not follow it despite needing to.
+ TokenEpsilon = -2
+
+ TokenMinUserTokenType = 1
+
+ TokenEOF = -1
+
+ // All tokens go to the parser (unless Skip() is called in that rule)
+ // on a particular "channel". The parser tunes to a particular channel
+ // so that whitespace etc... can go to the parser on a "hidden" channel.
+
+ TokenDefaultChannel = 0
+
+ // Anything on a channel other than DEFAULT_CHANNEL is not parsed
+ // by the parser.
+
+ TokenHiddenChannel = 1
+)
+
+func (b *BaseToken) GetChannel() int {
+ return b.channel
+}
+
+func (b *BaseToken) GetStart() int {
+ return b.start
+}
+
+func (b *BaseToken) GetStop() int {
+ return b.stop
+}
+
+func (b *BaseToken) GetLine() int {
+ return b.line
+}
+
+func (b *BaseToken) GetColumn() int {
+ return b.column
+}
+
+func (b *BaseToken) GetTokenType() int {
+ return b.tokenType
+}
+
+func (b *BaseToken) GetSource() *TokenSourceCharStreamPair {
+ return b.source
+}
+
+func (b *BaseToken) GetTokenIndex() int {
+ return b.tokenIndex
+}
+
+func (b *BaseToken) SetTokenIndex(v int) {
+ b.tokenIndex = v
+}
+
+func (b *BaseToken) GetTokenSource() TokenSource {
+ return b.source.tokenSource
+}
+
+func (b *BaseToken) GetInputStream() CharStream {
+ return b.source.charStream
+}
+
+type CommonToken struct {
+ *BaseToken
+}
+
+func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken {
+
+ t := new(CommonToken)
+
+ t.BaseToken = new(BaseToken)
+
+ t.source = source
+ t.tokenType = tokenType
+ t.channel = channel
+ t.start = start
+ t.stop = stop
+ t.tokenIndex = -1
+ if t.source.tokenSource != nil {
+ t.line = source.tokenSource.GetLine()
+ t.column = source.tokenSource.GetCharPositionInLine()
+ } else {
+ t.column = -1
+ }
+ return t
+}
+
+// An empty {@link Pair} which is used as the default value of
+// {@link //source} for tokens that do not have a source.
+
+//CommonToken.EMPTY_SOURCE = [ nil, nil ]
+
+// Constructs a new {@link CommonToken} as a copy of another {@link Token}.
+//
+//
+// If {@code oldToken} is also a {@link CommonToken} instance, the newly
+// constructed token will share a reference to the {@link //text} field and
+// the {@link Pair} stored in {@link //source}. Otherwise, {@link //text} will
+// be assigned the result of calling {@link //GetText}, and {@link //source}
+// will be constructed from the result of {@link Token//GetTokenSource} and
+// {@link Token//GetInputStream}.
+//
+// @param oldToken The token to copy.
+func (c *CommonToken) clone() *CommonToken {
+ t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop)
+ t.tokenIndex = c.GetTokenIndex()
+ t.line = c.GetLine()
+ t.column = c.GetColumn()
+ t.text = c.GetText()
+ return t
+}
+
+func (c *CommonToken) GetText() string {
+ if c.text != "" {
+ return c.text
+ }
+ input := c.GetInputStream()
+ if input == nil {
+ return ""
+ }
+ n := input.Size()
+ if c.start < n && c.stop < n {
+ return input.GetTextFromInterval(NewInterval(c.start, c.stop))
+ }
+ return ""
+}
+
+func (c *CommonToken) SetText(text string) {
+ c.text = text
+}
+
+func (c *CommonToken) String() string {
+ txt := c.GetText()
+ if txt != "" {
+ txt = strings.Replace(txt, "\n", "\\n", -1)
+ txt = strings.Replace(txt, "\r", "\\r", -1)
+ txt = strings.Replace(txt, "\t", "\\t", -1)
+ } else {
+ txt = ""
+ }
+
+ var ch string
+ if c.channel > 0 {
+ ch = ",channel=" + strconv.Itoa(c.channel)
+ } else {
+ ch = ""
+ }
+
+ return "[@" + strconv.Itoa(c.tokenIndex) + "," + strconv.Itoa(c.start) + ":" + strconv.Itoa(c.stop) + "='" +
+ txt + "',<" + strconv.Itoa(c.tokenType) + ">" +
+ ch + "," + strconv.Itoa(c.line) + ":" + strconv.Itoa(c.column) + "]"
+}
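+
+// Illustrative sketch (assumed values, not part of the upstream runtime):
+// String renders the familiar ANTLR token form; a token with index 0,
+// span 0:2, text "abc", type 5 on the default channel at line 1, column 0
+// prints as:
+//
+//	[@0,0:2='abc',<5>,1:0]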
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_source.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_source.go
new file mode 100644
index 0000000..a3f36ea
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_source.go
@@ -0,0 +1,17 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type TokenSource interface {
+ NextToken() Token
+ Skip()
+ More()
+ GetLine() int
+ GetCharPositionInLine() int
+ GetInputStream() CharStream
+ GetSourceName() string
+ setTokenFactory(factory TokenFactory)
+ GetTokenFactory() TokenFactory
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go
new file mode 100644
index 0000000..1527d43
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go
@@ -0,0 +1,20 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type TokenStream interface {
+ IntStream
+
+ LT(k int) Token
+
+ Get(index int) Token
+ GetTokenSource() TokenSource
+ SetTokenSource(TokenSource)
+
+ GetAllText() string
+ GetTextFromInterval(*Interval) string
+ GetTextFromRuleContext(RuleContext) string
+ GetTextFromTokens(Token, Token) string
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go
new file mode 100644
index 0000000..b3e38af
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go
@@ -0,0 +1,659 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "bytes"
+ "fmt"
+)
+
+//
+// Useful for rewriting out a buffered input token stream after doing some
+// augmentation or other manipulations on it.
+
+//
+// You can insert stuff, replace, and delete chunks. Note that the operations
+// are done lazily--only if you convert the buffer to a {@link String} with
+// {@link TokenStream#getText()}. This is very efficient because you are not
+// moving data around all the time. As the buffer of tokens is converted to
+// strings, the {@link #getText()} method(s) scan the input token stream and
+// check to see if there is an operation at the current index. If so, the
+// operation is done and then normal {@link String} rendering continues on the
+// buffer. This is like having multiple Turing machine instruction streams
+// (programs) operating on a single input tape. :)
+//
+
+// This rewriter makes no modifications to the token stream. It does not ask the
+// stream to fill itself up nor does it advance the input cursor. The token
+// stream {@link TokenStream#index()} will return the same value before and
+// after any {@link #getText()} call.
+
+//
+// The rewriter only works on tokens that you have in the buffer and ignores the
+// current input cursor. If you are buffering tokens on-demand, calling
+// {@link #getText()} halfway through the input will only do rewrites for those
+// tokens in the first half of the file.
+
+//
+// Since the operations are done lazily at {@link #getText}-time, operations do
+// not screw up the token index values. That is, an insert operation at token
+// index {@code i} does not change the index values for tokens
+// {@code i}+1..n-1.
+
+//
+// Because operations never actually alter the buffer, you may always get the
+// original token stream back without undoing anything. Since the instructions
+// are queued up, you can easily simulate transactions and roll back any changes
+// if there is an error just by removing instructions. For example,
+
+//
+// CharStream input = new ANTLRFileStream("input");
+// TLexer lex = new TLexer(input);
+// CommonTokenStream tokens = new CommonTokenStream(lex);
+// T parser = new T(tokens);
+// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
+// parser.startRule();
+//
+
+//
+// Then in the rules, you can execute (assuming rewriter is visible):
+
+//
+// Token t,u;
+// ...
+// rewriter.insertAfter(t, "text to put after t");
+// rewriter.insertAfter(u, "text after u");
+// System.out.println(rewriter.getText());
+//
+
+//
+// You can also have multiple "instruction streams" and get multiple rewrites
+// from a single pass over the input. Just name the instruction streams and use
+// that name again when printing the buffer. This could be useful for generating
+// a C file and also its header file--all from the same buffer:
+
+//
+// rewriter.insertAfter("pass1", t, "text to put after t");}
+// rewriter.insertAfter("pass2", u, "text after u");}
+// System.out.println(rewriter.getText("pass1"));
+// System.out.println(rewriter.getText("pass2"));
+//
+
+//
+// If you don't use named rewrite streams, a "default" stream is used as the
+// first example shows.
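+
+//
+// A rough Go equivalent of the first example (a sketch only: TLexer, NewT,
+// StartRule, and the token t are illustrative generated-code names, not part
+// of this runtime):
+
+//
+// input := antlr.NewInputStream("some input text")
+// lex := NewTLexer(input)
+// tokens := antlr.NewCommonTokenStream(lex, 0)
+// parser := NewT(tokens)
+// rewriter := antlr.NewTokenStreamRewriter(tokens)
+// parser.StartRule()
+// rewriter.InsertAfterDefault(t.GetTokenIndex(), "text to put after t")
+// fmt.Println(rewriter.GetTextDefault())
+//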
+
+const (
+ Default_Program_Name = "default"
+ Program_Init_Size = 100
+ Min_Token_Index = 0
+)
+
+// Define the rewrite operation hierarchy
+
+type RewriteOperation interface {
+ // Execute the rewrite operation by possibly adding to the buffer.
+ // Return the index of the next token to operate on.
+ Execute(buffer *bytes.Buffer) int
+ String() string
+ GetInstructionIndex() int
+ GetIndex() int
+ GetText() string
+ GetOpName() string
+ GetTokens() TokenStream
+ SetInstructionIndex(val int)
+ SetIndex(int)
+ SetText(string)
+ SetOpName(string)
+ SetTokens(TokenStream)
+}
+
+type BaseRewriteOperation struct {
+ //Current index of rewrites list
+ instruction_index int
+ //Token buffer index
+ index int
+ //Substitution text
+ text string
+ //Actual operation name
+ op_name string
+ //Pointer to token steam
+ tokens TokenStream
+}
+
+func (op *BaseRewriteOperation) GetInstructionIndex() int {
+ return op.instruction_index
+}
+
+func (op *BaseRewriteOperation) GetIndex() int {
+ return op.index
+}
+
+func (op *BaseRewriteOperation) GetText() string {
+ return op.text
+}
+
+func (op *BaseRewriteOperation) GetOpName() string {
+ return op.op_name
+}
+
+func (op *BaseRewriteOperation) GetTokens() TokenStream {
+ return op.tokens
+}
+
+func (op *BaseRewriteOperation) SetInstructionIndex(val int) {
+ op.instruction_index = val
+}
+
+func (op *BaseRewriteOperation) SetIndex(val int) {
+ op.index = val
+}
+
+func (op *BaseRewriteOperation) SetText(val string) {
+ op.text = val
+}
+
+func (op *BaseRewriteOperation) SetOpName(val string) {
+ op.op_name = val
+}
+
+func (op *BaseRewriteOperation) SetTokens(val TokenStream) {
+ op.tokens = val
+}
+
+func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int {
+ return op.index
+}
+
+func (op *BaseRewriteOperation) String() string {
+ return fmt.Sprintf("<%s@%v:\"%s\">",
+ op.op_name,
+ op.tokens.Get(op.GetIndex()),
+ op.text,
+ )
+}
+
+type InsertBeforeOp struct {
+ BaseRewriteOperation
+}
+
+func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp {
+ return &InsertBeforeOp{BaseRewriteOperation: BaseRewriteOperation{
+ index: index,
+ text: text,
+ op_name: "InsertBeforeOp",
+ tokens: stream,
+ }}
+}
+
+func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int {
+ buffer.WriteString(op.text)
+ if op.tokens.Get(op.index).GetTokenType() != TokenEOF {
+ buffer.WriteString(op.tokens.Get(op.index).GetText())
+ }
+ return op.index + 1
+}
+
+func (op *InsertBeforeOp) String() string {
+ return op.BaseRewriteOperation.String()
+}
+
+// Distinguish between insert after/before to do the "insert afters"
+// first and then the "insert befores" at same index. Implementation
+// of "insert after" is "insert before index+1".
+
+type InsertAfterOp struct {
+ BaseRewriteOperation
+}
+
+func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp {
+ return &InsertAfterOp{BaseRewriteOperation: BaseRewriteOperation{
+ index: index + 1,
+ text: text,
+ op_name: "InsertAfterOp",
+ tokens: stream,
+ }}
+}
+
+func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int {
+ buffer.WriteString(op.text)
+ if op.tokens.Get(op.index).GetTokenType() != TokenEOF {
+ buffer.WriteString(op.tokens.Get(op.index).GetText())
+ }
+ return op.index + 1
+}
+
+func (op *InsertAfterOp) String() string {
+ return op.BaseRewriteOperation.String()
+}
+
+// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
+// instructions.
+type ReplaceOp struct {
+ BaseRewriteOperation
+ LastIndex int
+}
+
+func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp {
+ return &ReplaceOp{
+ BaseRewriteOperation: BaseRewriteOperation{
+ index: from,
+ text: text,
+ op_name: "ReplaceOp",
+ tokens: stream,
+ },
+ LastIndex: to,
+ }
+}
+
+func (op *ReplaceOp) Execute(buffer *bytes.Buffer) int {
+ if op.text != "" {
+ buffer.WriteString(op.text)
+ }
+ return op.LastIndex + 1
+}
+
+func (op *ReplaceOp) String() string {
+ if op.text == "" {
+ return fmt.Sprintf("<DeleteOp@%v..%v>",
+ op.tokens.Get(op.index), op.tokens.Get(op.LastIndex))
+ }
+ return fmt.Sprintf("<ReplaceOp@%v..%v:\"%s\">",
+ op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text)
+}
+
+type TokenStreamRewriter struct {
+ //Our source stream
+ tokens TokenStream
+ // You may have multiple, named streams of rewrite operations.
+ // I'm calling these things "programs."
+ // Maps String (name) → rewrite (List)
+ programs map[string][]RewriteOperation
+ last_rewrite_token_indexes map[string]int
+}
+
+func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter {
+ return &TokenStreamRewriter{
+ tokens: tokens,
+ programs: map[string][]RewriteOperation{
+ Default_Program_Name: make([]RewriteOperation, 0, Program_Init_Size),
+ },
+ last_rewrite_token_indexes: map[string]int{},
+ }
+}
+
+func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream {
+ return tsr.tokens
+}
+
+// Rollback the instruction stream for a program so that
+// the indicated instruction (via instructionIndex) is no
+// longer in the stream. UNTESTED!
+func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int) {
+ is, ok := tsr.programs[program_name]
+ if ok {
+ tsr.programs[program_name] = is[Min_Token_Index:instruction_index]
+ }
+}
+
+func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int) {
+ tsr.Rollback(Default_Program_Name, instruction_index)
+}
+
+// Reset the program so that no instructions exist
+func (tsr *TokenStreamRewriter) DeleteProgram(program_name string) {
+ tsr.Rollback(program_name, Min_Token_Index) //TODO: double test on that cause lower bound is not included
+}
+
+func (tsr *TokenStreamRewriter) DeleteProgramDefault() {
+ tsr.DeleteProgram(Default_Program_Name)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string) {
+ // to insert after, just insert before next index (even if past end)
+ var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens)
+ rewrites := tsr.GetProgram(program_name)
+ op.SetInstructionIndex(len(rewrites))
+ tsr.AddToProgram(program_name, op)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string) {
+ tsr.InsertAfter(Default_Program_Name, index, text)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string) {
+ tsr.InsertAfter(program_name, token.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter) InsertBefore(program_name string, index int, text string) {
+ var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens)
+ rewrites := tsr.GetProgram(program_name)
+ op.SetInstructionIndex(len(rewrites))
+ tsr.AddToProgram(program_name, op)
+}
+
+func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string) {
+ tsr.InsertBefore(Default_Program_Name, index, text)
+}
+
+func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string, token Token, text string) {
+ tsr.InsertBefore(program_name, token.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text string) {
+ if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size() {
+ panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)",
+ from, to, tsr.tokens.Size()))
+ }
+ var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens)
+ rewrites := tsr.GetProgram(program_name)
+ op.SetInstructionIndex(len(rewrites))
+ tsr.AddToProgram(program_name, op)
+}
+
+func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string) {
+ tsr.Replace(Default_Program_Name, from, to, text)
+}
+
+func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string) {
+ tsr.ReplaceDefault(index, index, text)
+}
+
+func (tsr *TokenStreamRewriter) ReplaceToken(program_name string, from, to Token, text string) {
+ tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string) {
+ tsr.ReplaceToken(Default_Program_Name, from, to, text)
+}
+
+func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string) {
+ tsr.ReplaceTokenDefault(index, index, text)
+}
+
+func (tsr *TokenStreamRewriter) Delete(program_name string, from, to int) {
+ tsr.Replace(program_name, from, to, "")
+}
+
+func (tsr *TokenStreamRewriter) DeleteDefault(from, to int) {
+ tsr.Delete(Default_Program_Name, from, to)
+}
+
+func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int) {
+ tsr.DeleteDefault(index, index)
+}
+
+func (tsr *TokenStreamRewriter) DeleteToken(program_name string, from, to Token) {
+ tsr.ReplaceToken(program_name, from, to, "")
+}
+
+func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token) {
+ tsr.DeleteToken(Default_Program_Name, from, to)
+}
+
+func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(program_name string) int {
+ i, ok := tsr.last_rewrite_token_indexes[program_name]
+ if !ok {
+ return -1
+ }
+ return i
+}
+
+func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int {
+ return tsr.GetLastRewriteTokenIndex(Default_Program_Name)
+}
+
+func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(program_name string, i int) {
+ tsr.last_rewrite_token_indexes[program_name] = i
+}
+
+func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation {
+ is := make([]RewriteOperation, 0, Program_Init_Size)
+ tsr.programs[name] = is
+ return is
+}
+
+func (tsr *TokenStreamRewriter) AddToProgram(name string, op RewriteOperation) {
+ is := tsr.GetProgram(name)
+ is = append(is, op)
+ tsr.programs[name] = is
+}
+
+func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation {
+ is, ok := tsr.programs[name]
+ if !ok {
+ is = tsr.InitializeProgram(name)
+ }
+ return is
+}
+
+// Return the text from the original tokens altered per the
+// instructions given to this rewriter.
+func (tsr *TokenStreamRewriter) GetTextDefault() string {
+ return tsr.GetText(
+ Default_Program_Name,
+ NewInterval(0, tsr.tokens.Size()-1))
+}
+
+// Return the text from the original tokens altered per the
+// instructions given to this rewriter.
+func (tsr *TokenStreamRewriter) GetText(program_name string, interval *Interval) string {
+ rewrites := tsr.programs[program_name]
+ start := interval.Start
+ stop := interval.Stop
+ // ensure start/end are in range
+ stop = min(stop, tsr.tokens.Size()-1)
+ start = max(start, 0)
+ if rewrites == nil || len(rewrites) == 0 {
+ return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute
+ }
+ buf := bytes.Buffer{}
+ // First, optimize instruction stream
+ indexToOp := reduceToSingleOperationPerIndex(rewrites)
+ // Walk buffer, executing instructions and emitting tokens
+ for i := start; i <= stop && i < tsr.tokens.Size(); {
+ op := indexToOp[i]
+ delete(indexToOp, i) // remove so any left have index size-1
+ t := tsr.tokens.Get(i)
+ if op == nil {
+ // no operation at that index, just dump token
+ if t.GetTokenType() != TokenEOF {
+ buf.WriteString(t.GetText())
+ }
+ i++ // move to next token
+ } else {
+ i = op.Execute(&buf) // execute operation and skip
+ }
+ }
+ // include stuff after end if it's the last index in the buffer. So, if
+ // they did an insertAfter(lastValidIndex, "foo"), include foo if
+ // end == lastValidIndex.
+ if stop == tsr.tokens.Size()-1 {
+ // Scan any remaining operations after last token
+ // should be included (they will be inserts).
+ for _, op := range indexToOp {
+ if op.GetIndex() >= tsr.tokens.Size()-1 {
+ buf.WriteString(op.GetText())
+ }
+ }
+ }
+ return buf.String()
+}
+
+// We need to combine operations and report invalid operations (like
+// overlapping replaces that are not completely nested). Inserts to the
+// same index need to be combined etc... Here are the cases:
+//
+// I.i.u I.j.v leave alone, nonoverlapping
+// I.i.u I.i.v combine: Iivu
+//
+// R.i-j.u R.x-y.v | i-j in x-y delete first R
+// R.i-j.u R.i-j.v delete first R
+// R.i-j.u R.x-y.v | x-y in i-j ERROR
+// R.i-j.u R.x-y.v | boundaries overlap ERROR
+//
+// Delete special case of replace (text==null):
+// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
+//
+// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before
+// we're not deleting i)
+// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping
+// R.x-y.v I.i.u | i in x-y ERROR
+// R.x-y.v I.x.u R.x-y.uv (combine, delete I)
+// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping
+//
+// I.i.u = insert u before op @ index i
+// R.x-y.u = replace x-y indexed tokens with u
+//
+// First we need to examine replaces. For any replace op:
+//
+// 1. wipe out any insertions before op within that range.
+// 2. Drop any replace op before that is contained completely within
+// that range.
+// 3. Throw exception upon boundary overlap with any previous replace.
+//
+// Then we can deal with inserts:
+//
+// 1. for any inserts to same index, combine even if not adjacent.
+// 2. for any prior replace with same left boundary, combine this
+// insert with replace and delete this replace.
+// 3. throw exception if index in same range as previous replace
+//
+// Don't actually delete; make op null in list. Easier to walk list.
+// Later we can throw as we add to index → op map.
+//
+// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
+// inserted stuff would be before the replace range. But, if you
+// add tokens in front of a method body '{' and then delete the method
+// body, I think the stuff before the '{' you added should disappear too.
+//
+// Return a map from token index to operation.
+func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation {
+ // WALK REPLACES
+ for i := 0; i < len(rewrites); i++ {
+ op := rewrites[i]
+ if op == nil {
+ continue
+ }
+ rop, ok := op.(*ReplaceOp)
+ if !ok {
+ continue
+ }
+ // Wipe prior inserts within range
+ for j := 0; j < i && j < len(rewrites); j++ {
+ if iop, ok := rewrites[j].(*InsertBeforeOp); ok {
+ if iop.index == rop.index {
+ // E.g., insert before 2, delete 2..2; update replace
+ // text to include insert before, kill insert
+ rewrites[iop.instruction_index] = nil
+ if rop.text != "" {
+ rop.text = iop.text + rop.text
+ } else {
+ rop.text = iop.text
+ }
+ } else if iop.index > rop.index && iop.index <= rop.LastIndex {
+ // delete insert as it's a no-op.
+ rewrites[iop.instruction_index] = nil
+ }
+ }
+ }
+ // Drop any prior replaces contained within
+ for j := 0; j < i && j < len(rewrites); j++ {
+ if prevop, ok := rewrites[j].(*ReplaceOp); ok {
+ if prevop.index >= rop.index && prevop.LastIndex <= rop.LastIndex {
+ // delete replace as it's a no-op.
+ rewrites[prevop.instruction_index] = nil
+ continue
+ }
+ // throw exception unless disjoint or identical
+ disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex
+ // Delete special case of replace (text==null):
+ // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
+ if prevop.text == "" && rop.text == "" && !disjoint {
+ rewrites[prevop.instruction_index] = nil
+ rop.index = min(prevop.index, rop.index)
+ rop.LastIndex = max(prevop.LastIndex, rop.LastIndex)
+ println("new rop" + rop.String()) //TODO: remove console write, taken from Java version
+ } else if !disjoint {
+ panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String())
+ }
+ }
+ }
+ }
+ // WALK INSERTS
+ for i := 0; i < len(rewrites); i++ {
+ op := rewrites[i]
+ if op == nil {
+ continue
+ }
+ //hack to replicate inheritance in composition
+ _, iok := rewrites[i].(*InsertBeforeOp)
+ _, aok := rewrites[i].(*InsertAfterOp)
+ if !iok && !aok {
+ continue
+ }
+ iop := rewrites[i]
+ // combine current insert with prior if any at same index
+ // deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic
+ for j := 0; j < i && j < len(rewrites); j++ {
+ if nextIop, ok := rewrites[j].(*InsertAfterOp); ok {
+ if nextIop.index == iop.GetIndex() {
+ iop.SetText(nextIop.text + iop.GetText())
+ rewrites[j] = nil
+ }
+ }
+ if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok {
+ if prevIop.index == iop.GetIndex() {
+ iop.SetText(iop.GetText() + prevIop.text)
+ rewrites[prevIop.instruction_index] = nil
+ }
+ }
+ }
+ // look for replaces where iop.index is in range; error
+ for j := 0; j < i && j < len(rewrites); j++ {
+ if rop, ok := rewrites[j].(*ReplaceOp); ok {
+ if iop.GetIndex() == rop.index {
+ rop.text = iop.GetText() + rop.text
+ rewrites[i] = nil
+ continue
+ }
+ if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex {
+ panic("insert op " + iop.String() + " within boundaries of previous " + rop.String())
+ }
+ }
+ }
+ }
+ m := map[int]RewriteOperation{}
+ for i := 0; i < len(rewrites); i++ {
+ op := rewrites[i]
+ if op == nil {
+ continue
+ }
+ if _, ok := m[op.GetIndex()]; ok {
+ panic("should only be one op per index")
+ }
+ m[op.GetIndex()] = op
+ }
+ return m
+}
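+
+// For example (an illustrative walk-through, not normative): given the
+// program [InsertBefore(2, "x"), Replace(2, 4, "y")], the replace walk above
+// folds the insert into the replace (the "R.x-y.v I.x.u -> R.x-y.uv" case),
+// so the returned map holds a single ReplaceOp covering 2..4 with text "xy".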
+
+/*
+ A quick fix for Go's lack of overloads
+*/
+
+func max(a, b int) int {
+ if a > b {
+ return a
+ } else {
+ return b
+ }
+}
+func min(a, b int) int {
+ if a < b {
+ return a
+ } else {
+ return b
+ }
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trace_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trace_listener.go
new file mode 100644
index 0000000..7b663bf
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trace_listener.go
@@ -0,0 +1,32 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "fmt"
+
+type TraceListener struct {
+ parser *BaseParser
+}
+
+func NewTraceListener(parser *BaseParser) *TraceListener {
+ tl := new(TraceListener)
+ tl.parser = parser
+ return tl
+}
+
+func (t *TraceListener) VisitErrorNode(_ ErrorNode) {
+}
+
+func (t *TraceListener) EnterEveryRule(ctx ParserRuleContext) {
+ fmt.Println("enter " + t.parser.GetRuleNames()[ctx.GetRuleIndex()] + ", LT(1)=" + t.parser.input.LT(1).GetText())
+}
+
+func (t *TraceListener) VisitTerminal(node TerminalNode) {
+ fmt.Println("consume " + fmt.Sprint(node.GetSymbol()) + " rule " + t.parser.GetRuleNames()[t.parser.ctx.GetRuleIndex()])
+}
+
+func (t *TraceListener) ExitEveryRule(ctx ParserRuleContext) {
+ fmt.Println("exit " + t.parser.GetRuleNames()[ctx.GetRuleIndex()] + ", LT(1)=" + t.parser.input.LT(1).GetText())
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/transition.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/transition.go
new file mode 100644
index 0000000..36be4f7
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/transition.go
@@ -0,0 +1,428 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// atom, set, epsilon, action, predicate, rule transitions.
+//
+//
+// This is a one way link. It emanates from a state (usually via a list of
+// transitions) and has a target state.
+//
+//
+// Since we never have to change the ATN transitions once we construct it,
+// we can fix these transitions as specific classes. The DFA transitions, on
+// the other hand, need to update the labels as they add transitions to the
+// states. We'll use the term Edge for the DFA to distinguish them from
+// ATN transitions.
+
+type Transition interface {
+ getTarget() ATNState
+ setTarget(ATNState)
+ getIsEpsilon() bool
+ getLabel() *IntervalSet
+ getSerializationType() int
+ Matches(int, int, int) bool
+}
+
+type BaseTransition struct {
+ target ATNState
+ isEpsilon bool
+ label int
+ intervalSet *IntervalSet
+ serializationType int
+}
+
+func NewBaseTransition(target ATNState) *BaseTransition {
+
+ if target == nil {
+ panic("target cannot be nil.")
+ }
+
+ t := new(BaseTransition)
+
+ t.target = target
+ // Are we epsilon, action, sempred?
+ t.isEpsilon = false
+ t.intervalSet = nil
+
+ return t
+}
+
+func (t *BaseTransition) getTarget() ATNState {
+ return t.target
+}
+
+func (t *BaseTransition) setTarget(s ATNState) {
+ t.target = s
+}
+
+func (t *BaseTransition) getIsEpsilon() bool {
+ return t.isEpsilon
+}
+
+func (t *BaseTransition) getLabel() *IntervalSet {
+ return t.intervalSet
+}
+
+func (t *BaseTransition) getSerializationType() int {
+ return t.serializationType
+}
+
+func (t *BaseTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ panic("Not implemented")
+}
+
+const (
+ TransitionEPSILON = 1
+ TransitionRANGE = 2
+ TransitionRULE = 3
+ TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}?
+ TransitionATOM = 5
+ TransitionACTION = 6
+ TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2
+ TransitionNOTSET = 8
+ TransitionWILDCARD = 9
+ TransitionPRECEDENCE = 10
+)
+
+var TransitionserializationNames = []string{
+ "INVALID",
+ "EPSILON",
+ "RANGE",
+ "RULE",
+ "PREDICATE",
+ "ATOM",
+ "ACTION",
+ "SET",
+ "NOT_SET",
+ "WILDCARD",
+ "PRECEDENCE",
+}
+
+//var TransitionserializationTypes struct {
+// EpsilonTransition int
+// RangeTransition int
+// RuleTransition int
+// PredicateTransition int
+// AtomTransition int
+// ActionTransition int
+// SetTransition int
+// NotSetTransition int
+// WildcardTransition int
+// PrecedencePredicateTransition int
+//}{
+// TransitionEPSILON,
+// TransitionRANGE,
+// TransitionRULE,
+// TransitionPREDICATE,
+// TransitionATOM,
+// TransitionACTION,
+// TransitionSET,
+// TransitionNOTSET,
+// TransitionWILDCARD,
+// TransitionPRECEDENCE
+//}
+
+// TODO: make all transitions sets? no, should remove set edges
+type AtomTransition struct {
+ *BaseTransition
+}
+
+func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition {
+
+ t := new(AtomTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.label = intervalSet // The token type or character value; or, signifies a special label.
+ t.intervalSet = t.makeLabel()
+ t.serializationType = TransitionATOM
+
+ return t
+}
+
+func (t *AtomTransition) makeLabel() *IntervalSet {
+ s := NewIntervalSet()
+ s.addOne(t.label)
+ return s
+}
+
+func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return t.label == symbol
+}
+
+func (t *AtomTransition) String() string {
+ return strconv.Itoa(t.label)
+}
+
+type RuleTransition struct {
+ *BaseTransition
+
+ followState ATNState
+ ruleIndex, precedence int
+}
+
+func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition {
+
+ t := new(RuleTransition)
+ t.BaseTransition = NewBaseTransition(ruleStart)
+
+ t.ruleIndex = ruleIndex
+ t.precedence = precedence
+ t.followState = followState
+ t.serializationType = TransitionRULE
+ t.isEpsilon = true
+
+ return t
+}
+
+func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return false
+}
+
+type EpsilonTransition struct {
+ *BaseTransition
+
+ outermostPrecedenceReturn int
+}
+
+func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition {
+
+ t := new(EpsilonTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.serializationType = TransitionEPSILON
+ t.isEpsilon = true
+ t.outermostPrecedenceReturn = outermostPrecedenceReturn
+ return t
+}
+
+func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return false
+}
+
+func (t *EpsilonTransition) String() string {
+ return "epsilon"
+}
+
+type RangeTransition struct {
+ *BaseTransition
+
+ start, stop int
+}
+
+func NewRangeTransition(target ATNState, start, stop int) *RangeTransition {
+
+ t := new(RangeTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.serializationType = TransitionRANGE
+ t.start = start
+ t.stop = stop
+ t.intervalSet = t.makeLabel()
+ return t
+}
+
+func (t *RangeTransition) makeLabel() *IntervalSet {
+ s := NewIntervalSet()
+ s.addRange(t.start, t.stop)
+ return s
+}
+
+func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return symbol >= t.start && symbol <= t.stop
+}
+
+func (t *RangeTransition) String() string {
+ var sb strings.Builder
+ sb.WriteByte('\'')
+ sb.WriteRune(rune(t.start))
+ sb.WriteString("'..'")
+ sb.WriteRune(rune(t.stop))
+ sb.WriteByte('\'')
+ return sb.String()
+}
+
+type AbstractPredicateTransition interface {
+ Transition
+ IAbstractPredicateTransitionFoo()
+}
+
+type BaseAbstractPredicateTransition struct {
+ *BaseTransition
+}
+
+func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition {
+
+ t := new(BaseAbstractPredicateTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ return t
+}
+
+func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {}
+
+type PredicateTransition struct {
+ *BaseAbstractPredicateTransition
+
+ isCtxDependent bool
+ ruleIndex, predIndex int
+}
+
+func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition {
+
+ t := new(PredicateTransition)
+ t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target)
+
+ t.serializationType = TransitionPREDICATE
+ t.ruleIndex = ruleIndex
+ t.predIndex = predIndex
+ t.isCtxDependent = isCtxDependent // e.g., $i ref in pred
+ t.isEpsilon = true
+ return t
+}
+
+func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return false
+}
+
+func (t *PredicateTransition) getPredicate() *Predicate {
+ return NewPredicate(t.ruleIndex, t.predIndex, t.isCtxDependent)
+}
+
+func (t *PredicateTransition) String() string {
+ return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex)
+}
+
+type ActionTransition struct {
+ *BaseTransition
+
+ isCtxDependent bool
+ ruleIndex, actionIndex, predIndex int
+}
+
+func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition {
+
+ t := new(ActionTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.serializationType = TransitionACTION
+ t.ruleIndex = ruleIndex
+ t.actionIndex = actionIndex
+ t.isCtxDependent = isCtxDependent // e.g., $i ref in pred
+ t.isEpsilon = true
+ return t
+}
+
+func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return false
+}
+
+func (t *ActionTransition) String() string {
+ return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)
+}
+
+type SetTransition struct {
+ *BaseTransition
+}
+
+func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition {
+
+ t := new(SetTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.serializationType = TransitionSET
+ if set != nil {
+ t.intervalSet = set
+ } else {
+ t.intervalSet = NewIntervalSet()
+ t.intervalSet.addOne(TokenInvalidType)
+ }
+
+ return t
+}
+
+func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return t.intervalSet.contains(symbol)
+}
+
+func (t *SetTransition) String() string {
+ return t.intervalSet.String()
+}
+
+type NotSetTransition struct {
+ *SetTransition
+}
+
+func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition {
+
+ t := new(NotSetTransition)
+
+ t.SetTransition = NewSetTransition(target, set)
+
+ t.serializationType = TransitionNOTSET
+
+ return t
+}
+
+func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol)
+}
+
+func (t *NotSetTransition) String() string {
+ return "~" + t.intervalSet.String()
+}
+
+type WildcardTransition struct {
+ *BaseTransition
+}
+
+func NewWildcardTransition(target ATNState) *WildcardTransition {
+
+ t := new(WildcardTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.serializationType = TransitionWILDCARD
+ return t
+}
+
+func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return symbol >= minVocabSymbol && symbol <= maxVocabSymbol
+}
+
+func (t *WildcardTransition) String() string {
+ return "."
+}
+
+type PrecedencePredicateTransition struct {
+ *BaseAbstractPredicateTransition
+
+ precedence int
+}
+
+func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition {
+
+ t := new(PrecedencePredicateTransition)
+ t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target)
+
+ t.serializationType = TransitionPRECEDENCE
+ t.precedence = precedence
+ t.isEpsilon = true
+
+ return t
+}
+
+func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return false
+}
+
+func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate {
+ return NewPrecedencePredicate(t.precedence)
+}
+
+func (t *PrecedencePredicateTransition) String() string {
+ return fmt.Sprint(t.precedence) + " >= _p"
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go
new file mode 100644
index 0000000..85b4f13
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go
@@ -0,0 +1,253 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// The basic notion of a tree has a parent, a payload, and a list of children.
+// It is the most abstract interface for all the trees used by ANTLR.
+
+var TreeInvalidInterval = NewInterval(-1, -2)
+
+type Tree interface {
+ GetParent() Tree
+ SetParent(Tree)
+ GetPayload() interface{}
+ GetChild(i int) Tree
+ GetChildCount() int
+ GetChildren() []Tree
+}
+
+type SyntaxTree interface {
+ Tree
+
+ GetSourceInterval() *Interval
+}
+
+type ParseTree interface {
+ SyntaxTree
+
+ Accept(Visitor ParseTreeVisitor) interface{}
+ GetText() string
+
+ ToStringTree([]string, Recognizer) string
+}
+
+type RuleNode interface {
+ ParseTree
+
+ GetRuleContext() RuleContext
+ GetBaseRuleContext() *BaseRuleContext
+}
+
+type TerminalNode interface {
+ ParseTree
+
+ GetSymbol() Token
+}
+
+type ErrorNode interface {
+ TerminalNode
+
+ errorNode()
+}
+
+type ParseTreeVisitor interface {
+ Visit(tree ParseTree) interface{}
+ VisitChildren(node RuleNode) interface{}
+ VisitTerminal(node TerminalNode) interface{}
+ VisitErrorNode(node ErrorNode) interface{}
+}
+
+type BaseParseTreeVisitor struct{}
+
+var _ ParseTreeVisitor = &BaseParseTreeVisitor{}
+
+func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return tree.Accept(v) }
+func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{} { return nil }
+func (v *BaseParseTreeVisitor) VisitTerminal(node TerminalNode) interface{} { return nil }
+func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{} { return nil }
+
+// TODO
+//func (this ParseTreeVisitor) Visit(ctx) {
+// if (Utils.isArray(ctx)) {
+// self := this
+// return ctx.map(function(child) { return VisitAtom(self, child)})
+// } else {
+// return VisitAtom(this, ctx)
+// }
+//}
+//
+//func VisitAtom(Visitor, ctx) {
+// if (ctx.parser == nil) { //is terminal
+// return
+// }
+//
+// name := ctx.parser.ruleNames[ctx.ruleIndex]
+// funcName := "Visit" + Utils.titleCase(name)
+//
+// return Visitor[funcName](ctx)
+//}
+
+type ParseTreeListener interface {
+ VisitTerminal(node TerminalNode)
+ VisitErrorNode(node ErrorNode)
+ EnterEveryRule(ctx ParserRuleContext)
+ ExitEveryRule(ctx ParserRuleContext)
+}
+
+type BaseParseTreeListener struct{}
+
+var _ ParseTreeListener = &BaseParseTreeListener{}
+
+func (l *BaseParseTreeListener) VisitTerminal(node TerminalNode) {}
+func (l *BaseParseTreeListener) VisitErrorNode(node ErrorNode) {}
+func (l *BaseParseTreeListener) EnterEveryRule(ctx ParserRuleContext) {}
+func (l *BaseParseTreeListener) ExitEveryRule(ctx ParserRuleContext) {}
+
+type TerminalNodeImpl struct {
+ parentCtx RuleContext
+
+ symbol Token
+}
+
+var _ TerminalNode = &TerminalNodeImpl{}
+
+func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl {
+ tn := new(TerminalNodeImpl)
+
+ tn.parentCtx = nil
+ tn.symbol = symbol
+
+ return tn
+}
+
+func (t *TerminalNodeImpl) GetChild(i int) Tree {
+ return nil
+}
+
+func (t *TerminalNodeImpl) GetChildren() []Tree {
+ return nil
+}
+
+func (t *TerminalNodeImpl) SetChildren(tree []Tree) {
+ panic("Cannot set children on terminal node")
+}
+
+func (t *TerminalNodeImpl) GetSymbol() Token {
+ return t.symbol
+}
+
+func (t *TerminalNodeImpl) GetParent() Tree {
+ return t.parentCtx
+}
+
+func (t *TerminalNodeImpl) SetParent(tree Tree) {
+ t.parentCtx = tree.(RuleContext)
+}
+
+func (t *TerminalNodeImpl) GetPayload() interface{} {
+ return t.symbol
+}
+
+func (t *TerminalNodeImpl) GetSourceInterval() *Interval {
+ if t.symbol == nil {
+ return TreeInvalidInterval
+ }
+ tokenIndex := t.symbol.GetTokenIndex()
+ return NewInterval(tokenIndex, tokenIndex)
+}
+
+func (t *TerminalNodeImpl) GetChildCount() int {
+ return 0
+}
+
+func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} {
+ return v.VisitTerminal(t)
+}
+
+func (t *TerminalNodeImpl) GetText() string {
+ return t.symbol.GetText()
+}
+
+func (t *TerminalNodeImpl) String() string {
+ if t.symbol.GetTokenType() == TokenEOF {
+ return "<EOF>"
+ }
+
+ return t.symbol.GetText()
+}
+
+func (t *TerminalNodeImpl) ToStringTree(s []string, r Recognizer) string {
+ return t.String()
+}
+
+// Represents a token that was consumed during resynchronization
+// rather than during a valid Match operation. For example,
+// we will create this kind of a node during single token insertion
+// and deletion as well as during "consume until error recovery set"
+// upon no viable alternative exceptions.
+
+type ErrorNodeImpl struct {
+ *TerminalNodeImpl
+}
+
+var _ ErrorNode = &ErrorNodeImpl{}
+
+func NewErrorNodeImpl(token Token) *ErrorNodeImpl {
+ en := new(ErrorNodeImpl)
+ en.TerminalNodeImpl = NewTerminalNodeImpl(token)
+ return en
+}
+
+func (e *ErrorNodeImpl) errorNode() {}
+
+func (e *ErrorNodeImpl) Accept(v ParseTreeVisitor) interface{} {
+ return v.VisitErrorNode(e)
+}
+
+type ParseTreeWalker struct {
+}
+
+func NewParseTreeWalker() *ParseTreeWalker {
+ return new(ParseTreeWalker)
+}
+
+// Performs a walk on the given parse tree starting at the root and going down recursively
+// with depth-first search. On each node, EnterRule is called before
+// recursively walking down into child nodes, then
+// ExitRule is called after the recursive call to wind up.
+func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) {
+ switch tt := t.(type) {
+ case ErrorNode:
+ listener.VisitErrorNode(tt)
+ case TerminalNode:
+ listener.VisitTerminal(tt)
+ default:
+ p.EnterRule(listener, t.(RuleNode))
+ for i := 0; i < t.GetChildCount(); i++ {
+ child := t.GetChild(i)
+ p.Walk(listener, child)
+ }
+ p.ExitRule(listener, t.(RuleNode))
+ }
+}
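+
+// A minimal usage sketch (p is a generated parser and l implements
+// ParseTreeListener; both names are illustrative):
+//
+// tree := p.StartRule()
+// antlr.ParseTreeWalkerDefault.Walk(l, tree)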
+
+// Enters a grammar rule by first triggering the generic event {@link ParseTreeListener//EnterEveryRule}
+// then by triggering the event specific to the given parse tree node
+func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) {
+ ctx := r.GetRuleContext().(ParserRuleContext)
+ listener.EnterEveryRule(ctx)
+ ctx.EnterRule(listener)
+}
+
+// Exits a grammar rule by first triggering the event specific to the given parse tree node
+// then by triggering the generic event {@link ParseTreeListener//ExitEveryRule}
+func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) {
+ ctx := r.GetRuleContext().(ParserRuleContext)
+ ctx.ExitRule(listener)
+ listener.ExitEveryRule(ctx)
+}
+
+var ParseTreeWalkerDefault = NewParseTreeWalker()
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trees.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trees.go
new file mode 100644
index 0000000..d7dbb03
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trees.go
@@ -0,0 +1,138 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "fmt"
+
+/** A set of utility routines useful for all kinds of ANTLR trees. */
+
+// Print out a whole tree in LISP form. {@link //getNodeText} is used on the
+// node payloads to get the text for the nodes. Detect parse trees and
+// extract data appropriately.
+func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string {
+
+ if recog != nil {
+ ruleNames = recog.GetRuleNames()
+ }
+
+ s := TreesGetNodeText(tree, ruleNames, nil)
+
+ s = EscapeWhitespace(s, false)
+ c := tree.GetChildCount()
+ if c == 0 {
+ return s
+ }
+ res := "(" + s + " "
+ if c > 0 {
+ s = TreesStringTree(tree.GetChild(0), ruleNames, nil)
+ res += s
+ }
+ for i := 1; i < c; i++ {
+ s = TreesStringTree(tree.GetChild(i), ruleNames, nil)
+ res += (" " + s)
+ }
+ res += ")"
+ return res
+}
+
+func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string {
+ if recog != nil {
+ ruleNames = recog.GetRuleNames()
+ }
+
+ if ruleNames != nil {
+ switch t2 := t.(type) {
+ case RuleNode:
+ t3 := t2.GetRuleContext()
+ altNumber := t3.GetAltNumber()
+
+ if altNumber != ATNInvalidAltNumber {
+ return fmt.Sprintf("%s:%d", ruleNames[t3.GetRuleIndex()], altNumber)
+ }
+ return ruleNames[t3.GetRuleIndex()]
+ case ErrorNode:
+ return fmt.Sprint(t2)
+ case TerminalNode:
+ if t2.GetSymbol() != nil {
+ return t2.GetSymbol().GetText()
+ }
+ }
+ }
+
+ // no recog for rule names
+ payload := t.GetPayload()
+ if p2, ok := payload.(Token); ok {
+ return p2.GetText()
+ }
+
+ return fmt.Sprint(t.GetPayload())
+}
+
+// Return ordered list of all children of this node
+func TreesGetChildren(t Tree) []Tree {
+ list := make([]Tree, 0)
+ for i := 0; i < t.GetChildCount(); i++ {
+ list = append(list, t.GetChild(i))
+ }
+ return list
+}
+
+// Return a list of all ancestors of this node. The first node of the list
+// is the root and the last is the parent of this node.
+func TreesgetAncestors(t Tree) []Tree {
+ ancestors := make([]Tree, 0)
+ t = t.GetParent()
+ for t != nil {
+ f := []Tree{t}
+ ancestors = append(f, ancestors...)
+ t = t.GetParent()
+ }
+ return ancestors
+}
+
+func TreesFindAllTokenNodes(t ParseTree, ttype int) []ParseTree {
+ return TreesfindAllNodes(t, ttype, true)
+}
+
+func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree {
+ return TreesfindAllNodes(t, ruleIndex, false)
+}
+
+func TreesfindAllNodes(t ParseTree, index int, findTokens bool) []ParseTree {
+ nodes := make([]ParseTree, 0)
+ treesFindAllNodes(t, index, findTokens, &nodes)
+ return nodes
+}
+
+func treesFindAllNodes(t ParseTree, index int, findTokens bool, nodes *[]ParseTree) {
+ // check this node (the root) first
+
+ t2, ok := t.(TerminalNode)
+ t3, ok2 := t.(ParserRuleContext)
+
+ if findTokens && ok {
+ if t2.GetSymbol().GetTokenType() == index {
+ *nodes = append(*nodes, t2)
+ }
+ } else if !findTokens && ok2 {
+ if t3.GetRuleIndex() == index {
+ *nodes = append(*nodes, t3)
+ }
+ }
+ // check children
+ for i := 0; i < t.GetChildCount(); i++ {
+ treesFindAllNodes(t.GetChild(i).(ParseTree), index, findTokens, nodes)
+ }
+}
+
+func TreesDescendants(t ParseTree) []ParseTree {
+ nodes := []ParseTree{t}
+ for i := 0; i < t.GetChildCount(); i++ {
+ nodes = append(nodes, TreesDescendants(t.GetChild(i).(ParseTree))...)
+ }
+ return nodes
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils.go
new file mode 100644
index 0000000..9fad5d9
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils.go
@@ -0,0 +1,352 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "math/bits"
+ "strconv"
+ "strings"
+)
+
+func intMin(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func intMax(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+// A simple integer stack
+
+type IntStack []int
+
+var ErrEmptyStack = errors.New("Stack is empty")
+
+func (s *IntStack) Pop() (int, error) {
+ l := len(*s) - 1
+ if l < 0 {
+ return 0, ErrEmptyStack
+ }
+ v := (*s)[l]
+ *s = (*s)[0:l]
+ return v, nil
+}
+
+func (s *IntStack) Push(e int) {
+ *s = append(*s, e)
+}
+
+type comparable interface {
+ Equals(other Collectable[any]) bool
+}
+
+func standardEqualsFunction(a Collectable[any], b Collectable[any]) bool {
+
+ return a.Equals(b)
+}
+
+func standardHashFunction(a interface{}) int {
+ if h, ok := a.(hasher); ok {
+ return h.Hash()
+ }
+
+ panic("Not Hasher")
+}
+
+type hasher interface {
+ Hash() int
+}
+
+const bitsPerWord = 64
+
+func indexForBit(bit int) int {
+ return bit / bitsPerWord
+}
+
+func wordForBit(data []uint64, bit int) uint64 {
+ idx := indexForBit(bit)
+ if idx >= len(data) {
+ return 0
+ }
+ return data[idx]
+}
+
+func maskForBit(bit int) uint64 {
+ return uint64(1) << (bit % bitsPerWord)
+}
+
+func wordsNeeded(bit int) int {
+ return indexForBit(bit) + 1
+}
+
+type BitSet struct {
+ data []uint64
+}
+
+func NewBitSet() *BitSet {
+ return &BitSet{}
+}
+
+func (b *BitSet) add(value int) {
+ idx := indexForBit(value)
+ if idx >= len(b.data) {
+ size := wordsNeeded(value)
+ data := make([]uint64, size)
+ copy(data, b.data)
+ b.data = data
+ }
+ b.data[idx] |= maskForBit(value)
+}
+
+func (b *BitSet) clear(index int) {
+ idx := indexForBit(index)
+ if idx >= len(b.data) {
+ return
+ }
+ b.data[idx] &= ^maskForBit(index)
+}
+
+func (b *BitSet) or(set *BitSet) {
+ // Get min size necessary to represent the bits in both sets.
+ bLen := b.minLen()
+ setLen := set.minLen()
+ maxLen := intMax(bLen, setLen)
+ if maxLen > len(b.data) {
+ // Increase the size of len(b.data) to represent the bits in both sets.
+ data := make([]uint64, maxLen)
+ copy(data, b.data)
+ b.data = data
+ }
+ // len(b.data) is at least setLen.
+ for i := 0; i < setLen; i++ {
+ b.data[i] |= set.data[i]
+ }
+}
+
+func (b *BitSet) remove(value int) {
+ b.clear(value)
+}
+
+func (b *BitSet) contains(value int) bool {
+ idx := indexForBit(value)
+ if idx >= len(b.data) {
+ return false
+ }
+ return (b.data[idx] & maskForBit(value)) != 0
+}
+
+func (b *BitSet) minValue() int {
+ for i, v := range b.data {
+ if v == 0 {
+ continue
+ }
+ return i*bitsPerWord + bits.TrailingZeros64(v)
+ }
+ return 2147483647 // math.MaxInt32: returned when no bits are set
+}
+
+func (b *BitSet) equals(other interface{}) bool {
+ otherBitSet, ok := other.(*BitSet)
+ if !ok {
+ return false
+ }
+
+ if b == otherBitSet {
+ return true
+ }
+
+ // We only compare set bits, so we cannot rely on the two slices having the same size. It's
+ // possible for two BitSets to have different slice lengths but the same set bits. So we only
+ // compare the relevant words and ignore the trailing zeros.
+ bLen := b.minLen()
+ otherLen := otherBitSet.minLen()
+
+ if bLen != otherLen {
+ return false
+ }
+
+ for i := 0; i < bLen; i++ {
+ if b.data[i] != otherBitSet.data[i] {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (b *BitSet) minLen() int {
+ for i := len(b.data); i > 0; i-- {
+ if b.data[i-1] != 0 {
+ return i
+ }
+ }
+ return 0
+}
+
+func (b *BitSet) length() int {
+ cnt := 0
+ for _, val := range b.data {
+ cnt += bits.OnesCount64(val)
+ }
+ return cnt
+}
+
+func (b *BitSet) String() string {
+ vals := make([]string, 0, b.length())
+
+ for i, v := range b.data {
+ for v != 0 {
+ n := bits.TrailingZeros64(v)
+ vals = append(vals, strconv.Itoa(i*bitsPerWord+n))
+ v &= ^(uint64(1) << n)
+ }
+ }
+
+ return "{" + strings.Join(vals, ", ") + "}"
+}
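+
+// Illustrative sketch of the BitSet API above (names are real, values
+// arbitrary):
+//
+// b := NewBitSet()
+// b.add(3)
+// b.add(65)
+// fmt.Println(b.String()) // prints {3, 65}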
+
+type AltDict struct {
+ data map[string]interface{}
+}
+
+func NewAltDict() *AltDict {
+ d := new(AltDict)
+ d.data = make(map[string]interface{})
+ return d
+}
+
+func (a *AltDict) Get(key string) interface{} {
+ key = "k-" + key
+ return a.data[key]
+}
+
+func (a *AltDict) put(key string, value interface{}) {
+ key = "k-" + key
+ a.data[key] = value
+}
+
+func (a *AltDict) values() []interface{} {
+ vs := make([]interface{}, len(a.data))
+ i := 0
+ for _, v := range a.data {
+ vs[i] = v
+ i++
+ }
+ return vs
+}
+
+type DoubleDict struct {
+ data map[int]map[int]interface{}
+}
+
+func NewDoubleDict() *DoubleDict {
+ dd := new(DoubleDict)
+ dd.data = make(map[int]map[int]interface{})
+ return dd
+}
+
+func (d *DoubleDict) Get(a, b int) interface{} {
+ data := d.data[a]
+
+ if data == nil {
+ return nil
+ }
+
+ return data[b]
+}
+
+func (d *DoubleDict) set(a, b int, o interface{}) {
+ data := d.data[a]
+
+ if data == nil {
+ data = make(map[int]interface{})
+ d.data[a] = data
+ }
+
+ data[b] = o
+}
+
+func EscapeWhitespace(s string, escapeSpaces bool) string {
+
+ s = strings.Replace(s, "\t", "\\t", -1)
+ s = strings.Replace(s, "\n", "\\n", -1)
+ s = strings.Replace(s, "\r", "\\r", -1)
+ if escapeSpaces {
+ s = strings.Replace(s, " ", "\u00B7", -1)
+ }
+ return s
+}
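+
+// For example, EscapeWhitespace("a\tb", false) yields "a\\tb" (the tab
+// becomes a literal backslash-t); with escapeSpaces=true each space is
+// replaced by the middle dot (\u00B7).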
+
+func TerminalNodeToStringArray(sa []TerminalNode) []string {
+ st := make([]string, len(sa))
+
+ for i, s := range sa {
+ st[i] = fmt.Sprintf("%v", s)
+ }
+
+ return st
+}
+
+func PrintArrayJavaStyle(sa []string) string {
+ var buffer bytes.Buffer
+
+ buffer.WriteString("[")
+
+ for i, s := range sa {
+ buffer.WriteString(s)
+ if i != len(sa)-1 {
+ buffer.WriteString(", ")
+ }
+ }
+
+ buffer.WriteString("]")
+
+ return buffer.String()
+}
+
+// murmur hash
+func murmurInit(seed int) int {
+ return seed
+}
+
+func murmurUpdate(h int, value int) int {
+ const c1 uint32 = 0xCC9E2D51
+ const c2 uint32 = 0x1B873593
+ const r1 uint32 = 15
+ const r2 uint32 = 13
+ const m uint32 = 5
+ const n uint32 = 0xE6546B64
+
+ k := uint32(value)
+ k *= c1
+ k = (k << r1) | (k >> (32 - r1))
+ k *= c2
+
+ hash := uint32(h) ^ k
+ hash = (hash << r2) | (hash >> (32 - r2))
+ hash = hash*m + n
+ return int(hash)
+}
+
+func murmurFinish(h int, numberOfWords int) int {
+ var hash = uint32(h)
+ hash ^= uint32(numberOfWords) << 2
+ hash ^= hash >> 16
+ hash *= 0x85ebca6b
+ hash ^= hash >> 13
+ hash *= 0xc2b2ae35
+ hash ^= hash >> 16
+
+ return int(hash)
+}
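+
+// Hash() implementations in this runtime typically chain these helpers; an
+// illustrative sketch (v1 and v2 stand for the values being hashed):
+//
+// h := murmurInit(7)
+// h = murmurUpdate(h, v1)
+// h = murmurUpdate(h, v2)
+// h = murmurFinish(h, 2)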
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go
new file mode 100644
index 0000000..c9bd675
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go
@@ -0,0 +1,235 @@
+package antlr
+
+import "math"
+
+const (
+ _initalCapacity = 16
+ _initalBucketCapacity = 8
+ _loadFactor = 0.75
+)
+
+type Set interface {
+ Add(value interface{}) (added interface{})
+ Len() int
+ Get(value interface{}) (found interface{})
+ Contains(value interface{}) bool
+ Values() []interface{}
+ Each(f func(interface{}) bool)
+}
+
+type array2DHashSet struct {
+ buckets [][]Collectable[any]
+ hashcodeFunction func(interface{}) int
+ equalsFunction func(Collectable[any], Collectable[any]) bool
+
+ n int // How many elements in set
+ threshold int // when to expand
+
+ currentPrime int // jump by 4 primes each expand or whatever
+ initialBucketCapacity int
+}
+
+func (as *array2DHashSet) Each(f func(interface{}) bool) {
+ if as.Len() < 1 {
+ return
+ }
+
+ for _, bucket := range as.buckets {
+ for _, o := range bucket {
+ if o == nil {
+ break
+ }
+ if !f(o) {
+ return
+ }
+ }
+ }
+}
+
+func (as *array2DHashSet) Values() []interface{} {
+ if as.Len() < 1 {
+ return nil
+ }
+
+ values := make([]interface{}, 0, as.Len())
+ as.Each(func(i interface{}) bool {
+ values = append(values, i)
+ return true
+ })
+ return values
+}
+
+func (as *array2DHashSet) Contains(value Collectable[any]) bool {
+ return as.Get(value) != nil
+}
+
+func (as *array2DHashSet) Add(value Collectable[any]) interface{} {
+ if as.n > as.threshold {
+ as.expand()
+ }
+ return as.innerAdd(value)
+}
+
+func (as *array2DHashSet) expand() {
+ old := as.buckets
+
+ as.currentPrime += 4
+
+ var (
+ newCapacity = len(as.buckets) << 1
+ newTable = as.createBuckets(newCapacity)
+ newBucketLengths = make([]int, len(newTable))
+ )
+
+ as.buckets = newTable
+ as.threshold = int(float64(newCapacity) * _loadFactor)
+
+ for _, bucket := range old {
+ if bucket == nil {
+ continue
+ }
+
+ for _, o := range bucket {
+ if o == nil {
+ break
+ }
+
+ b := as.getBuckets(o)
+ bucketLength := newBucketLengths[b]
+ var newBucket []Collectable[any]
+ if bucketLength == 0 {
+ // new bucket
+ newBucket = as.createBucket(as.initialBucketCapacity)
+ newTable[b] = newBucket
+ } else {
+ newBucket = newTable[b]
+ if bucketLength == len(newBucket) {
+ // expand
+ newBucketCopy := make([]Collectable[any], len(newBucket)<<1)
+ copy(newBucketCopy[:bucketLength], newBucket)
+ newBucket = newBucketCopy
+ newTable[b] = newBucket
+ }
+ }
+
+ newBucket[bucketLength] = o
+ newBucketLengths[b]++
+ }
+ }
+}
+
+func (as *array2DHashSet) Len() int {
+ return as.n
+}
+
+func (as *array2DHashSet) Get(o Collectable[any]) interface{} {
+ if o == nil {
+ return nil
+ }
+
+ b := as.getBuckets(o)
+ bucket := as.buckets[b]
+ if bucket == nil { // no bucket
+ return nil
+ }
+
+ for _, e := range bucket {
+ if e == nil {
+ return nil // empty slot; not there
+ }
+ if as.equalsFunction(e, o) {
+ return e
+ }
+ }
+
+ return nil
+}
+
+func (as *array2DHashSet) innerAdd(o Collectable[any]) interface{} {
+ b := as.getBuckets(o)
+
+ bucket := as.buckets[b]
+
+ // new bucket
+ if bucket == nil {
+ bucket = as.createBucket(as.initialBucketCapacity)
+ bucket[0] = o
+
+ as.buckets[b] = bucket
+ as.n++
+ return o
+ }
+
+ // look for it in bucket
+ for i := 0; i < len(bucket); i++ {
+ existing := bucket[i]
+ if existing == nil { // empty slot; not there, add.
+ bucket[i] = o
+ as.n++
+ return o
+ }
+
+ if as.equalsFunction(existing, o) { // found existing, quit
+ return existing
+ }
+ }
+
+ // full bucket, expand and add to end
+ oldLength := len(bucket)
+ bucketCopy := make([]Collectable[any], oldLength<<1)
+ copy(bucketCopy[:oldLength], bucket)
+ bucket = bucketCopy
+ as.buckets[b] = bucket
+ bucket[oldLength] = o
+ as.n++
+ return o
+}
+
+func (as *array2DHashSet) getBuckets(value Collectable[any]) int {
+ hash := as.hashcodeFunction(value)
+ return hash & (len(as.buckets) - 1)
+}
+
+func (as *array2DHashSet) createBuckets(cap int) [][]Collectable[any] {
+ return make([][]Collectable[any], cap)
+}
+
+func (as *array2DHashSet) createBucket(cap int) []Collectable[any] {
+ return make([]Collectable[any], cap)
+}
+
+func newArray2DHashSetWithCap(
+ hashcodeFunction func(interface{}) int,
+ equalsFunction func(Collectable[any], Collectable[any]) bool,
+ initCap int,
+ initBucketCap int,
+) *array2DHashSet {
+ if hashcodeFunction == nil {
+ hashcodeFunction = standardHashFunction
+ }
+
+ if equalsFunction == nil {
+ equalsFunction = standardEqualsFunction
+ }
+
+ ret := &array2DHashSet{
+ hashcodeFunction: hashcodeFunction,
+ equalsFunction: equalsFunction,
+
+ n: 0,
+ threshold: int(math.Floor(_initalCapacity * _loadFactor)),
+
+ currentPrime: 1,
+ initialBucketCapacity: initBucketCap,
+ }
+
+ ret.buckets = ret.createBuckets(initCap)
+ return ret
+}
+
+func newArray2DHashSet(
+ hashcodeFunction func(interface{}) int,
+ equalsFunction func(Collectable[any], Collectable[any]) bool,
+) *array2DHashSet {
+ return newArray2DHashSetWithCap(hashcodeFunction, equalsFunction, _initalCapacity, _initalBucketCapacity)
+}
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
new file mode 100644
index 0000000..bc52e96
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/LICENSE
@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
new file mode 100644
index 0000000..7929947
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -0,0 +1,145 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, compiled by GopherJS, and
+// "-tags safe" is not added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// Go versions prior to 1.4 are disabled because they use a different layout
+// for interfaces which make the implementation of unsafeReflectValue more complex.
+// +build !js,!appengine,!safe,!disableunsafe,go1.4
+
+package spew
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = false
+
+ // ptrSize is the size of a pointer on the current arch.
+ ptrSize = unsafe.Sizeof((*byte)(nil))
+)
+
+type flag uintptr
+
+var (
+ // flagRO indicates whether the value field of a reflect.Value
+ // is read-only.
+ flagRO flag
+
+ // flagAddr indicates whether the address of the reflect.Value's
+ // value may be taken.
+ flagAddr flag
+)
+
+// flagKindMask holds the bits that make up the kind
+// part of the flags field. In all the supported versions,
+// it is in the lower 5 bits.
+const flagKindMask = flag(0x1f)
+
+// Different versions of Go have used different
+// bit layouts for the flags type. This table
+// records the known combinations.
+var okFlags = []struct {
+ ro, addr flag
+}{{
+ // From Go 1.4 to 1.5
+ ro: 1 << 5,
+ addr: 1 << 7,
+}, {
+ // Up to Go tip.
+ ro: 1<<5 | 1<<6,
+ addr: 1 << 8,
+}}
+
+var flagValOffset = func() uintptr {
+ field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+ if !ok {
+ panic("reflect.Value has no flag field")
+ }
+ return field.Offset
+}()
+
+// flagField returns a pointer to the flag field of a reflect.Value.
+func flagField(v *reflect.Value) *flag {
+ return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
+}
+
+// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
+// the typical safety restrictions preventing access to unaddressable and
+// unexported data. It works by digging the raw pointer to the underlying
+// value out of the protected value and generating a new unprotected (unsafe)
+// reflect.Value to it.
+//
+// This allows us to check for implementations of the Stringer and error
+// interfaces to be used for pretty printing ordinarily unaddressable and
+// inaccessible values such as unexported struct fields.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+ if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
+ return v
+ }
+ flagFieldPtr := flagField(&v)
+ *flagFieldPtr &^= flagRO
+ *flagFieldPtr |= flagAddr
+ return v
+}
+
+// Sanity checks against future reflect package changes
+// to the type or semantics of the Value.flag field.
+func init() {
+ field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+ if !ok {
+ panic("reflect.Value has no flag field")
+ }
+ if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
+ panic("reflect.Value flag field has changed kind")
+ }
+ type t0 int
+ var t struct {
+ A t0
+ // t0 will have flagEmbedRO set.
+ t0
+ // a will have flagStickyRO set
+ a t0
+ }
+ vA := reflect.ValueOf(t).FieldByName("A")
+ va := reflect.ValueOf(t).FieldByName("a")
+ vt0 := reflect.ValueOf(t).FieldByName("t0")
+
+ // Infer flagRO from the difference between the flags
+ // for the (otherwise identical) fields in t.
+ flagPublic := *flagField(&vA)
+ flagWithRO := *flagField(&va) | *flagField(&vt0)
+ flagRO = flagPublic ^ flagWithRO
+
+ // Infer flagAddr from the difference between a value
+ // taken from a pointer and not.
+ vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
+ flagNoPtr := *flagField(&vA)
+ flagPtr := *flagField(&vPtrA)
+ flagAddr = flagNoPtr ^ flagPtr
+
+ // Check that the inferred flags tally with one of the known versions.
+ for _, f := range okFlags {
+ if flagRO == f.ro && flagAddr == f.addr {
+ return
+ }
+ }
+ panic("reflect.Value read-only flag has changed semantics")
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
new file mode 100644
index 0000000..205c28d
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is running on Google App Engine, is being compiled by GopherJS,
+// or "-tags safe" is added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build js appengine safe disableunsafe !go1.4
+
+package spew
+
+import "reflect"
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = true
+)
+
+// unsafeReflectValue typically converts the passed reflect.Value into one
+// that bypasses the typical safety restrictions preventing access to
+// unaddressable and unexported data. However, doing this relies on access to
+// the unsafe package. This is a stub version which simply returns the passed
+// reflect.Value when the unsafe package is not available.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+ return v
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
new file mode 100644
index 0000000..1be8ce9
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead. This mirrors
+// the technique used in the fmt package.
+var (
+ panicBytes = []byte("(PANIC=")
+ plusBytes = []byte("+")
+ iBytes = []byte("i")
+ trueBytes = []byte("true")
+ falseBytes = []byte("false")
+ interfaceBytes = []byte("(interface {})")
+ commaNewlineBytes = []byte(",\n")
+ newlineBytes = []byte("\n")
+ openBraceBytes = []byte("{")
+ openBraceNewlineBytes = []byte("{\n")
+ closeBraceBytes = []byte("}")
+ asteriskBytes = []byte("*")
+ colonBytes = []byte(":")
+ colonSpaceBytes = []byte(": ")
+ openParenBytes = []byte("(")
+ closeParenBytes = []byte(")")
+ spaceBytes = []byte(" ")
+ pointerChainBytes = []byte("->")
+ nilAngleBytes = []byte("<nil>")
+ maxNewlineBytes = []byte("<max depth reached>\n")
+ maxShortBytes = []byte("<max>")
+ circularBytes = []byte("<already shown>")
+ circularShortBytes = []byte("<shown>")
+ invalidAngleBytes = []byte("<invalid>")
+ openBracketBytes = []byte("[")
+ closeBracketBytes = []byte("]")
+ percentBytes = []byte("%")
+ precisionBytes = []byte(".")
+ openAngleBytes = []byte("<")
+ closeAngleBytes = []byte(">")
+ openMapBytes = []byte("map[")
+ closeMapBytes = []byte("]")
+ lenEqualsBytes = []byte("len=")
+ capEqualsBytes = []byte("cap=")
+)
+
+// hexDigits is used to map a decimal value to a hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+ if err := recover(); err != nil {
+ w.Write(panicBytes)
+ fmt.Fprintf(w, "%v", err)
+ w.Write(closeParenBytes)
+ }
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+ // We need an interface to check if the type implements the error or
+ // Stringer interface. However, the reflect package won't give us an
+ // interface on certain things like unexported struct fields in order
+ // to enforce visibility rules. We use unsafe, when it's available,
+ // to bypass these restrictions since this package does not mutate the
+ // values.
+ if !v.CanInterface() {
+ if UnsafeDisabled {
+ return false
+ }
+
+ v = unsafeReflectValue(v)
+ }
+
+ // Choose whether or not to do error and Stringer interface lookups against
+ // the base type or a pointer to the base type depending on settings.
+ // Technically calling one of these methods with a pointer receiver can
+ // mutate the value; however, types which choose to satisfy an error or
+ // Stringer interface with a pointer receiver should not be mutating their
+ // state inside these interface methods.
+ if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+ v = unsafeReflectValue(v)
+ }
+ if v.CanAddr() {
+ v = v.Addr()
+ }
+
+ // Is it an error or Stringer?
+ switch iface := v.Interface().(type) {
+ case error:
+ defer catchPanic(w, v)
+ if cs.ContinueOnMethod {
+ w.Write(openParenBytes)
+ w.Write([]byte(iface.Error()))
+ w.Write(closeParenBytes)
+ w.Write(spaceBytes)
+ return false
+ }
+
+ w.Write([]byte(iface.Error()))
+ return true
+
+ case fmt.Stringer:
+ defer catchPanic(w, v)
+ if cs.ContinueOnMethod {
+ w.Write(openParenBytes)
+ w.Write([]byte(iface.String()))
+ w.Write(closeParenBytes)
+ w.Write(spaceBytes)
+ return false
+ }
+ w.Write([]byte(iface.String()))
+ return true
+ }
+ return false
+}
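+
+// For example (illustrative): given a value whose type defines
+// func (e myErr) Error() string { return "boom" }, handleMethods writes
+// "boom" and reports the value as handled; with ContinueOnMethod set it
+// writes "(boom) " instead and lets the caller keep recursing.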
+
+// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) {
+ if val {
+ w.Write(trueBytes)
+ } else {
+ w.Write(falseBytes)
+ }
+}
+
+// printInt outputs a signed integer value to Writer w.
+func printInt(w io.Writer, val int64, base int) {
+ w.Write([]byte(strconv.FormatInt(val, base)))
+}
+
+// printUint outputs an unsigned integer value to Writer w.
+func printUint(w io.Writer, val uint64, base int) {
+ w.Write([]byte(strconv.FormatUint(val, base)))
+}
+
+// printFloat outputs a floating point value using the specified precision,
+// which is expected to be 32 or 64 bit, to Writer w.
+func printFloat(w io.Writer, val float64, precision int) {
+ w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
+}
+
+// printComplex outputs a complex value using the specified float precision
+// for the real and imaginary parts to Writer w.
+func printComplex(w io.Writer, c complex128, floatPrecision int) {
+ r := real(c)
+ w.Write(openParenBytes)
+ w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
+ i := imag(c)
+ if i >= 0 {
+ w.Write(plusBytes)
+ }
+ w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
+ w.Write(iBytes)
+ w.Write(closeParenBytes)
+}
+
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
+// prefix to Writer w.
+func printHexPtr(w io.Writer, p uintptr) {
+ // Null pointer.
+ num := uint64(p)
+ if num == 0 {
+ w.Write(nilAngleBytes)
+ return
+ }
+
+ // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
+ buf := make([]byte, 18)
+
+ // It's simpler to construct the hex string right to left.
+ base := uint64(16)
+ i := len(buf) - 1
+ for num >= base {
+ buf[i] = hexDigits[num%base]
+ num /= base
+ i--
+ }
+ buf[i] = hexDigits[num]
+
+ // Add '0x' prefix.
+ i--
+ buf[i] = 'x'
+ i--
+ buf[i] = '0'
+
+ // Strip unused leading bytes.
+ buf = buf[i:]
+ w.Write(buf)
+}
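+
+// For example, printHexPtr(w, 0x1f2) emits '2', then 'f', then '1' from
+// right to left (498 -> 31 -> 1), prepends "0x", and writes "0x1f2".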
+
+// valuesSorter implements sort.Interface to allow a slice of reflect.Value
+// elements to be sorted.
+type valuesSorter struct {
+ values []reflect.Value
+ strings []string // either nil or the same length as values
+ cs *ConfigState
+}
+
+// newValuesSorter initializes a valuesSorter instance, which holds a set of
+// surrogate keys on which the data should be sorted. It uses flags in
+// ConfigState to decide if and how to populate those surrogate keys.
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
+ vs := &valuesSorter{values: values, cs: cs}
+ if canSortSimply(vs.values[0].Kind()) {
+ return vs
+ }
+ if !cs.DisableMethods {
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ b := bytes.Buffer{}
+ if !handleMethods(cs, &b, vs.values[i]) {
+ vs.strings = nil
+ break
+ }
+ vs.strings[i] = b.String()
+ }
+ }
+ if vs.strings == nil && cs.SpewKeys {
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
+ }
+ }
+ return vs
+}
+
+// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
+// directly, or whether it should be considered for sorting by surrogate keys
+// (if the ConfigState allows it).
+func canSortSimply(kind reflect.Kind) bool {
+ // This switch parallels valueSortLess, except for the default case.
+ switch kind {
+ case reflect.Bool:
+ return true
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return true
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return true
+ case reflect.Float32, reflect.Float64:
+ return true
+ case reflect.String:
+ return true
+ case reflect.Uintptr:
+ return true
+ case reflect.Array:
+ return true
+ }
+ return false
+}
+
+// Len returns the number of values in the slice. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Len() int {
+ return len(s.values)
+}
+
+// Swap swaps the values at the passed indices. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Swap(i, j int) {
+ s.values[i], s.values[j] = s.values[j], s.values[i]
+ if s.strings != nil {
+ s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
+ }
+}
+
+// valueSortLess returns whether the first value should sort before the second
+// value. It is used by valueSorter.Less as part of the sort.Interface
+// implementation.
+func valueSortLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return a.Int() < b.Int()
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return a.Uint() < b.Uint()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.String:
+ return a.String() < b.String()
+ case reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Array:
+ // Compare the contents of both arrays.
+ l := a.Len()
+ for i := 0; i < l; i++ {
+ av := a.Index(i)
+ bv := b.Index(i)
+ if av.Interface() == bv.Interface() {
+ continue
+ }
+ return valueSortLess(av, bv)
+ }
+ }
+ return a.String() < b.String()
+}
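+
+// For example, [2]int{1, 9} sorts before [2]int{2, 0}: the first pair of
+// unequal elements (1 vs 2) decides the comparison.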
+
+// Less returns whether the value at index i should sort before the
+// value at index j. It is part of the sort.Interface implementation.
+func (s *valuesSorter) Less(i, j int) bool {
+ if s.strings == nil {
+ return valueSortLess(s.values[i], s.values[j])
+ }
+ return s.strings[i] < s.strings[j]
+}
+
+// sortValues is a sort function that handles both native types and any type that
+// can be converted to error or Stringer. Other inputs are sorted according to
+// their Value.String() value to ensure display stability.
+func sortValues(values []reflect.Value, cs *ConfigState) {
+ if len(values) == 0 {
+ return
+ }
+ sort.Sort(newValuesSorter(values, cs))
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go
new file mode 100644
index 0000000..2e3d22f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/config.go
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+// ConfigState houses the configuration options used by spew to format and
+// display values. There is a global instance, Config, that is used to control
+// all top-level Formatter and Dump functionality. Each ConfigState instance
+// provides methods equivalent to the top-level functions.
+//
+// The zero value for ConfigState provides no indentation. You would typically
+// want to set it to a space or a tab.
+//
+// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
+// with default settings. See the documentation of NewDefaultConfig for default
+// values.
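+//
+// As a minimal usage sketch (the variable names are illustrative), a
+// dedicated instance keeps configuration local instead of mutating the
+// global spew.Config:
+//
+//	cs := spew.ConfigState{Indent: "\t", SortKeys: true}
+//	cs.Dump(myVar)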
+type ConfigState struct {
+ // Indent specifies the string to use for each indentation level. The
+ // global config instance that all top-level functions use set this to a
+ // single space by default. If you would like more indentation, you might
+ // set this to a tab with "\t" or perhaps two spaces with " ".
+ Indent string
+
+ // MaxDepth controls the maximum number of levels to descend into nested
+ // data structures. The default, 0, means there is no limit.
+ //
+ // NOTE: Circular data structures are properly detected, so it is not
+ // necessary to set this value unless you specifically want to limit deeply
+ // nested data structures.
+ MaxDepth int
+
+ // DisableMethods specifies whether or not error and Stringer interfaces are
+ // invoked for types that implement them.
+ DisableMethods bool
+
+ // DisablePointerMethods specifies whether or not to check for and invoke
+ // error and Stringer interfaces on types which only accept a pointer
+ // receiver when the current type is not a pointer.
+ //
+ // NOTE: This might be an unsafe action since calling one of these methods
+ // with a pointer receiver could technically mutate the value; however,
+ // in practice, types which choose to satisfy an error or Stringer
+ // interface with a pointer receiver should not be mutating their state
+ // inside these interface methods. As a result, this option relies on
+ // access to the unsafe package, so it will not have any effect when
+ // running in environments without access to the unsafe package such as
+ // Google App Engine or with the "safe" build tag specified.
+ DisablePointerMethods bool
+
+ // DisablePointerAddresses specifies whether to disable the printing of
+ // pointer addresses. This is useful when diffing data structures in tests.
+ DisablePointerAddresses bool
+
+ // DisableCapacities specifies whether to disable the printing of capacities
+ // for arrays, slices, maps and channels. This is useful when diffing
+ // data structures in tests.
+ DisableCapacities bool
+
+ // ContinueOnMethod specifies whether or not recursion should continue once
+ // a custom error or Stringer interface is invoked. The default, false,
+ // means it will print the results of invoking the custom error or Stringer
+ // interface and return immediately instead of continuing to recurse into
+ // the internals of the data type.
+ //
+ // NOTE: This flag does not have any effect if method invocation is disabled
+ // via the DisableMethods or DisablePointerMethods options.
+ ContinueOnMethod bool
+
+ // SortKeys specifies map keys should be sorted before being printed. Use
+ // this to have a more deterministic, diffable output. Note that only
+ // native types (bool, int, uint, floats, uintptr and string) and types
+ // that support the error or Stringer interfaces (if methods are
+ // enabled) are supported, with other types sorted according to the
+ // reflect.Value.String() output which guarantees display stability.
+ SortKeys bool
+
+ // SpewKeys specifies that, as a last resort attempt, map keys should
+ // be spewed to strings and sorted by those strings. This is only
+ // considered if SortKeys is true.
+ SpewKeys bool
+}
+
+// Config is the active configuration of the top-level functions.
+// The configuration can be changed by modifying the contents of spew.Config.
+var Config = ConfigState{Indent: " "}
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the formatted string as a value that satisfies error. See NewFormatter
+// for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, c.convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, c.convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, c.convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, c.convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(c.convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, c.convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(c.convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprint(a ...interface{}) string {
+ return fmt.Sprint(c.convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, c.convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(c.convertArgs(a)...)
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Printf, c.Println, or c.Print.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+ fdump(c, w, a...)
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by modifying the public members
+of c. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func (c *ConfigState) Dump(a ...interface{}) {
+ fdump(c, os.Stdout, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func (c *ConfigState) Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(c, &buf, a...)
+ return buf.String()
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a spew Formatter interface using
+// the ConfigState associated with s.
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = newFormatter(c, arg)
+ }
+ return formatters
+}
+
+// NewDefaultConfig returns a ConfigState with the following default settings.
+//
+// Indent: " "
+// MaxDepth: 0
+// DisableMethods: false
+// DisablePointerMethods: false
+// ContinueOnMethod: false
+// SortKeys: false
+func NewDefaultConfig() *ConfigState {
+ return &ConfigState{Indent: " "}
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go
new file mode 100644
index 0000000..aacaac6
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/doc.go
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Package spew implements a deep pretty printer for Go data structures to aid in
+debugging.
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types is as follows:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output (only when using
+ Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+ * Dump style which prints with newlines, customizable indentation,
+ and additional debug information such as types and all pointer addresses
+ used to indirect to the final value
+ * A custom Formatter interface that integrates cleanly with the standard fmt
+ package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+ similar to the default %v while providing the additional functionality
+ outlined above and passing unsupported format verbs such as %x and %q
+ along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew. See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+ spew.Dump(myVar1, myVar2, ...)
+ spew.Fdump(someWriter, myVar1, myVar2, ...)
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type. For
+convenience, all of the top-level functions use a global state available
+via the spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions. This allows concurrent configuration
+options. See the ConfigState documentation for more details.
+
+The following configuration options are available:
+ * Indent
+ String to use for each indentation level for Dump functions.
+ It is a single space by default. A popular alternative is "\t".
+
+ * MaxDepth
+ Maximum number of levels to descend into nested data structures.
+ There is no limit by default.
+
+ * DisableMethods
+ Disables invocation of error and Stringer interface methods.
+ Method invocation is enabled by default.
+
+ * DisablePointerMethods
+ Disables invocation of error and Stringer interface methods on types
+ which only accept pointer receivers from non-pointer variables.
+ Pointer method invocation is enabled by default.
+
+ * DisablePointerAddresses
+ DisablePointerAddresses specifies whether to disable the printing of
+ pointer addresses. This is useful when diffing data structures in tests.
+
+ * DisableCapacities
+ DisableCapacities specifies whether to disable the printing of
+ capacities for arrays, slices, maps and channels. This is useful when
+ diffing data structures in tests.
+
+ * ContinueOnMethod
+ Enables recursion into types after invoking error and Stringer interface
+ methods. Recursion after method invocation is disabled by default.
+
+ * SortKeys
+ Specifies map keys should be sorted before being printed. Use
+ this to have a more deterministic, diffable output. Note that
+ only native types (bool, int, uint, floats, uintptr and string)
+ and types which implement error or Stringer interfaces are
+ supported with other types sorted according to the
+ reflect.Value.String() output which guarantees display
+ stability. Natural map order is used by default.
+
+ * SpewKeys
+ Specifies that, as a last resort attempt, map keys should be
+ spewed to strings and sorted by those strings. This is only
+ considered if SortKeys is true.
+
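+For example (an illustrative sketch; the variable names are hypothetical),
+several of the options above can be combined on a dedicated ConfigState to
+produce deterministic, diff-friendly output:
+
+ cfg := spew.NewDefaultConfig()
+ cfg.SortKeys = true
+ cfg.DisablePointerAddresses = true
+ fmt.Println(cfg.Sdump(myVar))
+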
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+ spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer. For example, to dump to standard error:
+
+ spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+ (main.Foo) {
+ unexportedField: (*main.Bar)(0xf84002e210)({
+ flag: (main.Flag) flagTwo,
+ data: (uintptr) <nil>
+ }),
+ ExportedField: (map[interface {}]interface {}) (len=1) {
+ (string) (len=3) "one": (bool) true
+ }
+ }
+
+Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
+command as shown.
+ ([]uint8) (len=32 cap=32) {
+ 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
+ 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
+ 00000020 31 32 |12|
+ }
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Printf, spew.Println, or spew.Print. The
+functions have syntax you are most likely already familiar with:
+
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Println(myVar, myVar2)
+ spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+ %v: <**>5
+ %+v: <**>(0xf8400420d0->0xf8400420c8)5
+ %#v: (**uint8)5
+ %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+ %v: <*>{1 <*><shown>}
+ %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+ %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+ %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+
+See the Printf example for details on the setup of variables being shown
+here.
+
+Errors
+
+Since it is possible for custom Stringer/error interfaces to panic, spew
+detects them and handles them internally by printing the panic information
+inline with the output. Since spew is intended to provide deep pretty printing
+capabilities on structures, it intentionally does not return any errors.
+*/
+package spew
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
new file mode 100644
index 0000000..f78d89f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ // uint8Type is a reflect.Type representing a uint8. It is used to
+ // convert cgo types to uint8 slices for hexdumping.
+ uint8Type = reflect.TypeOf(uint8(0))
+
+ // cCharRE is a regular expression that matches a cgo char.
+ // It is used to detect character arrays to hexdump them.
+ cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
+
+ // cUnsignedCharRE is a regular expression that matches a cgo unsigned
+ // char. It is used to detect unsigned character arrays to hexdump
+ // them.
+ cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
+
+ // cUint8tCharRE is a regular expression that matches a cgo uint8_t.
+ // It is used to detect uint8_t arrays to hexdump them.
+ cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
+)
+
+// dumpState contains information about the state of a dump operation.
+type dumpState struct {
+ w io.Writer
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ ignoreNextIndent bool
+ cs *ConfigState
+}
+
+// indent performs indentation according to the depth level and cs.Indent
+// option.
+func (d *dumpState) indent() {
+ if d.ignoreNextIndent {
+ d.ignoreNextIndent = false
+ return
+ }
+ d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ v = v.Elem()
+ }
+ return v
+}
+
+// dumpPtr handles formatting of pointers by indirecting them as necessary.
+func (d *dumpState) dumpPtr(v reflect.Value) {
+ // Remove pointers at or below the current depth from map used to detect
+ // circular refs.
+ for k, depth := range d.pointers {
+ if depth >= d.depth {
+ delete(d.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to show later.
+ pointerChain := make([]uintptr, 0)
+
+ // Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := d.pointers[addr]; ok && pd < d.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ d.pointers[addr] = d.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type information.
+ d.w.Write(openParenBytes)
+ d.w.Write(bytes.Repeat(asteriskBytes, indirects))
+ d.w.Write([]byte(ve.Type().String()))
+ d.w.Write(closeParenBytes)
+
+ // Display pointer information.
+ if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
+ d.w.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ d.w.Write(pointerChainBytes)
+ }
+ printHexPtr(d.w, addr)
+ }
+ d.w.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ d.w.Write(openParenBytes)
+ switch {
+ case nilFound:
+ d.w.Write(nilAngleBytes)
+
+ case cycleFound:
+ d.w.Write(circularBytes)
+
+ default:
+ d.ignoreNextType = true
+ d.dump(ve)
+ }
+ d.w.Write(closeParenBytes)
+}
+
+// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
+// reflection) arrays and slices are dumped in hexdump -C fashion.
+func (d *dumpState) dumpSlice(v reflect.Value) {
+ // Determine whether this type should be hex dumped or not. Also,
+ // for types which should be hexdumped, try to use the underlying data
+ // first, then fall back to trying to convert them to a uint8 slice.
+ var buf []uint8
+ doConvert := false
+ doHexDump := false
+ numEntries := v.Len()
+ if numEntries > 0 {
+ vt := v.Index(0).Type()
+ vts := vt.String()
+ switch {
+ // C types that need to be converted.
+ case cCharRE.MatchString(vts):
+ fallthrough
+ case cUnsignedCharRE.MatchString(vts):
+ fallthrough
+ case cUint8tCharRE.MatchString(vts):
+ doConvert = true
+
+ // Try to use existing uint8 slices and fall back to converting
+ // and copying if that fails.
+ case vt.Kind() == reflect.Uint8:
+ // We need an addressable interface to convert the type
+ // to a byte slice. However, the reflect package won't
+ // give us an interface on certain things like
+ // unexported struct fields in order to enforce
+ // visibility rules. We use unsafe, when available, to
+ // bypass these restrictions since this package does not
+ // mutate the values.
+ vs := v
+ if !vs.CanInterface() || !vs.CanAddr() {
+ vs = unsafeReflectValue(vs)
+ }
+ if !UnsafeDisabled {
+ vs = vs.Slice(0, numEntries)
+
+ // Use the existing uint8 slice if it can be
+ // type asserted.
+ iface := vs.Interface()
+ if slice, ok := iface.([]uint8); ok {
+ buf = slice
+ doHexDump = true
+ break
+ }
+ }
+
+ // The underlying data needs to be converted if it can't
+ // be type asserted to a uint8 slice.
+ doConvert = true
+ }
+
+ // Copy and convert the underlying type if needed.
+ if doConvert && vt.ConvertibleTo(uint8Type) {
+ // Convert and copy each element into a uint8 byte
+ // slice.
+ buf = make([]uint8, numEntries)
+ for i := 0; i < numEntries; i++ {
+ vv := v.Index(i)
+ buf[i] = uint8(vv.Convert(uint8Type).Uint())
+ }
+ doHexDump = true
+ }
+ }
+
+ // Hexdump the entire slice as needed.
+ if doHexDump {
+ indent := strings.Repeat(d.cs.Indent, d.depth)
+ str := indent + hex.Dump(buf)
+ str = strings.Replace(str, "\n", "\n"+indent, -1)
+ str = strings.TrimRight(str, d.cs.Indent)
+ d.w.Write([]byte(str))
+ return
+ }
+
+ // Recursively call dump for each item.
+ for i := 0; i < numEntries; i++ {
+ d.dump(d.unpackValue(v.Index(i)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+}
+
+// dump is the main workhorse for dumping a value. It uses the passed reflect
+// value to figure out what kind of object we are dealing with and formats it
+// appropriately. It is a recursive function, however circular data structures
+// are detected and handled properly.
+func (d *dumpState) dump(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ d.w.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ d.indent()
+ d.dumpPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !d.ignoreNextType {
+ d.indent()
+ d.w.Write(openParenBytes)
+ d.w.Write([]byte(v.Type().String()))
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+ d.ignoreNextType = false
+
+ // Display length and capacity if the built-in len and cap functions
+ // work with the value's kind and the len/cap itself is non-zero.
+ valueLen, valueCap := 0, 0
+ switch v.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Chan:
+ valueLen, valueCap = v.Len(), v.Cap()
+ case reflect.Map, reflect.String:
+ valueLen = v.Len()
+ }
+ if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
+ d.w.Write(openParenBytes)
+ if valueLen != 0 {
+ d.w.Write(lenEqualsBytes)
+ printInt(d.w, int64(valueLen), 10)
+ }
+ if !d.cs.DisableCapacities && valueCap != 0 {
+ if valueLen != 0 {
+ d.w.Write(spaceBytes)
+ }
+ d.w.Write(capEqualsBytes)
+ printInt(d.w, int64(valueCap), 10)
+ }
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+
+ // Call Stringer/error interfaces if they exist and the handle methods flag
+ // is enabled
+ if !d.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(d.cs, d.w, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(d.w, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(d.w, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(d.w, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(d.w, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(d.w, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(d.w, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(d.w, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ d.dumpSlice(v)
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.String:
+ d.w.Write([]byte(strconv.Quote(v.String())))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different than empty maps
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ numEntries := v.Len()
+ keys := v.MapKeys()
+ if d.cs.SortKeys {
+ sortValues(keys, d.cs)
+ }
+ for i, key := range keys {
+ d.dump(d.unpackValue(key))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.MapIndex(key)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Struct:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ vt := v.Type()
+ numFields := v.NumField()
+ for i := 0; i < numFields; i++ {
+ d.indent()
+ vtf := vt.Field(i)
+ d.w.Write([]byte(vtf.Name))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.Field(i)))
+ if i < (numFields - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(d.w, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(d.w, v.Pointer())
+
+ // There were not any other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it in case any new
+ // types are added.
+ default:
+ if v.CanInterface() {
+ fmt.Fprintf(d.w, "%v", v.Interface())
+ } else {
+ fmt.Fprintf(d.w, "%v", v.String())
+ }
+ }
+}
+
+// fdump is a helper function to consolidate the logic from the various public
+// methods which take varying writers and config states.
+func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
+ for _, arg := range a {
+ if arg == nil {
+ w.Write(interfaceBytes)
+ w.Write(spaceBytes)
+ w.Write(nilAngleBytes)
+ w.Write(newlineBytes)
+ continue
+ }
+
+ d := dumpState{w: w, cs: cs}
+ d.pointers = make(map[uintptr]int)
+ d.dump(reflect.ValueOf(arg))
+ d.w.Write(newlineBytes)
+ }
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func Fdump(w io.Writer, a ...interface{}) {
+ fdump(&Config, w, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(&Config, &buf, a...)
+ return buf.String()
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by an exported package global,
+spew.Config. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func Dump(a ...interface{}) {
+ fdump(&Config, os.Stdout, a...)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go
new file mode 100644
index 0000000..b04edb7
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// supportedFlags is a list of all the character flags supported by the fmt package.
+const supportedFlags = "0-+# "
+
+// formatState implements the fmt.Formatter interface and contains information
+// about the state of a formatting operation. The NewFormatter function can
+// be used to get a new Formatter which can be used directly as arguments
+// in standard fmt package printing calls.
+type formatState struct {
+ value interface{}
+ fs fmt.State
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ cs *ConfigState
+}
+
+// buildDefaultFormat recreates the original format string without precision
+// and width information to pass in to fmt.Sprintf in the case of an
+// unrecognized type. Unless new types are added to the language, this
+// function won't ever be called.
+func (f *formatState) buildDefaultFormat() (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ buf.WriteRune('v')
+
+ format = buf.String()
+ return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package. This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ if width, ok := f.fs.Width(); ok {
+ buf.WriteString(strconv.Itoa(width))
+ }
+
+ if precision, ok := f.fs.Precision(); ok {
+ buf.Write(precisionBytes)
+ buf.WriteString(strconv.Itoa(precision))
+ }
+
+ buf.WriteRune(verb)
+
+ format = buf.String()
+ return format
+}
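+
+// For example (illustrative): a call such as
+// fmt.Printf("%6.2x", spew.NewFormatter(v)) is not a %v variant, so Format
+// rebuilds the original "%6.2x" with this function and defers the value to
+// the standard fmt package untouched.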
+
+// unpackValue returns values inside of non-nil interfaces when possible and
+// ensures that types for values which have been unpacked from an interface
+// are displayed when the show types flag is also set.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface {
+ f.ignoreNextType = false
+ if !v.IsNil() {
+ v = v.Elem()
+ }
+ }
+ return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+ // Display nil if top level pointer is nil.
+ showTypes := f.fs.Flag('#')
+ if v.IsNil() && (!showTypes || f.ignoreNextType) {
+ f.fs.Write(nilAngleBytes)
+ return
+ }
+
+ // Remove pointers at or below the current depth from map used to detect
+ // circular refs.
+ for k, depth := range f.pointers {
+ if depth >= f.depth {
+ delete(f.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to possibly show later.
+ pointerChain := make([]uintptr, 0)
+
+ // Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ f.pointers[addr] = f.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type or indirection level depending on flags.
+ if showTypes && !f.ignoreNextType {
+ f.fs.Write(openParenBytes)
+ f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+ f.fs.Write([]byte(ve.Type().String()))
+ f.fs.Write(closeParenBytes)
+ } else {
+ if nilFound || cycleFound {
+ indirects += strings.Count(ve.Type().String(), "*")
+ }
+ f.fs.Write(openAngleBytes)
+ f.fs.Write([]byte(strings.Repeat("*", indirects)))
+ f.fs.Write(closeAngleBytes)
+ }
+
+ // Display pointer information depending on flags.
+ if f.fs.Flag('+') && (len(pointerChain) > 0) {
+ f.fs.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ f.fs.Write(pointerChainBytes)
+ }
+ printHexPtr(f.fs, addr)
+ }
+ f.fs.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ switch {
+ case nilFound:
+ f.fs.Write(nilAngleBytes)
+
+ case cycleFound:
+ f.fs.Write(circularShortBytes)
+
+ default:
+ f.ignoreNextType = true
+ f.format(ve)
+ }
+}
+
+// format is the main workhorse for providing the Formatter interface. It
+// uses the passed reflect value to figure out what kind of object we are
+// dealing with and formats it appropriately. It is a recursive function,
+// however circular data structures are detected and handled properly.
+func (f *formatState) format(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ f.fs.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ f.formatPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !f.ignoreNextType && f.fs.Flag('#') {
+ f.fs.Write(openParenBytes)
+ f.fs.Write([]byte(v.Type().String()))
+ f.fs.Write(closeParenBytes)
+ }
+ f.ignoreNextType = false
+
+ // Call Stringer/error interfaces if they exist and the handle methods
+ // flag is enabled.
+ if !f.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(f.cs, f.fs, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(f.fs, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(f.fs, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(f.fs, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(f.fs, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(f.fs, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(f.fs, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(f.fs, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ f.fs.Write(openBracketBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ numEntries := v.Len()
+ for i := 0; i < numEntries; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.Index(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBracketBytes)
+
+ case reflect.String:
+ f.fs.Write([]byte(v.String()))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different than empty maps
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+
+ f.fs.Write(openMapBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ keys := v.MapKeys()
+ if f.cs.SortKeys {
+ sortValues(keys, f.cs)
+ }
+ for i, key := range keys {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(key))
+ f.fs.Write(colonBytes)
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.MapIndex(key)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeMapBytes)
+
+ case reflect.Struct:
+ numFields := v.NumField()
+ f.fs.Write(openBraceBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ vt := v.Type()
+ for i := 0; i < numFields; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ vtf := vt.Field(i)
+ if f.fs.Flag('+') || f.fs.Flag('#') {
+ f.fs.Write([]byte(vtf.Name))
+ f.fs.Write(colonBytes)
+ }
+ f.format(f.unpackValue(v.Field(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(f.fs, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(f.fs, v.Pointer())
+
+ // There were not any other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it if any get added.
+ default:
+ format := f.buildDefaultFormat()
+ if v.CanInterface() {
+ fmt.Fprintf(f.fs, format, v.Interface())
+ } else {
+ fmt.Fprintf(f.fs, format, v.String())
+ }
+ }
+}
+
+// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
+// details.
+func (f *formatState) Format(fs fmt.State, verb rune) {
+ f.fs = fs
+
+ // Use standard formatting for verbs that are not v.
+ if verb != 'v' {
+ format := f.constructOrigFormat(verb)
+ fmt.Fprintf(fs, format, f.value)
+ return
+ }
+
+ if f.value == nil {
+ if fs.Flag('#') {
+ fs.Write(interfaceBytes)
+ }
+ fs.Write(nilAngleBytes)
+ return
+ }
+
+ f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+ fs := &formatState{value: v, cs: cs}
+ fs.pointers = make(map[uintptr]int)
+ return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(&Config, v)
+}
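+
+// Illustrative usage sketch (not part of the upstream file; myVal is a
+// placeholder for any value). The two calls below are equivalent:
+//
+//	s1 := fmt.Sprintf("%#+v", spew.NewFormatter(myVal)) // explicit wrapping
+//	s2 := spew.Sprintf("%#+v", myVal)                   // convenience shorthand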
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go
new file mode 100644
index 0000000..32c0e33
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "fmt"
+ "io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the formatted string as a value that satisfies error. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter.
+// It returns the number of bytes written and any write error encountered.
+// See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
+func Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
+func Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprint(a ...interface{}) string {
+ return fmt.Sprint(convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(convertArgs(a)...)
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a default spew Formatter interface.
+func convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = NewFormatter(arg)
+ }
+ return formatters
+}
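+
+// Illustrative usage sketch (not part of the upstream file): each wrapper
+// above simply converts its arguments before delegating to fmt, so
+//
+//	spew.Printf("cfg: %+v\n", cfg)
+//
+// behaves like
+//
+//	fmt.Printf("cfg: %+v\n", spew.NewFormatter(cfg))
+//
+// where cfg is a placeholder for any value.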
diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml
new file mode 100644
index 0000000..0cffafa
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/.golangci.yaml
@@ -0,0 +1,26 @@
+run:
+ timeout: 1m
+ tests: true
+
+linters:
+ disable-all: true
+ enable:
+ - asciicheck
+ - errcheck
+ - forcetypeassert
+ - gocritic
+ - gofmt
+ - goimports
+ - gosimple
+ - govet
+ - ineffassign
+ - misspell
+ - revive
+ - staticcheck
+ - typecheck
+ - unused
+
+issues:
+ exclude-use-default: false
+ max-issues-per-linter: 0
+ max-same-issues: 10
diff --git a/vendor/github.com/go-logr/logr/CHANGELOG.md b/vendor/github.com/go-logr/logr/CHANGELOG.md
new file mode 100644
index 0000000..c356960
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/CHANGELOG.md
@@ -0,0 +1,6 @@
+# CHANGELOG
+
+## v1.0.0-rc1
+
+This is the first logged release. Major changes (including breaking changes)
+have occurred since earlier tags.
diff --git a/vendor/github.com/go-logr/logr/CONTRIBUTING.md b/vendor/github.com/go-logr/logr/CONTRIBUTING.md
new file mode 100644
index 0000000..5d37e29
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/CONTRIBUTING.md
@@ -0,0 +1,17 @@
+# Contributing
+
+Logr is open to pull-requests, provided they fit within the intended scope of
+the project. Specifically, this library aims to be VERY small and minimalist,
+with no external dependencies.
+
+## Compatibility
+
+This project intends to follow [semantic versioning](http://semver.org) and
+is very strict about compatibility. Any proposed changes MUST follow those
+rules.
+
+## Performance
+
+As a logging library, logr must be as light-weight as possible. Any proposed
+code change must include results of running the [benchmark](./benchmark)
+before and after the change.
diff --git a/vendor/github.com/go-logr/logr/LICENSE b/vendor/github.com/go-logr/logr/LICENSE
new file mode 100644
index 0000000..8dada3e
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md
new file mode 100644
index 0000000..7c7f0c6
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/README.md
@@ -0,0 +1,407 @@
+# A minimal logging API for Go
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr)
+[![Go Report Card](https://goreportcard.com/badge/github.com/go-logr/logr)](https://goreportcard.com/report/github.com/go-logr/logr)
+[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr)
+
+logr offers an(other) opinion on how Go programs and libraries can do logging
+without becoming coupled to a particular logging implementation. This is not
+an implementation of logging - it is an API. In fact it is two APIs with two
+different sets of users.
+
+The `Logger` type is intended for application and library authors. It provides
+a relatively small API which can be used everywhere you want to emit logs. It
+defers the actual act of writing logs (to files, to stdout, or whatever) to the
+`LogSink` interface.
+
+The `LogSink` interface is intended for logging library implementers. It is a
+pure interface which can be implemented by logging frameworks to provide the actual logging
+functionality.
+
+This decoupling allows application and library developers to write code in
+terms of `logr.Logger` (which has very low dependency fan-out) while the
+implementation of logging is managed "up stack" (e.g. in or near `main()`.)
+Application developers can then switch out implementations as necessary.
+
+Many people assert that libraries should not be logging, and as such efforts
+like this are pointless. Those people are welcome to convince the authors of
+the tens-of-thousands of libraries that *DO* write logs that they are all
+wrong. In the meantime, logr takes a more practical approach.
+
+## Typical usage
+
+Somewhere, early in an application's life, it will make a decision about which
+logging library (implementation) it actually wants to use. Something like:
+
+```
+ func main() {
+ // ... other setup code ...
+
+ // Create the "root" logger. We have chosen the "logimpl" implementation,
+ // which takes some initial parameters and returns a logr.Logger.
+ logger := logimpl.New(param1, param2)
+
+ // ... other setup code ...
+```
+
+Most apps will call into other libraries, create structures to govern the flow,
+etc. The `logr.Logger` object can be passed to these other libraries, stored
+in structs, or even used as a package-global variable, if needed. For example:
+
+```
+ app := createTheAppObject(logger)
+ app.Run()
+```
+
+Outside of this early setup, no other packages need to know about the choice of
+implementation. They write logs in terms of the `logr.Logger` that they
+received:
+
+```
+ type appObject struct {
+ // ... other fields ...
+ logger logr.Logger
+ // ... other fields ...
+ }
+
+ func (app *appObject) Run() {
+ app.logger.Info("starting up", "timestamp", time.Now())
+
+ // ... app code ...
+```
+
+## Background
+
+If the Go standard library had defined an interface for logging, this project
+probably would not be needed. Alas, here we are.
+
+When the Go developers started developing such an interface with
+[slog](https://github.com/golang/go/issues/56345), they adopted some of the
+logr design but also left out some parts and changed others:
+
+| Feature | logr | slog |
+|---------|------|------|
+| High-level API | `Logger` (passed by value) | `Logger` (passed by [pointer](https://github.com/golang/go/issues/59126)) |
+| Low-level API | `LogSink` | `Handler` |
+| Stack unwinding | done by `LogSink` | done by `Logger` |
+| Skipping helper functions | `WithCallDepth`, `WithCallStackHelper` | [not supported by Logger](https://github.com/golang/go/issues/59145) |
+| Generating a value for logging on demand | `Marshaler` | `LogValuer` |
+| Log levels | >= 0, higher meaning "less important" | positive and negative, with 0 for "info" and higher meaning "more important" |
+| Error log entries | always logged, don't have a verbosity level | normal log entries with level >= `LevelError` |
+| Passing logger via context | `NewContext`, `FromContext` | no API |
+| Adding a name to a logger | `WithName` | no API |
+| Modify verbosity of log entries in a call chain | `V` | no API |
+| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` |
+| Pass context for extracting additional values | no API | API variants like `InfoCtx` |
+
+The high-level slog API is explicitly meant to be one of many different APIs
+that can be layered on top of a shared `slog.Handler`. logr is one such
+alternative API, with [interoperability](#slog-interoperability) provided by
+some conversion functions.
+
+### Inspiration
+
+Before you consider this package, please read [this blog post by the
+inimitable Dave Cheney][warning-makes-no-sense]. We really appreciate what
+he has to say, and it largely aligns with our own experiences.
+
+### Differences from Dave's ideas
+
+The main differences are:
+
+1. Dave basically proposes doing away with the notion of a logging API in favor
+of `fmt.Printf()`. We disagree, especially when you consider things like output
+locations, timestamps, file and line decorations, and structured logging. This
+package restricts the logging API to just 2 types of logs: info and error.
+
+Info logs are things you want to tell the user which are not errors. Error
+logs are, well, errors. If your code receives an `error` from a subordinate
+function call and is logging that `error` *and not returning it*, use error
+logs.
+
+2. Verbosity-levels on info logs. This gives developers a chance to indicate
+arbitrary grades of importance for info logs, without assigning names with
+semantic meaning such as "warning", "trace", and "debug." Superficially this
+may feel very similar, but the primary difference is the lack of semantics.
+Because verbosity is a numerical value, it's safe to assume that an app running
+with higher verbosity means more (and less important) logs will be generated.
+
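+A minimal sketch of the two log types described above (logger, err, and the
+key names are placeholders):
+
+```
+    logger.Info("processed request", "duration", elapsedSeconds)
+    logger.Error(err, "failed to process request", "requestID", id)
+```
+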
+## Implementations (non-exhaustive)
+
+There are implementations for the following logging libraries:
+
+- **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr)
+- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr)
+- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr)
+- **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr)
+- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting)
+- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr)
+- **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr)
+- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr)
+- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend)
+- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr)
+- **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr)
+- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0)
+- **bytes.Buffer** (writing to a buffer): [buflogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing)
+
+## slog interoperability
+
+Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler`
+and using the `slog.Logger` API with a `logr.LogSink`. `FromSlogHandler` and
+`ToSlogHandler` convert between a `logr.Logger` and a `slog.Handler`.
+As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level
+slog API.
+
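+A minimal sketch of both directions (assuming Go >= 1.21; the handler choice
+is a placeholder):
+
+```
+    // logr API on top of a slog.Handler:
+    logger := logr.FromSlogHandler(slog.NewJSONHandler(os.Stderr, nil))
+
+    // slog API on top of a logr.Logger:
+    slogger := slog.New(logr.ToSlogHandler(logger))
+```
+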
+### Using a `logr.LogSink` as backend for slog
+
+Ideally, a logr sink implementation should support both logr and slog by
+implementing both the normal logr interface(s) and `SlogSink`. Because
+of a conflict in the parameters of the common `Enabled` method, it is [not
+possible to implement both slog.Handler and logr.Sink in the same
+type](https://github.com/golang/go/issues/59110).
+
+If both are supported, log calls can go from the high-level APIs to the backend
+without the need to convert parameters. `FromSlogHandler` and `ToSlogHandler` can
+convert back and forth without adding additional wrappers, with one exception:
+when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then
+`ToSlogHandler` has to use a wrapper which adjusts the verbosity for future
+log calls.
+
+Such an implementation should also support values that implement specific
+interfaces from both packages for logging (`logr.Marshaler`, `slog.LogValuer`,
+`slog.GroupValue`). logr does not convert those.
+
+Not supporting slog has several drawbacks:
+- Recording source code locations works correctly if the handler gets called
+ through `slog.Logger`, but may be wrong in other cases. That's because a
+ `logr.LogSink` does its own stack unwinding instead of using the program counter
+ provided by the high-level API.
+- slog levels <= 0 can be mapped to logr levels by negating the level without a
+ loss of information. But all slog levels > 0 (e.g. `slog.LevelWarning` as
+ used by `slog.Logger.Warn`) must be mapped to 0 before calling the sink
+ because logr does not support "more important than info" levels.
+- The slog group concept is supported by prefixing each key in a key/value
+ pair with the group names, separated by a dot. For structured output like
+ JSON it would be better to group the key/value pairs inside an object.
+- Special slog values and interfaces don't work as expected.
+- The overhead is likely to be higher.
+
+These drawbacks are severe enough that applications using a mixture of slog and
+logr should switch to a different backend.
+
+### Using a `slog.Handler` as backend for logr
+
+Using a plain `slog.Handler` without support for logr works better than the
+other direction:
+- All logr verbosity levels can be mapped 1:1 to their corresponding slog level
+ by negating them.
+- Stack unwinding is done by the `SlogSink` and the resulting program
+ counter is passed to the `slog.Handler`.
+- Names added via `Logger.WithName` are gathered and recorded in an additional
+ attribute with `logger` as key and the names separated by slash as value.
+- `Logger.Error` is turned into a log record with `slog.LevelError` as level
+ and an additional attribute with `err` as key, if an error was provided.
+
+The main drawback is that `logr.Marshaler` will not be supported. Types should
+ideally support both `logr.Marshaler` and `slog.LogValuer`. If compatibility
+with logr implementations without slog support is not important, then
+`slog.LogValuer` is sufficient.
+
+### Context support for slog
+
+Storing a logger in a `context.Context` is not supported by
+slog. `NewContextWithSlogLogger` and `FromContextAsSlogLogger` can be
+used to fill this gap. They store and retrieve a `slog.Logger` pointer
+under the same context key that is also used by `NewContext` and
+`FromContext` for `logr.Logger` value.
+
+When `NewContextWithSlogLogger` is followed by `FromContext`, the latter will
+automatically convert the `slog.Logger` to a
+`logr.Logger`. `FromContextAsSlogLogger` does the same for the other direction.
+
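+A sketch (Go >= 1.21; ctx and slogger are placeholders):
+
+```
+    ctx := logr.NewContextWithSlogLogger(context.Background(), slogger)
+
+    // Later, in code that only knows logr:
+    logger, err := logr.FromContext(ctx) // converts the stored *slog.Logger
+```
+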
+With this approach, binaries which use either slog or logr are as efficient as
+possible with no unnecessary allocations. This is also why the API stores a
+`slog.Logger` pointer: when storing a `slog.Handler`, creating a `slog.Logger`
+on retrieval would need to allocate one.
+
+The downside is that switching back and forth needs more allocations. Because
+logr is the API that is already in use by different packages, in particular
+Kubernetes, the recommendation is to use the `logr.Logger` API in code which
+uses contextual logging.
+
+An alternative to adding values to a logger and storing that logger in the
+context is to store the values in the context and to configure a logging
+backend to extract those values when emitting log entries. This only works when
+log calls are passed the context, which is not supported by the logr API.
+
+With the slog API, it is possible, but not
+required. https://github.com/veqryn/slog-context is a package for slog which
+provides additional support code for this approach. It also contains wrappers
+for the context functions in logr, so developers who prefer to not use the logr
+APIs directly can use those instead and the resulting code will still be
+interoperable with logr.
+
+## FAQ
+
+### Conceptual
+
+#### Why structured logging?
+
+- **Structured logs are more easily queryable**: Since you've got
+ key-value pairs, it's much easier to query your structured logs for
+ particular values by filtering on the contents of a particular key --
+ think searching request logs for error codes, Kubernetes reconcilers for
+ the name and namespace of the reconciled object, etc.
+
+- **Structured logging makes it easier to have cross-referenceable logs**:
+ Similarly to searchability, if you maintain conventions around your
+ keys, it becomes easy to gather all log lines related to a particular
+ concept.
+
+- **Structured logs allow better dimensions of filtering**: if you have
+ structure to your logs, you've got more precise control over how much
+ information is logged -- you might choose in a particular configuration
+ to log certain keys but not others, only log lines where a certain key
+ matches a certain value, etc., instead of just having v-levels and names
+ to key off of.
+
+- **Structured logs better represent structured data**: sometimes, the
+ data that you want to log is inherently structured (think tuple-like
+ objects.) Structured logs allow you to preserve that structure when
+ outputting.
+
+#### Why V-levels?
+
+**V-levels give operators an easy way to control the chattiness of log
+operations**. V-levels provide a way for a given package to distinguish
+the relative importance or verbosity of a given log message. Then, if
+a particular logger or package is logging too many messages, the user
+of the package can simply change the v-levels for that library.
+
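+For example (a sketch; the enabled threshold is chosen by the logging
+implementation):
+
+```
+    logger.V(0).Info("always important") // equivalent to logger.Info(...)
+    logger.V(4).Info("debug detail")     // emitted only at verbosity >= 4
+```
+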
+#### Why not named levels, like Info/Warning/Error?
+
+Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences
+from Dave's ideas](#differences-from-daves-ideas).
+
+#### Why not allow format strings, too?
+
+**Format strings negate many of the benefits of structured logs**:
+
+- They're not easily searchable without resorting to fuzzy searching,
+ regular expressions, etc.
+
+- They don't store structured data well, since contents are flattened into
+ a string.
+
+- They're not cross-referenceable.
+
+- They don't compress easily, since the message is not constant.
+
+(Unless you turn positional parameters into key-value pairs with numerical
+keys, at which point you've gotten key-value logging with meaningless
+keys.)
+
+### Practical
+
+#### Why key-value pairs, and not a map?
+
+Key-value pairs are *much* easier to optimize, especially around
+allocations. Zap (a structured logger that inspired logr's interface) has
+[performance measurements](https://github.com/uber-go/zap#performance)
+that show this quite nicely.
+
+While the interface ends up being a little less obvious, you get
+potentially better performance, plus avoid making users type
+`map[string]string{}` every time they want to log.
+
+#### What if my V-levels differ between libraries?
+
+That's fine. Control your V-levels on a per-logger basis, and use the
+`WithName` method to pass different loggers to different libraries.
+
+Generally, you should take care to ensure that you have relatively
+consistent V-levels within a given logger, however, as this makes deciding
+on what verbosity of logs to request easier.
+
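+For example (a sketch; the names are placeholders):
+
+```
+    appLogger := rootLogger.WithName("myapp")
+    libLogger := rootLogger.WithName("somelib")
+```
+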
+#### But I really want to use a format string!
+
+That's not actually a question. Assuming your question is "how do
+I convert my mental model of logging with format strings to logging with
+constant messages":
+
+1. Figure out what the error actually is, as you'd write in a TL;DR style,
+ and use that as a message.
+
+2. For every place you'd write a format specifier, look to the word before
+ it, and add that as a key value pair.
+
+For instance, consider the following examples (all taken from spots in the
+Kubernetes codebase):
+
+- `klog.V(4).Infof("Client is returning errors: code %v, error %v",
+ responseCode, err)` becomes `logger.Error(err, "client returned an
+ error", "code", responseCode)`
+
+- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v",
+ seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after
+ response when requesting url", "attempt", retries, "after
+ seconds", seconds, "url", url)`
+
+If you *really* must use a format string, use it in a key's value, and
+call `fmt.Sprintf` yourself. For instance: `log.Printf("unable to
+reflect over type %T")` becomes `logger.Info("unable to reflect over
+type", "type", fmt.Sprintf("%T"))`. In general though, the cases where
+this is necessary should be few and far between.
+
+#### How do I choose my V-levels?
+
+This is basically the only hard constraint: increase V-levels to denote
+more verbose or more debug-y logs.
+
+Otherwise, you can start out with `0` as "you always want to see this",
+`1` as "common logging that you might *possibly* want to turn off", and
+`10` as "I would like to performance-test your log collection stack."
+
+Then gradually choose levels in between as you need them, working your way
+down from 10 (for debug and trace style logs) and up from 1 (for chattier
+info-type logs). For reference, slog pre-defines -4 for debug logs
+(corresponds to 4 in logr), which matches what is
+[recommended for Kubernetes](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use).
+
+#### How do I choose my keys?
+
+Keys are fairly flexible, and can hold more or less any string
+value. For best compatibility with implementations and consistency
+with existing code in other projects, there are a few conventions you
+should consider.
+
+- Make your keys human-readable.
+- Constant keys are generally a good idea.
+- Be consistent across your codebase.
+- Keys should naturally match parts of the message string.
+- Use lower case for simple keys and
+ [lowerCamelCase](https://en.wiktionary.org/wiki/lowerCamelCase) for
+ more complex ones. Kubernetes is one example of a project that has
+ [adopted that
+ convention](https://github.com/kubernetes/community/blob/HEAD/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments).
+
+While key names are mostly unrestricted (and spaces are acceptable),
+it's generally a good idea to stick to printable ascii characters, or at
+least match the general character set of your log lines.
+
+#### Why should keys be constant values?
+
+The point of structured logging is to make later log processing easier. Your
+keys are, effectively, the schema of each log message. If you use different
+keys across instances of the same log line, you will make your structured logs
+much harder to use. `Sprintf()` is for values, not for keys!
+
+#### Why is this not a pure interface?
+
+The Logger type is implemented as a struct in order to allow the Go compiler to
+optimize things like high-V `Info` logs that are not triggered. Not all of
+these implementations are implemented yet, but this structure was suggested as
+a way to ensure they *can* be implemented. All of the real work is behind the
+`LogSink` interface.
+
+[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging
diff --git a/vendor/github.com/go-logr/logr/SECURITY.md b/vendor/github.com/go-logr/logr/SECURITY.md
new file mode 100644
index 0000000..1ca756f
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/SECURITY.md
@@ -0,0 +1,18 @@
+# Security Policy
+
+If you have discovered a security vulnerability in this project, please report it
+privately. **Do not disclose it as a public issue.** This gives us time to work with you
+to fix the issue before public exposure, reducing the chance that the exploit will be
+used before a patch is released.
+
+You may submit the report in the following ways:
+
+- send an email to go-logr-security@googlegroups.com
+- send us a [private vulnerability report](https://github.com/go-logr/logr/security/advisories/new)
+
+Please provide the following information in your report:
+
+- A description of the vulnerability and its impact
+- How to reproduce the issue
+
+We ask that you give us 90 days to work on a fix before public exposure.
diff --git a/vendor/github.com/go-logr/logr/context.go b/vendor/github.com/go-logr/logr/context.go
new file mode 100644
index 0000000..de8bcc3
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/context.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+// contextKey is how we find Loggers in a context.Context. With Go < 1.21,
+// the value is always a Logger value. With Go >= 1.21, the value can be a
+// Logger value or a slog.Logger pointer.
+type contextKey struct{}
+
+// notFoundError exists to carry an IsNotFound method.
+type notFoundError struct{}
+
+func (notFoundError) Error() string {
+ return "no logr.Logger was present"
+}
+
+func (notFoundError) IsNotFound() bool {
+ return true
+}
diff --git a/vendor/github.com/go-logr/logr/context_noslog.go b/vendor/github.com/go-logr/logr/context_noslog.go
new file mode 100644
index 0000000..f012f9a
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/context_noslog.go
@@ -0,0 +1,49 @@
+//go:build !go1.21
+// +build !go1.21
+
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+ "context"
+)
+
+// FromContext returns a Logger from ctx or an error if no Logger is found.
+func FromContext(ctx context.Context) (Logger, error) {
+ if v, ok := ctx.Value(contextKey{}).(Logger); ok {
+ return v, nil
+ }
+
+ return Logger{}, notFoundError{}
+}
+
+// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
+// returns a Logger that discards all log messages.
+func FromContextOrDiscard(ctx context.Context) Logger {
+ if v, ok := ctx.Value(contextKey{}).(Logger); ok {
+ return v
+ }
+
+ return Discard()
+}
+
+// NewContext returns a new Context, derived from ctx, which carries the
+// provided Logger.
+func NewContext(ctx context.Context, logger Logger) context.Context {
+ return context.WithValue(ctx, contextKey{}, logger)
+}
diff --git a/vendor/github.com/go-logr/logr/context_slog.go b/vendor/github.com/go-logr/logr/context_slog.go
new file mode 100644
index 0000000..065ef0b
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/context_slog.go
@@ -0,0 +1,83 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+)
+
+// FromContext returns a Logger from ctx or an error if no Logger is found.
+func FromContext(ctx context.Context) (Logger, error) {
+ v := ctx.Value(contextKey{})
+ if v == nil {
+ return Logger{}, notFoundError{}
+ }
+
+ switch v := v.(type) {
+ case Logger:
+ return v, nil
+ case *slog.Logger:
+ return FromSlogHandler(v.Handler()), nil
+ default:
+ // Not reached.
+ panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
+ }
+}
+
+// FromContextAsSlogLogger returns a slog.Logger from ctx or nil if no such Logger is found.
+func FromContextAsSlogLogger(ctx context.Context) *slog.Logger {
+ v := ctx.Value(contextKey{})
+ if v == nil {
+ return nil
+ }
+
+ switch v := v.(type) {
+ case Logger:
+ return slog.New(ToSlogHandler(v))
+ case *slog.Logger:
+ return v
+ default:
+ // Not reached.
+ panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
+ }
+}
+
+// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
+// returns a Logger that discards all log messages.
+func FromContextOrDiscard(ctx context.Context) Logger {
+ if logger, err := FromContext(ctx); err == nil {
+ return logger
+ }
+ return Discard()
+}
+
+// NewContext returns a new Context, derived from ctx, which carries the
+// provided Logger.
+func NewContext(ctx context.Context, logger Logger) context.Context {
+ return context.WithValue(ctx, contextKey{}, logger)
+}
+
+// NewContextWithSlogLogger returns a new Context, derived from ctx, which carries the
+// provided slog.Logger.
+func NewContextWithSlogLogger(ctx context.Context, logger *slog.Logger) context.Context {
+ return context.WithValue(ctx, contextKey{}, logger)
+}
diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go
new file mode 100644
index 0000000..99fe8be
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/discard.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2020 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+// Discard returns a Logger that discards all messages logged to it. It can be
+// used whenever the caller is not interested in the logs. Logger instances
+// produced by this function always compare as equal.
+func Discard() Logger {
+ return New(nil)
+}
diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go
new file mode 100644
index 0000000..30568e7
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/funcr/funcr.go
@@ -0,0 +1,914 @@
+/*
+Copyright 2021 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package funcr implements formatting of structured log messages and
+// optionally captures the call site and timestamp.
+//
+// The simplest way to use it is via its implementation of a
+// github.com/go-logr/logr.LogSink with output through an arbitrary
+// "write" function. See New and NewJSON for details.
+//
+// # Custom LogSinks
+//
+// For users who need more control, a funcr.Formatter can be embedded inside
+// your own custom LogSink implementation. This is useful when the LogSink
+// needs to implement additional methods, for example.
+//
+// # Formatting
+//
+// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for
+// values which are being logged. When rendering a struct, funcr will use Go's
+// standard JSON tags (all except "string").
+package funcr
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/json"
+ "fmt"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/go-logr/logr"
+)
+
+// New returns a logr.Logger which is implemented by an arbitrary function.
+func New(fn func(prefix, args string), opts Options) logr.Logger {
+ return logr.New(newSink(fn, NewFormatter(opts)))
+}
+
+// NewJSON returns a logr.Logger which is implemented by an arbitrary function
+// and produces JSON output.
+func NewJSON(fn func(obj string), opts Options) logr.Logger {
+ fnWrapper := func(_, obj string) {
+ fn(obj)
+ }
+ return logr.New(newSink(fnWrapper, NewFormatterJSON(opts)))
+}
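+
+// Illustrative usage sketch (not part of the upstream file):
+//
+//	logger := funcr.New(func(prefix, args string) {
+//		fmt.Println(prefix, args)
+//	}, funcr.Options{Verbosity: 1})
+//	logger.V(1).Info("hello", "key", "value")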
+
+// Underlier exposes access to the underlying logging function. Since
+// callers only have a logr.Logger, they have to know which
+// implementation is in use, so this interface is less of an
+// abstraction and more of a way to test type conversion.
+type Underlier interface {
+ GetUnderlying() func(prefix, args string)
+}
+
+func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink {
+ l := &fnlogger{
+ Formatter: formatter,
+ write: fn,
+ }
+ // For skipping fnlogger.Info and fnlogger.Error.
+ l.Formatter.AddCallDepth(1)
+ return l
+}
+
+// Options carries parameters which influence the way logs are generated.
+type Options struct {
+ // LogCaller tells funcr to add a "caller" key to some or all log lines.
+ // This has some overhead, so some users might not want it.
+ LogCaller MessageClass
+
+ // LogCallerFunc tells funcr to also log the calling function name. This
+ // has no effect if caller logging is not enabled (see Options.LogCaller).
+ LogCallerFunc bool
+
+ // LogTimestamp tells funcr to add a "ts" key to log lines. This has some
+ // overhead, so some users might not want it.
+ LogTimestamp bool
+
+ // TimestampFormat tells funcr how to render timestamps when LogTimestamp
+ // is enabled. If not specified, a default format will be used. For more
+ // details, see docs for Go's time.Layout.
+ TimestampFormat string
+
+ // LogInfoLevel tells funcr what key to use to log the info level.
+ // If not specified, the info level will be logged as "level".
+ // If this is set to "", the info level will not be logged at all.
+ LogInfoLevel *string
+
+ // Verbosity tells funcr which V logs to produce. Higher values enable
+ // more logs. Info logs at or below this level will be written, while logs
+ // above this level will be discarded.
+ Verbosity int
+
+ // RenderBuiltinsHook allows users to mutate the list of key-value pairs
+ // while a log line is being rendered. The kvList argument follows logr
+ // conventions - each pair of slice elements is comprised of a string key
+ // and an arbitrary value (verified and sanitized before calling this
+ // hook). The value returned must follow the same conventions. This hook
+ // can be used to audit or modify logged data. For example, you might want
+ // to prefix all of funcr's built-in keys with some string. This hook is
+ // only called for built-in (provided by funcr itself) key-value pairs.
+ // Equivalent hooks are offered for key-value pairs saved via
+ // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and
+ // for user-provided pairs (see RenderArgsHook).
+ RenderBuiltinsHook func(kvList []any) []any
+
+ // RenderValuesHook is the same as RenderBuiltinsHook, except that it is
+ // only called for key-value pairs saved via logr.Logger.WithValues. See
+ // RenderBuiltinsHook for more details.
+ RenderValuesHook func(kvList []any) []any
+
+ // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only
+ // called for key-value pairs passed directly to Info and Error. See
+ // RenderBuiltinsHook for more details.
+ RenderArgsHook func(kvList []any) []any
+
+ // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct
+ // that contains a struct, etc.) it may log. Every time it finds a struct,
+ // slice, array, or map the depth is increased by one. When the maximum is
+ // reached, the value will be converted to a string indicating that the max
+ // depth has been exceeded. If this field is not specified, a default
+ // value will be used.
+ MaxLogDepth int
+}
+
+// MessageClass indicates which category or categories of messages to consider.
+type MessageClass int
+
+const (
+ // None ignores all message classes.
+ None MessageClass = iota
+ // All considers all message classes.
+ All
+ // Info only considers info messages.
+ Info
+ // Error only considers error messages.
+ Error
+)
+
+// fnlogger inherits some of its LogSink implementation from Formatter
+// and just needs to add some glue code.
+type fnlogger struct {
+ Formatter
+ write func(prefix, args string)
+}
+
+func (l fnlogger) WithName(name string) logr.LogSink {
+ l.Formatter.AddName(name)
+ return &l
+}
+
+func (l fnlogger) WithValues(kvList ...any) logr.LogSink {
+ l.Formatter.AddValues(kvList)
+ return &l
+}
+
+func (l fnlogger) WithCallDepth(depth int) logr.LogSink {
+ l.Formatter.AddCallDepth(depth)
+ return &l
+}
+
+func (l fnlogger) Info(level int, msg string, kvList ...any) {
+ prefix, args := l.FormatInfo(level, msg, kvList)
+ l.write(prefix, args)
+}
+
+func (l fnlogger) Error(err error, msg string, kvList ...any) {
+ prefix, args := l.FormatError(err, msg, kvList)
+ l.write(prefix, args)
+}
+
+func (l fnlogger) GetUnderlying() func(prefix, args string) {
+ return l.write
+}
+
+// Assert conformance to the interfaces.
+var _ logr.LogSink = &fnlogger{}
+var _ logr.CallDepthLogSink = &fnlogger{}
+var _ Underlier = &fnlogger{}
+
+// NewFormatter constructs a Formatter which emits a JSON-like key=value format.
+func NewFormatter(opts Options) Formatter {
+ return newFormatter(opts, outputKeyValue)
+}
+
+// NewFormatterJSON constructs a Formatter which emits strict JSON.
+func NewFormatterJSON(opts Options) Formatter {
+ return newFormatter(opts, outputJSON)
+}
+
+// Defaults for Options.
+const defaultTimestampFormat = "2006-01-02 15:04:05.000000"
+const defaultMaxLogDepth = 16
+
+func newFormatter(opts Options, outfmt outputFormat) Formatter {
+ if opts.TimestampFormat == "" {
+ opts.TimestampFormat = defaultTimestampFormat
+ }
+ if opts.MaxLogDepth == 0 {
+ opts.MaxLogDepth = defaultMaxLogDepth
+ }
+ if opts.LogInfoLevel == nil {
+ opts.LogInfoLevel = new(string)
+ *opts.LogInfoLevel = "level"
+ }
+ f := Formatter{
+ outputFormat: outfmt,
+ prefix: "",
+ values: nil,
+ depth: 0,
+ opts: &opts,
+ }
+ return f
+}
+
+// Formatter is an opaque struct which can be embedded in a LogSink
+// implementation. It should be constructed with NewFormatter. Some of
+// its methods directly implement logr.LogSink.
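+//
+// For example (a minimal sketch; stderrSink and its output choice are
+// illustrative, not part of this package), a custom sink can embed Formatter
+// and supply only the final write step, much as fnlogger does above:
+//
+// type stderrSink struct {
+// funcr.Formatter
+// }
+//
+// func (s stderrSink) Info(level int, msg string, kvList ...any) {
+// prefix, args := s.FormatInfo(level, msg, kvList)
+// fmt.Fprintln(os.Stderr, prefix, args)
+// }
+//
+// func (s stderrSink) Error(err error, msg string, kvList ...any) {
+// prefix, args := s.FormatError(err, msg, kvList)
+// fmt.Fprintln(os.Stderr, prefix, args)
+// }
+//
+// Init and Enabled come from the embedded Formatter; WithName and WithValues
+// wrap AddName and AddValues, as fnlogger shows above.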
+type Formatter struct {
+ outputFormat outputFormat
+ prefix string
+ values []any
+ valuesStr string
+ depth int
+ opts *Options
+ groupName string // for slog groups
+ groups []groupDef
+}
+
+// outputFormat indicates which outputFormat to use.
+type outputFormat int
+
+const (
+ // outputKeyValue emits a JSON-like key=value format, but not strict JSON.
+ outputKeyValue outputFormat = iota
+ // outputJSON emits strict JSON.
+ outputJSON
+)
+
+// groupDef represents a saved group. The values may be empty, but we don't
+// know if we need to render the group until the final record is rendered.
+type groupDef struct {
+ name string
+ values string
+}
+
+// PseudoStruct is a list of key-value pairs that gets logged as a struct.
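+// For example (an illustrative sketch), logging
+//
+// logger.Info("msg", "pair", PseudoStruct{"name", "value", "count", 2})
+//
+// renders the value as {"name":"value","count":2} in JSON mode rather than
+// as a flat list of arguments.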
+type PseudoStruct []any
+
+// render produces a log line, ready to use.
+func (f Formatter) render(builtins, args []any) string {
+ // Empirically bytes.Buffer is faster than strings.Builder for this.
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+
+ if f.outputFormat == outputJSON {
+ buf.WriteByte('{') // for the whole record
+ }
+
+ // Render builtins
+ vals := builtins
+ if hook := f.opts.RenderBuiltinsHook; hook != nil {
+ vals = hook(f.sanitize(vals))
+ }
+ f.flatten(buf, vals, false) // keys are ours, no need to escape
+ continuing := len(builtins) > 0
+
+ // Turn the inner-most group into a string
+ argsStr := func() string {
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+
+ vals = args
+ if hook := f.opts.RenderArgsHook; hook != nil {
+ vals = hook(f.sanitize(vals))
+ }
+ f.flatten(buf, vals, true) // escape user-provided keys
+
+ return buf.String()
+ }()
+
+ // Render the stack of groups from the inside out.
+ bodyStr := f.renderGroup(f.groupName, f.valuesStr, argsStr)
+ for i := len(f.groups) - 1; i >= 0; i-- {
+ grp := &f.groups[i]
+ if grp.values == "" && bodyStr == "" {
+ // no contents, so we must elide the whole group
+ continue
+ }
+ bodyStr = f.renderGroup(grp.name, grp.values, bodyStr)
+ }
+
+ if bodyStr != "" {
+ if continuing {
+ buf.WriteByte(f.comma())
+ }
+ buf.WriteString(bodyStr)
+ }
+
+ if f.outputFormat == outputJSON {
+ buf.WriteByte('}') // for the whole record
+ }
+
+ return buf.String()
+}
+
+// renderGroup returns a string representation of the named group with rendered
+// values and args. If the name is empty, this will return the values and args,
+// joined. If the name is not empty, this will return a single key-value pair,
+// where the value is a grouping of the values and args. If the values and
+// args are both empty, this will return an empty string, even if the name was
+// specified.
+func (f Formatter) renderGroup(name string, values string, args string) string {
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+
+ needClosingBrace := false
+ if name != "" && (values != "" || args != "") {
+ buf.WriteString(f.quoted(name, true)) // escape user-provided keys
+ buf.WriteByte(f.colon())
+ buf.WriteByte('{')
+ needClosingBrace = true
+ }
+
+ continuing := false
+ if values != "" {
+ buf.WriteString(values)
+ continuing = true
+ }
+
+ if args != "" {
+ if continuing {
+ buf.WriteByte(f.comma())
+ }
+ buf.WriteString(args)
+ }
+
+ if needClosingBrace {
+ buf.WriteByte('}')
+ }
+
+ return buf.String()
+}
+
+// flatten renders a list of key-value pairs into a buffer. If escapeKeys is
+// true, the keys are assumed to have non-JSON-compatible characters in them
+// and must be evaluated for escapes.
+//
+// This function returns a potentially modified version of kvList, which
+// ensures that there is a value for every key (adding a value if needed) and
+// that each key is a string (substituting a key if needed).
+func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, escapeKeys bool) []any {
+ // This logic overlaps with sanitize() but saves one type-cast per key,
+ // which can be measurable.
+ if len(kvList)%2 != 0 {
+ kvList = append(kvList, noValue)
+ }
+ copied := false
+ for i := 0; i < len(kvList); i += 2 {
+ k, ok := kvList[i].(string)
+ if !ok {
+ if !copied {
+ newList := make([]any, len(kvList))
+ copy(newList, kvList)
+ kvList = newList
+ copied = true
+ }
+ k = f.nonStringKey(kvList[i])
+ kvList[i] = k
+ }
+ v := kvList[i+1]
+
+ if i > 0 {
+ if f.outputFormat == outputJSON {
+ buf.WriteByte(f.comma())
+ } else {
+ // In theory the format could be something we don't understand. In
+ // practice, we control it, so it won't be.
+ buf.WriteByte(' ')
+ }
+ }
+
+ buf.WriteString(f.quoted(k, escapeKeys))
+ buf.WriteByte(f.colon())
+ buf.WriteString(f.pretty(v))
+ }
+ return kvList
+}
+
+func (f Formatter) quoted(str string, escape bool) string {
+ if escape {
+ return prettyString(str)
+ }
+ // this is faster
+ return `"` + str + `"`
+}
+
+func (f Formatter) comma() byte {
+ if f.outputFormat == outputJSON {
+ return ','
+ }
+ return ' '
+}
+
+func (f Formatter) colon() byte {
+ if f.outputFormat == outputJSON {
+ return ':'
+ }
+ return '='
+}
+
+func (f Formatter) pretty(value any) string {
+ return f.prettyWithFlags(value, 0, 0)
+}
+
+const (
+ flagRawStruct = 0x1 // do not print braces on structs
+)
+
+// TODO: This is not fast. Most of the overhead goes here.
+func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
+ if depth > f.opts.MaxLogDepth {
+ return `""`
+ }
+
+ // Handle types that take full control of logging.
+ if v, ok := value.(logr.Marshaler); ok {
+ // Replace the value with what the type wants to get logged.
+ // That then gets handled below via reflection.
+ value = invokeMarshaler(v)
+ }
+
+ // Handle types that want to format themselves.
+ switch v := value.(type) {
+ case fmt.Stringer:
+ value = invokeStringer(v)
+ case error:
+ value = invokeError(v)
+ }
+
+ // Handling the most common types without reflect is a small perf win.
+ switch v := value.(type) {
+ case bool:
+ return strconv.FormatBool(v)
+ case string:
+ return prettyString(v)
+ case int:
+ return strconv.FormatInt(int64(v), 10)
+ case int8:
+ return strconv.FormatInt(int64(v), 10)
+ case int16:
+ return strconv.FormatInt(int64(v), 10)
+ case int32:
+ return strconv.FormatInt(int64(v), 10)
+ case int64:
+ return strconv.FormatInt(int64(v), 10)
+ case uint:
+ return strconv.FormatUint(uint64(v), 10)
+ case uint8:
+ return strconv.FormatUint(uint64(v), 10)
+ case uint16:
+ return strconv.FormatUint(uint64(v), 10)
+ case uint32:
+ return strconv.FormatUint(uint64(v), 10)
+ case uint64:
+ return strconv.FormatUint(v, 10)
+ case uintptr:
+ return strconv.FormatUint(uint64(v), 10)
+ case float32:
+ return strconv.FormatFloat(float64(v), 'f', -1, 32)
+ case float64:
+ return strconv.FormatFloat(v, 'f', -1, 64)
+ case complex64:
+ return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"`
+ case complex128:
+ return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"`
+ case PseudoStruct:
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+ v = f.sanitize(v)
+ if flags&flagRawStruct == 0 {
+ buf.WriteByte('{')
+ }
+ for i := 0; i < len(v); i += 2 {
+ if i > 0 {
+ buf.WriteByte(f.comma())
+ }
+ k, _ := v[i].(string) // sanitize() above means no need to check success
+ // arbitrary keys might need escaping
+ buf.WriteString(prettyString(k))
+ buf.WriteByte(f.colon())
+ buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
+ }
+ if flags&flagRawStruct == 0 {
+ buf.WriteByte('}')
+ }
+ return buf.String()
+ }
+
+ buf := bytes.NewBuffer(make([]byte, 0, 256))
+ t := reflect.TypeOf(value)
+ if t == nil {
+ return "null"
+ }
+ v := reflect.ValueOf(value)
+ switch t.Kind() {
+ case reflect.Bool:
+ return strconv.FormatBool(v.Bool())
+ case reflect.String:
+ return prettyString(v.String())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return strconv.FormatInt(int64(v.Int()), 10)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return strconv.FormatUint(uint64(v.Uint()), 10)
+ case reflect.Float32:
+ return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32)
+ case reflect.Float64:
+ return strconv.FormatFloat(v.Float(), 'f', -1, 64)
+ case reflect.Complex64:
+ return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"`
+ case reflect.Complex128:
+ return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"`
+ case reflect.Struct:
+ if flags&flagRawStruct == 0 {
+ buf.WriteByte('{')
+ }
+ printComma := false // testing i>0 is not enough because of JSON omitted fields
+ for i := 0; i < t.NumField(); i++ {
+ fld := t.Field(i)
+ if fld.PkgPath != "" {
+ // reflect says this field is only defined for non-exported fields.
+ continue
+ }
+ if !v.Field(i).CanInterface() {
+ // reflect isn't clear exactly what this means, but we can't use it.
+ continue
+ }
+ name := ""
+ omitempty := false
+ if tag, found := fld.Tag.Lookup("json"); found {
+ if tag == "-" {
+ continue
+ }
+ if comma := strings.Index(tag, ","); comma != -1 {
+ if n := tag[:comma]; n != "" {
+ name = n
+ }
+ rest := tag[comma:]
+ if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") {
+ omitempty = true
+ }
+ } else {
+ name = tag
+ }
+ }
+ if omitempty && isEmpty(v.Field(i)) {
+ continue
+ }
+ if printComma {
+ buf.WriteByte(f.comma())
+ }
+ printComma = true // if we got here, we are rendering a field
+ if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
+ buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
+ continue
+ }
+ if name == "" {
+ name = fld.Name
+ }
+ // field names can't contain characters which need escaping
+ buf.WriteString(f.quoted(name, false))
+ buf.WriteByte(f.colon())
+ buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
+ }
+ if flags&flagRawStruct == 0 {
+ buf.WriteByte('}')
+ }
+ return buf.String()
+ case reflect.Slice, reflect.Array:
+ // If this is outputting as JSON, make sure this isn't really a json.RawMessage.
+ // If so, just emit it "as-is" and don't pretty-print it, as that would just
+ // print it as [X,Y,Z,...], which isn't nearly as useful as the string form you really want.
+ if f.outputFormat == outputJSON {
+ if rm, ok := value.(json.RawMessage); ok {
+ // If it's empty make sure we emit an empty value as the array style would below.
+ if len(rm) > 0 {
+ buf.Write(rm)
+ } else {
+ buf.WriteString("null")
+ }
+ return buf.String()
+ }
+ }
+ buf.WriteByte('[')
+ for i := 0; i < v.Len(); i++ {
+ if i > 0 {
+ buf.WriteByte(f.comma())
+ }
+ e := v.Index(i)
+ buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
+ }
+ buf.WriteByte(']')
+ return buf.String()
+ case reflect.Map:
+ buf.WriteByte('{')
+ // This does not sort the map keys, for best perf.
+ it := v.MapRange()
+ i := 0
+ for it.Next() {
+ if i > 0 {
+ buf.WriteByte(f.comma())
+ }
+ // If a map key supports TextMarshaler, use it.
+ keystr := ""
+ if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok {
+ txt, err := m.MarshalText()
+ if err != nil {
+ keystr = fmt.Sprintf("<error-MarshalText: %s>", err.Error())
+ } else {
+ keystr = string(txt)
+ }
+ keystr = prettyString(keystr)
+ } else {
+ // prettyWithFlags will produce already-escaped values
+ keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1)
+ if t.Key().Kind() != reflect.String {
+ // JSON only does string keys. Unlike Go's standard JSON, we'll
+ // convert just about anything to a string.
+ keystr = prettyString(keystr)
+ }
+ }
+ buf.WriteString(keystr)
+ buf.WriteByte(f.colon())
+ buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
+ i++
+ }
+ buf.WriteByte('}')
+ return buf.String()
+ case reflect.Ptr, reflect.Interface:
+ if v.IsNil() {
+ return "null"
+ }
+ return f.prettyWithFlags(v.Elem().Interface(), 0, depth)
+ }
+ return fmt.Sprintf(`"<unhandled-%s>"`, t.Kind().String())
+}
+
+func prettyString(s string) string {
+ // Avoid escaping (which does allocations) if we can.
+ if needsEscape(s) {
+ return strconv.Quote(s)
+ }
+ b := bytes.NewBuffer(make([]byte, 0, 1024))
+ b.WriteByte('"')
+ b.WriteString(s)
+ b.WriteByte('"')
+ return b.String()
+}
+
+// needsEscape determines whether the input string needs to be escaped or not,
+// without doing any allocations.
+func needsEscape(s string) bool {
+ for _, r := range s {
+ if !strconv.IsPrint(r) || r == '\\' || r == '"' {
+ return true
+ }
+ }
+ return false
+}
+
+func isEmpty(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Complex64, reflect.Complex128:
+ return v.Complex() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
+
+func invokeMarshaler(m logr.Marshaler) (ret any) {
+ defer func() {
+ if r := recover(); r != nil {
+ ret = fmt.Sprintf("<panic: %s>", r)
+ }
+ }()
+ return m.MarshalLog()
+}
+
+func invokeStringer(s fmt.Stringer) (ret string) {
+ defer func() {
+ if r := recover(); r != nil {
+ ret = fmt.Sprintf("<panic: %s>", r)
+ }
+ }()
+ return s.String()
+}
+
+func invokeError(e error) (ret string) {
+ defer func() {
+ if r := recover(); r != nil {
+ ret = fmt.Sprintf("<panic: %s>", r)
+ }
+ }()
+ return e.Error()
+}
+
+// Caller represents the original call site for a log line, after considering
+// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and
+// Line fields will always be provided, while the Func field is optional.
+// Users can set the render hook fields in Options to examine logged key-value
+// pairs, one of which will be {"caller", Caller} if the Options.LogCaller
+// field is enabled for the given MessageClass.
+type Caller struct {
+ // File is the basename of the file for this call site.
+ File string `json:"file"`
+ // Line is the line number in the file for this call site.
+ Line int `json:"line"`
+ // Func is the function name for this call site, or empty if
+ // Options.LogCallerFunc is not enabled.
+ Func string `json:"function,omitempty"`
+}
+
+func (f Formatter) caller() Caller {
+ // +1 for this frame, +1 for Info/Error.
+ pc, file, line, ok := runtime.Caller(f.depth + 2)
+ if !ok {
+ return Caller{"", 0, ""}
+ }
+ fn := ""
+ if f.opts.LogCallerFunc {
+ if fp := runtime.FuncForPC(pc); fp != nil {
+ fn = fp.Name()
+ }
+ }
+
+ return Caller{filepath.Base(file), line, fn}
+}
+
+const noValue = "<no-value>"
+
+func (f Formatter) nonStringKey(v any) string {
+ return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
+}
+
+// snippet produces a short snippet string of an arbitrary value.
+func (f Formatter) snippet(v any) string {
+ const snipLen = 16
+
+ snip := f.pretty(v)
+ if len(snip) > snipLen {
+ snip = snip[:snipLen]
+ }
+ return snip
+}
+
+// sanitize ensures that a list of key-value pairs has a value for every key
+// (adding a value if needed) and that each key is a string (substituting a key
+// if needed).
+func (f Formatter) sanitize(kvList []any) []any {
+ if len(kvList)%2 != 0 {
+ kvList = append(kvList, noValue)
+ }
+ for i := 0; i < len(kvList); i += 2 {
+ _, ok := kvList[i].(string)
+ if !ok {
+ kvList[i] = f.nonStringKey(kvList[i])
+ }
+ }
+ return kvList
+}
+
+// startGroup opens a new group scope (basically a sub-struct), which locks all
+// the current saved values and starts them anew. This is needed to satisfy
+// slog.
+func (f *Formatter) startGroup(name string) {
+ // Unnamed groups are just inlined.
+ if name == "" {
+ return
+ }
+
+ n := len(f.groups)
+ f.groups = append(f.groups[:n:n], groupDef{f.groupName, f.valuesStr})
+
+ // Start collecting new values.
+ f.groupName = name
+ f.valuesStr = ""
+ f.values = nil
+}
+
+// Init configures this Formatter from runtime info, such as the call depth
+// imposed by logr itself.
+// Note that this receiver is a pointer, so depth can be saved.
+func (f *Formatter) Init(info logr.RuntimeInfo) {
+ f.depth += info.CallDepth
+}
+
+// Enabled checks whether an info message at the given level should be logged.
+func (f Formatter) Enabled(level int) bool {
+ return level <= f.opts.Verbosity
+}
+
+// GetDepth returns the current depth of this Formatter. This is useful for
+// implementations which do their own caller attribution.
+func (f Formatter) GetDepth() int {
+ return f.depth
+}
+
+// FormatInfo renders an Info log message into strings. The prefix will be
+// empty when no names were set (via AddNames), or when the output is
+// configured for JSON.
+func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, argsStr string) {
+ args := make([]any, 0, 64) // using a constant here impacts perf
+ prefix = f.prefix
+ if f.outputFormat == outputJSON {
+ args = append(args, "logger", prefix)
+ prefix = ""
+ }
+ if f.opts.LogTimestamp {
+ args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
+ }
+ if policy := f.opts.LogCaller; policy == All || policy == Info {
+ args = append(args, "caller", f.caller())
+ }
+ if key := *f.opts.LogInfoLevel; key != "" {
+ args = append(args, key, level)
+ }
+ args = append(args, "msg", msg)
+ return prefix, f.render(args, kvList)
+}
+
+// FormatError renders an Error log message into strings. The prefix will be
+// empty when no names were set (via AddNames), or when the output is
+// configured for JSON.
+func (f Formatter) FormatError(err error, msg string, kvList []any) (prefix, argsStr string) {
+ args := make([]any, 0, 64) // using a constant here impacts perf
+ prefix = f.prefix
+ if f.outputFormat == outputJSON {
+ args = append(args, "logger", prefix)
+ prefix = ""
+ }
+ if f.opts.LogTimestamp {
+ args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
+ }
+ if policy := f.opts.LogCaller; policy == All || policy == Error {
+ args = append(args, "caller", f.caller())
+ }
+ args = append(args, "msg", msg)
+ var loggableErr any
+ if err != nil {
+ loggableErr = err.Error()
+ }
+ args = append(args, "error", loggableErr)
+ return prefix, f.render(args, kvList)
+}
+
+// AddName appends the specified name. funcr uses '/' characters to separate
+// name elements. Callers should not pass '/' in the provided name string, but
+// this library does not actually enforce that.
+func (f *Formatter) AddName(name string) {
+ if len(f.prefix) > 0 {
+ f.prefix += "/"
+ }
+ f.prefix += name
+}
+
+// AddValues adds key-value pairs to the set of saved values to be logged with
+// each log line.
+func (f *Formatter) AddValues(kvList []any) {
+ // Three slice args forces a copy.
+ n := len(f.values)
+ f.values = append(f.values[:n:n], kvList...)
+
+ vals := f.values
+ if hook := f.opts.RenderValuesHook; hook != nil {
+ vals = hook(f.sanitize(vals))
+ }
+
+ // Pre-render values, so we don't have to do it on each Info/Error call.
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+ f.flatten(buf, vals, true) // escape user-provided keys
+ f.valuesStr = buf.String()
+}
+
+// AddCallDepth increases the number of stack-frames to skip when attributing
+// the log line to a file and line.
+func (f *Formatter) AddCallDepth(depth int) {
+ f.depth += depth
+}
diff --git a/vendor/github.com/go-logr/logr/funcr/slogsink.go b/vendor/github.com/go-logr/logr/funcr/slogsink.go
new file mode 100644
index 0000000..7bd8476
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/funcr/slogsink.go
@@ -0,0 +1,105 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package funcr
+
+import (
+ "context"
+ "log/slog"
+
+ "github.com/go-logr/logr"
+)
+
+var _ logr.SlogSink = &fnlogger{}
+
+const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink
+
+func (l fnlogger) Handle(_ context.Context, record slog.Record) error {
+ kvList := make([]any, 0, 2*record.NumAttrs())
+ record.Attrs(func(attr slog.Attr) bool {
+ kvList = attrToKVs(attr, kvList)
+ return true
+ })
+
+ if record.Level >= slog.LevelError {
+ l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...)
+ } else {
+ level := l.levelFromSlog(record.Level)
+ l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...)
+ }
+ return nil
+}
+
+func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink {
+ kvList := make([]any, 0, 2*len(attrs))
+ for _, attr := range attrs {
+ kvList = attrToKVs(attr, kvList)
+ }
+ l.AddValues(kvList)
+ return &l
+}
+
+func (l fnlogger) WithGroup(name string) logr.SlogSink {
+ l.startGroup(name)
+ return &l
+}
+
+// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog Groups
+// and other details of slog.
+func attrToKVs(attr slog.Attr, kvList []any) []any {
+ attrVal := attr.Value.Resolve()
+ if attrVal.Kind() == slog.KindGroup {
+ groupVal := attrVal.Group()
+ grpKVs := make([]any, 0, 2*len(groupVal))
+ for _, attr := range groupVal {
+ grpKVs = attrToKVs(attr, grpKVs)
+ }
+ if attr.Key == "" {
+ // slog says we have to inline these
+ kvList = append(kvList, grpKVs...)
+ } else {
+ kvList = append(kvList, attr.Key, PseudoStruct(grpKVs))
+ }
+ } else if attr.Key != "" {
+ kvList = append(kvList, attr.Key, attrVal.Any())
+ }
+
+ return kvList
+}
+
+// levelFromSlog adjusts the level by the logger's verbosity and negates it.
+// It ensures that the result is >= 0. This is necessary because the result is
+// passed to a LogSink and that API did not historically document whether
+// levels could be negative or what that meant.
+//
+// Some example usage:
+//
+// logrV0 := getMyLogger()
+// logrV2 := logrV0.V(2)
+// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
+// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
+// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
+// slogV2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
+func (l fnlogger) levelFromSlog(level slog.Level) int {
+ result := -level
+ if result < 0 {
+ result = 0 // because LogSink doesn't expect negative V levels
+ }
+ return int(result)
+}
diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go
new file mode 100644
index 0000000..b4428e1
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/logr.go
@@ -0,0 +1,520 @@
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This design derives from Dave Cheney's blog:
+// http://dave.cheney.net/2015/11/05/lets-talk-about-logging
+
+// Package logr defines a general-purpose logging API and abstract interfaces
+// to back that API. Packages in the Go ecosystem can depend on this package,
+// while callers can implement logging with whatever backend is appropriate.
+//
+// # Usage
+//
+// Logging is done using a Logger instance. Logger is a concrete type with
+// methods, which defers the actual logging to a LogSink interface. The main
+// methods of Logger are Info() and Error(). Arguments to Info() and Error()
+// are key/value pairs rather than printf-style formatted strings, emphasizing
+// "structured logging".
+//
+// With Go's standard log package, we might write:
+//
+// log.Printf("setting target value %s", targetValue)
+//
+// With logr's structured logging, we'd write:
+//
+// logger.Info("setting target", "value", targetValue)
+//
+// Errors are much the same. Instead of:
+//
+// log.Printf("failed to open the pod bay door for user %s: %v", user, err)
+//
+// We'd write:
+//
+// logger.Error(err, "failed to open the pod bay door", "user", user)
+//
+// Info() and Error() are very similar, but they are separate methods so that
+// LogSink implementations can choose to do things like attach additional
+// information (such as stack traces) on calls to Error(). Error() messages are
+// always logged, regardless of the current verbosity. If there is no error
+// instance available, passing nil is valid.
+//
+// # Verbosity
+//
+// Often we want to log information only when the application is in "verbose
+// mode". To write log lines that are more verbose, Logger has a V() method.
+// The higher the V-level of a log line, the less critical it is considered.
+// Log-lines with V-levels that are not enabled (as per the LogSink) will not
+// be written. Level V(0) is the default, and logger.V(0).Info() has the same
+// meaning as logger.Info(). Negative V-levels have the same meaning as V(0).
+// Error messages do not have a verbosity level and are always logged.
+//
+// Where we might have written:
+//
+// if flVerbose >= 2 {
+// log.Printf("an unusual thing happened")
+// }
+//
+// We can write:
+//
+// logger.V(2).Info("an unusual thing happened")
+//
+// # Logger Names
+//
+// Logger instances can have name strings so that all messages logged through
+// that instance have additional context. For example, you might want to add
+// a subsystem name:
+//
+// logger.WithName("compactor").Info("started", "time", time.Now())
+//
+// The WithName() method returns a new Logger, which can be passed to
+// constructors or other functions for further use. Repeated use of WithName()
+// will accumulate name "segments". These name segments will be joined in some
+// way by the LogSink implementation. It is strongly recommended that name
+// segments contain simple identifiers (letters, digits, and hyphen), and do
+// not contain characters that could muddle the log output or confuse the
+// joining operation (e.g. whitespace, commas, periods, slashes, brackets,
+// quotes, etc).
+//
+// # Saved Values
+//
+// Logger instances can store any number of key/value pairs, which will be
+// logged alongside all messages logged through that instance. For example,
+// you might want to create a Logger instance per managed object:
+//
+// With the standard log package, we might write:
+//
+// log.Printf("decided to set field foo to value %q for object %s/%s",
+// targetValue, object.Namespace, object.Name)
+//
+// With logr we'd write:
+//
+// // Elsewhere: set up the logger to log the object name.
+// obj.logger = mainLogger.WithValues(
+// "name", obj.name, "namespace", obj.namespace)
+//
+// // later on...
+// obj.logger.Info("setting foo", "value", targetValue)
+//
+// # Best Practices
+//
+// Logger has very few hard rules, with the goal that LogSink implementations
+// might have a lot of freedom to differentiate. There are, however, some
+// things to consider.
+//
+// The log message consists of a constant message attached to the log line.
+// This should generally be a simple description of what's occurring, and should
+// never be a format string. Variable information can then be attached using
+// named values.
+//
+// Keys are arbitrary strings, but should generally be constant values. Values
+// may be any Go value, but how the value is formatted is determined by the
+// LogSink implementation.
+//
+// Logger instances are meant to be passed around by value. Code that receives
+// such a value can call its methods without having to check whether the
+// instance is ready for use.
+//
+// The zero logger (= Logger{}) is identical to Discard() and discards all log
+// entries. Code that receives a Logger by value can simply call it; the
+// methods will never crash. For cases where passing a logger is optional, a
+// pointer to Logger should be used.
+//
+// # Key Naming Conventions
+//
+// Keys are not strictly required to conform to any specification or regex, but
+// it is recommended that they:
+// - be human-readable and meaningful (not auto-generated or simple ordinals)
+// - be constant (not dependent on input data)
+// - contain only printable characters
+// - not contain whitespace or punctuation
+// - use lower case for simple keys and lowerCamelCase for more complex ones
+//
+// These guidelines help ensure that log data is processed properly regardless
+// of the log implementation. For example, log implementations will try to
+// output JSON data or will store data for later database (e.g. SQL) queries.
+//
+// While users are generally free to use key names of their choice, it's
+// generally best to avoid using the following keys, as they're frequently used
+// by implementations:
+// - "caller": the calling information (file/line) of a particular log line
+// - "error": the underlying error value in the `Error` method
+// - "level": the log level
+// - "logger": the name of the associated logger
+// - "msg": the log message
+// - "stacktrace": the stack trace associated with a particular log line or
+// error (often from the `Error` message)
+// - "ts": the timestamp for a log line
+//
+// Implementations are encouraged to make use of these keys to represent the
+// above concepts, when necessary (for example, in a pure-JSON output form, it
+// would be necessary to represent at least message and timestamp as ordinary
+// named values).
+//
+// # Break Glass
+//
+// Implementations may choose to give callers access to the underlying
+// logging implementation. The recommended pattern for this is:
+//
+// // Underlier exposes access to the underlying logging implementation.
+// // Since callers only have a logr.Logger, they have to know which
+// // implementation is in use, so this interface is less of an abstraction
+// and more of a way to test type conversion.
+// type Underlier interface {
+// GetUnderlying() <underlying-type>
+// }
+//
+// Logger grants access to the sink to enable type assertions like this:
+//
+// func DoSomethingWithImpl(log logr.Logger) {
+// if underlier, ok := log.GetSink().(impl.Underlier); ok {
+// implLogger := underlier.GetUnderlying()
+// ...
+// }
+// }
+//
+// Custom `With*` functions can be implemented by copying the complete
+// Logger struct and replacing the sink in the copy:
+//
+// // WithFooBar changes the foobar parameter in the log sink and returns a
+// // new logger with that modified sink. It does nothing for loggers where
+// // the sink doesn't support that parameter.
+// func WithFoobar(log logr.Logger, foobar int) logr.Logger {
+// if foobarLogSink, ok := log.GetSink().(FoobarSink); ok {
+// log = log.WithSink(foobarLogSink.WithFooBar(foobar))
+// }
+// return log
+// }
+//
+// Don't use New to construct a new Logger with a LogSink retrieved from an
+// existing Logger. Source code attribution might not work correctly and
+// unexported fields in Logger get lost.
+//
+// Beware that the same LogSink instance may be shared by different logger
+// instances. Calling functions that modify the LogSink will affect all of
+// those.
+package logr
+
+// New returns a new Logger instance. This is primarily used by libraries
+// implementing LogSink, rather than end users. Passing a nil sink will create
+// a Logger which discards all log lines.
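+//
+// For example (an illustrative sketch; mySink is a hypothetical LogSink
+// implementation), a sink package would typically wrap New in its own
+// constructor:
+//
+// func NewLogger(out io.Writer) logr.Logger {
+// return logr.New(&mySink{out: out})
+// }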
+func New(sink LogSink) Logger {
+ logger := Logger{}
+ logger.setSink(sink)
+ if sink != nil {
+ sink.Init(runtimeInfo)
+ }
+ return logger
+}
+
+// setSink stores the sink and updates any related fields. It mutates the
+// logger and thus is only safe to use for loggers that are not currently being
+// used concurrently.
+func (l *Logger) setSink(sink LogSink) {
+ l.sink = sink
+}
+
+// GetSink returns the stored sink.
+func (l Logger) GetSink() LogSink {
+ return l.sink
+}
+
+// WithSink returns a copy of the logger with the new sink.
+func (l Logger) WithSink(sink LogSink) Logger {
+ l.setSink(sink)
+ return l
+}
+
+// Logger is an interface to an abstract logging implementation. This is a
+// concrete type for performance reasons, but all the real work is passed on to
+// a LogSink. Implementations of LogSink should provide their own constructors
+// that return Logger, not LogSink.
+//
+// The underlying sink can be accessed through GetSink and be modified through
+// WithSink. This enables the implementation of custom extensions (see "Break
+// Glass" in the package documentation). Normally the sink should be used only
+// indirectly.
+type Logger struct {
+ sink LogSink
+ level int
+}
+
+// Enabled tests whether this Logger is enabled. For example, commandline
+// flags might be used to set the logging verbosity and disable some info logs.
+func (l Logger) Enabled() bool {
+ // Some implementations of LogSink look at the caller in Enabled (e.g.
+ // different verbosity levels per package or file), but we only pass one
+ // CallDepth in (via Init). This means that all calls from Logger to the
+ // LogSink's Enabled, Info, and Error methods must have the same number of
+ // frames. In other words, Logger methods can't call other Logger methods
+ // which call these LogSink methods unless we do it the same in all paths.
+ return l.sink != nil && l.sink.Enabled(l.level)
+}
+
+// Info logs a non-error message with the given key/value pairs as context.
+//
+// The msg argument should be used to add some constant description to the log
+// line. The key/value pairs can then be used to add additional variable
+// information. The key/value pairs must alternate string keys and arbitrary
+// values.
+func (l Logger) Info(msg string, keysAndValues ...any) {
+ if l.sink == nil {
+ return
+ }
+ if l.sink.Enabled(l.level) { // see comment in Enabled
+ if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
+ withHelper.GetCallStackHelper()()
+ }
+ l.sink.Info(l.level, msg, keysAndValues...)
+ }
+}
+
+// Error logs an error, with the given message and key/value pairs as context.
+// It functions similarly to Info, but may have unique behavior, and should be
+// preferred for logging errors (see the package documentations for more
+// information). The log message will always be emitted, regardless of
+// verbosity level.
+//
+// The msg argument should be used to add context to any underlying error,
+// while the err argument should be used to attach the actual error that
+// triggered this log line, if present. The err parameter is optional
+// and nil may be passed instead of an error instance.
+func (l Logger) Error(err error, msg string, keysAndValues ...any) {
+ if l.sink == nil {
+ return
+ }
+ if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
+ withHelper.GetCallStackHelper()()
+ }
+ l.sink.Error(err, msg, keysAndValues...)
+}
+
+// V returns a new Logger instance for a specific verbosity level, relative to
+// this Logger. In other words, V-levels are additive. A higher verbosity
+// level means a log message is less important. Negative V-levels are treated
+// as 0.
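+//
+// For example, because V-levels are additive:
+//
+// logger.V(1).V(1).Info("msg") // same verbosity as logger.V(2).Info("msg")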
+func (l Logger) V(level int) Logger {
+ if l.sink == nil {
+ return l
+ }
+ if level < 0 {
+ level = 0
+ }
+ l.level += level
+ return l
+}
+
+// GetV returns the verbosity level of the logger. If the logger's LogSink is
+// nil as in the Discard logger, this will always return 0.
+func (l Logger) GetV() int {
+ // 0 if l.sink nil because of the if check in V above.
+ return l.level
+}
+
+// WithValues returns a new Logger instance with additional key/value pairs.
+// See Info for documentation on how key/value pairs work.
+func (l Logger) WithValues(keysAndValues ...any) Logger {
+ if l.sink == nil {
+ return l
+ }
+ l.setSink(l.sink.WithValues(keysAndValues...))
+ return l
+}
+
+// WithName returns a new Logger instance with the specified name element added
+// to the Logger's name. Successive calls to WithName append additional
+// suffixes to the Logger's name. It's strongly recommended that name segments
+// contain only letters, digits, and hyphens (see the package documentation for
+// more information).
+func (l Logger) WithName(name string) Logger {
+ if l.sink == nil {
+ return l
+ }
+ l.setSink(l.sink.WithName(name))
+ return l
+}
+
+// WithCallDepth returns a Logger instance that offsets the call stack by the
+// specified number of frames when logging call site information, if possible.
+// This is useful for users who have helper functions between the "real" call
+// site and the actual calls to Logger methods. If depth is 0 the attribution
+// should be to the direct caller of this function. If depth is 1 the
+// attribution should skip 1 call frame, and so on. Successive calls to this
+// are additive.
+//
+// If the underlying log implementation supports a WithCallDepth(int) method,
+// it will be called and the result returned. If the implementation does not
+// support CallDepthLogSink, the original Logger will be returned.
+//
+// To skip one level, WithCallStackHelper() should be used instead of
+// WithCallDepth(1) because it works with implementations that support the
+// CallDepthLogSink and/or CallStackHelperLogSink interfaces.
+func (l Logger) WithCallDepth(depth int) Logger {
+ if l.sink == nil {
+ return l
+ }
+ if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
+ l.setSink(withCallDepth.WithCallDepth(depth))
+ }
+ return l
+}
+
+// WithCallStackHelper returns a new Logger instance that skips the direct
+// caller when logging call site information, if possible. This is useful for
+// users who have helper functions between the "real" call site and the actual
+// calls to Logger methods and want to support loggers which depend on marking
+// each individual helper function, like loggers based on testing.T.
+//
+// In addition to using that new logger instance, callers also must call the
+// returned function.
+//
+// If the underlying log implementation supports a WithCallDepth(int) method,
+// WithCallDepth(1) will be called to produce a new logger. If it supports a
+// WithCallStackHelper() method, that will also be called. If the
+// implementation does not support either of these, the original Logger will be
+// returned.
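+//
+// For example (an illustrative sketch), a logging helper would look like:
+//
+// func logProblem(logger logr.Logger, msg string) {
+// helper, logger := logger.WithCallStackHelper()
+// helper()
+// logger.Info(msg)
+// }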
+func (l Logger) WithCallStackHelper() (func(), Logger) {
+ if l.sink == nil {
+ return func() {}, l
+ }
+ var helper func()
+ if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
+ l.setSink(withCallDepth.WithCallDepth(1))
+ }
+ if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
+ helper = withHelper.GetCallStackHelper()
+ } else {
+ helper = func() {}
+ }
+ return helper, l
+}
+
+// IsZero returns true if this logger is an uninitialized zero value.
+func (l Logger) IsZero() bool {
+ return l.sink == nil
+}
+
+// RuntimeInfo holds information that the logr "core" library knows and
+// which LogSinks might want to know.
+type RuntimeInfo struct {
+ // CallDepth is the number of call frames the logr library adds between the
+ // end-user and the LogSink. LogSink implementations which choose to print
+ // the original logging site (e.g. file & line) should climb this many
+ // additional frames to find it.
+ CallDepth int
+}
+
+// runtimeInfo is a static global. It must not be changed at run time.
+var runtimeInfo = RuntimeInfo{
+ CallDepth: 1,
+}
+
+// LogSink represents a logging implementation. End-users will generally not
+// interact with this type.
+type LogSink interface {
+ // Init receives optional information about the logr library for LogSink
+ // implementations that need it.
+ Init(info RuntimeInfo)
+
+ // Enabled tests whether this LogSink is enabled at the specified V-level.
+ // For example, commandline flags might be used to set the logging
+ // verbosity and disable some info logs.
+ Enabled(level int) bool
+
+ // Info logs a non-error message with the given key/value pairs as context.
+ // The level argument is provided for optional logging. This method will
+ // only be called when Enabled(level) is true. See Logger.Info for more
+ // details.
+ Info(level int, msg string, keysAndValues ...any)
+
+ // Error logs an error, with the given message and key/value pairs as
+ // context. See Logger.Error for more details.
+ Error(err error, msg string, keysAndValues ...any)
+
+ // WithValues returns a new LogSink with additional key/value pairs. See
+ // Logger.WithValues for more details.
+ WithValues(keysAndValues ...any) LogSink
+
+ // WithName returns a new LogSink with the specified name appended. See
+ // Logger.WithName for more details.
+ WithName(name string) LogSink
+}
+
+// CallDepthLogSink represents a LogSink that knows how to climb the call stack
+// to identify the original call site and can offset the depth by a specified
+// number of frames. This is useful for users who have helper functions
+// between the "real" call site and the actual calls to Logger methods.
+// Implementations that log information about the call site (such as file,
+// function, or line) would otherwise log information about the intermediate
+// helper functions.
+//
+// This is an optional interface and implementations are not required to
+// support it.
+type CallDepthLogSink interface {
+ // WithCallDepth returns a LogSink that will offset the call
+ // stack by the specified number of frames when logging call
+ // site information.
+ //
+ // If depth is 0, the LogSink should skip exactly the number
+ // of call frames defined in RuntimeInfo.CallDepth when Info
+ // or Error are called, i.e. the attribution should be to the
+ // direct caller of Logger.Info or Logger.Error.
+ //
+ // If depth is 1 the attribution should skip 1 call frame, and so on.
+ // Successive calls to this are additive.
+ WithCallDepth(depth int) LogSink
+}
+
+// CallStackHelperLogSink represents a LogSink that knows how to climb
+// the call stack to identify the original call site and can skip
+// intermediate helper functions if they mark themselves as
+// helper. Go's testing package uses that approach.
+//
+// This is useful for users who have helper functions between the
+// "real" call site and the actual calls to Logger methods.
+// Implementations that log information about the call site (such as
+// file, function, or line) would otherwise log information about the
+// intermediate helper functions.
+//
+// This is an optional interface and implementations are not required
+// to support it. Implementations that choose to support this must not
+// simply implement it as WithCallDepth(1), because
+// Logger.WithCallStackHelper will call both methods if they are
+// present. This should only be implemented for LogSinks that actually
+// need it, as with testing.T.
+type CallStackHelperLogSink interface {
+ // GetCallStackHelper returns a function that must be called
+ // to mark the direct caller as helper function when logging
+ // call site information.
+ GetCallStackHelper() func()
+}
+
+// Marshaler is an optional interface that logged values may choose to
+// implement. Loggers with structured output, such as JSON, should
+// log the object returned by the MarshalLog method instead of the
+// original value.
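+//
+// For example (an illustrative sketch), a type can keep sensitive fields out
+// of its log output:
+//
+// type account struct {
+// Name string
+// password string
+// }
+//
+// func (a account) MarshalLog() any {
+// return struct{ Name string }{Name: a.Name}
+// }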
+type Marshaler interface {
+ // MarshalLog can be used to:
+ // - ensure that structs are not logged as strings when the original
+ // value has a String method: return a different type without a
+ // String method
+ // - select which fields of a complex type should get logged:
+ // return a simpler struct with fewer fields
+ // - log unexported fields: return a different struct
+ // with exported fields
+ //
+ // It may return any value of any type.
+ MarshalLog() any
+}
diff --git a/vendor/github.com/go-logr/logr/sloghandler.go b/vendor/github.com/go-logr/logr/sloghandler.go
new file mode 100644
index 0000000..82d1ba4
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/sloghandler.go
@@ -0,0 +1,192 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+ "context"
+ "log/slog"
+)
+
+type slogHandler struct {
+ // May be nil, in which case all logs get discarded.
+ sink LogSink
+ // Non-nil if sink is non-nil and implements SlogSink.
+ slogSink SlogSink
+
+ // groupPrefix collects values from WithGroup calls. It gets added as
+ // prefix to value keys when handling a log record.
+ groupPrefix string
+
+ // levelBias can be set when constructing the handler to influence the
+ // slog.Level of log records. A positive levelBias reduces the
+ // slog.Level value. slog has no API to influence this value after the
+ // handler got created, so it can only be set indirectly through
+ // Logger.V.
+ levelBias slog.Level
+}
+
+var _ slog.Handler = &slogHandler{}
+
+// groupSeparator is used to concatenate WithGroup names and attribute keys.
+const groupSeparator = "."
+
+// GetLevel is used for black box unit testing.
+func (l *slogHandler) GetLevel() slog.Level {
+ return l.levelBias
+}
+
+func (l *slogHandler) Enabled(_ context.Context, level slog.Level) bool {
+ return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level)))
+}
+
+func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error {
+ if l.slogSink != nil {
+ // Only adjust verbosity level of log entries < slog.LevelError.
+ if record.Level < slog.LevelError {
+ record.Level -= l.levelBias
+ }
+ return l.slogSink.Handle(ctx, record)
+ }
+
+ // No need to check for nil sink here because Handle will only be called
+ // when Enabled returned true.
+
+ kvList := make([]any, 0, 2*record.NumAttrs())
+ record.Attrs(func(attr slog.Attr) bool {
+ kvList = attrToKVs(attr, l.groupPrefix, kvList)
+ return true
+ })
+ if record.Level >= slog.LevelError {
+ l.sinkWithCallDepth().Error(nil, record.Message, kvList...)
+ } else {
+ level := l.levelFromSlog(record.Level)
+ l.sinkWithCallDepth().Info(level, record.Message, kvList...)
+ }
+ return nil
+}
+
+// sinkWithCallDepth adjusts the stack unwinding so that when Error or Info
+// are called by Handle, code in slog gets skipped.
+//
+// This offset currently (Go 1.21.0) works for calls through
+// slog.New(ToSlogHandler(...)). There's no guarantee that the call
+// chain won't change. Wrapping the handler will also break unwinding. It's
+// still better than not adjusting at all....
+//
+// This cannot be done when constructing the handler because FromSlogHandler needs
+// access to the original sink without this adjustment. A second copy would
+// work, but then WithAttrs would have to be called for both of them.
+func (l *slogHandler) sinkWithCallDepth() LogSink {
+ if sink, ok := l.sink.(CallDepthLogSink); ok {
+ return sink.WithCallDepth(2)
+ }
+ return l.sink
+}
+
+func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
+ if l.sink == nil || len(attrs) == 0 {
+ return l
+ }
+
+ clone := *l
+ if l.slogSink != nil {
+ clone.slogSink = l.slogSink.WithAttrs(attrs)
+ clone.sink = clone.slogSink
+ } else {
+ kvList := make([]any, 0, 2*len(attrs))
+ for _, attr := range attrs {
+ kvList = attrToKVs(attr, l.groupPrefix, kvList)
+ }
+ clone.sink = l.sink.WithValues(kvList...)
+ }
+ return &clone
+}
+
+func (l *slogHandler) WithGroup(name string) slog.Handler {
+ if l.sink == nil {
+ return l
+ }
+ if name == "" {
+ // slog says to inline empty groups
+ return l
+ }
+ clone := *l
+ if l.slogSink != nil {
+ clone.slogSink = l.slogSink.WithGroup(name)
+ clone.sink = clone.slogSink
+ } else {
+ clone.groupPrefix = addPrefix(clone.groupPrefix, name)
+ }
+ return &clone
+}
+
+// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog Groups
+// and other details of slog.
+func attrToKVs(attr slog.Attr, groupPrefix string, kvList []any) []any {
+ attrVal := attr.Value.Resolve()
+ if attrVal.Kind() == slog.KindGroup {
+ groupVal := attrVal.Group()
+ grpKVs := make([]any, 0, 2*len(groupVal))
+ prefix := groupPrefix
+ if attr.Key != "" {
+ prefix = addPrefix(groupPrefix, attr.Key)
+ }
+ for _, attr := range groupVal {
+ grpKVs = attrToKVs(attr, prefix, grpKVs)
+ }
+ kvList = append(kvList, grpKVs...)
+ } else if attr.Key != "" {
+ kvList = append(kvList, addPrefix(groupPrefix, attr.Key), attrVal.Any())
+ }
+
+ return kvList
+}
+
+func addPrefix(prefix, name string) string {
+ if prefix == "" {
+ return name
+ }
+ if name == "" {
+ return prefix
+ }
+ return prefix + groupSeparator + name
+}
+
+// levelFromSlog adjusts the level by the logger's verbosity and negates it.
+// It ensures that the result is >= 0. This is necessary because the result is
+// passed to a LogSink and that API did not historically document whether
+// levels could be negative or what that meant.
+//
+// Some example usage:
+//
+// logrV0 := getMyLogger()
+// logrV2 := logrV0.V(2)
+// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
+// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
+// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
+// slogV2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
+func (l *slogHandler) levelFromSlog(level slog.Level) int {
+ result := -level
+ result += l.levelBias // in case the original Logger had a V level
+ if result < 0 {
+ result = 0 // because LogSink doesn't expect negative V levels
+ }
+ return int(result)
+}
diff --git a/vendor/github.com/go-logr/logr/slogr.go b/vendor/github.com/go-logr/logr/slogr.go
new file mode 100644
index 0000000..28a83d0
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/slogr.go
@@ -0,0 +1,100 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+ "context"
+ "log/slog"
+)
+
+// FromSlogHandler returns a Logger which writes to the slog.Handler.
+//
+// The logr verbosity level is mapped to slog levels such that V(0) becomes
+// slog.LevelInfo and V(4) becomes slog.LevelDebug.
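+//
+// For example (an illustrative sketch), wrapping a standard JSON handler:
+//
+// logger := logr.FromSlogHandler(slog.NewJSONHandler(os.Stderr, nil))
+// logger.Info("hello") // handled at slog.LevelInfo
+// logger.V(4).Info("details") // handled at slog.LevelDebug, if the handler enables it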
+func FromSlogHandler(handler slog.Handler) Logger {
+ if handler, ok := handler.(*slogHandler); ok {
+ if handler.sink == nil {
+ return Discard()
+ }
+ return New(handler.sink).V(int(handler.levelBias))
+ }
+ return New(&slogSink{handler: handler})
+}
+
+// ToSlogHandler returns a slog.Handler which writes to the same sink as the Logger.
+//
+// The returned handler writes all records with level >= slog.LevelError as
+// error log entries with LogSink.Error, regardless of the verbosity level of
+// the Logger:
+//
+// logger := <some logr.Logger with 0 as verbosity level>
+// slog.New(ToSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...)
+//
+// The level of all other records gets reduced by the verbosity
+// level of the Logger and the result is negated. If it happens
+// to be negative, then it gets replaced by zero because a LogSink
+// is not expected to handle negative levels:
+//
+// slog.New(ToSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...)
+// slog.New(ToSlogHandler(logger)).Warn(...) -> logger.GetSink().Info(level=0, ...)
+// slog.New(ToSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...)
+// slog.New(ToSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...)
+func ToSlogHandler(logger Logger) slog.Handler {
+ if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 {
+ return sink.handler
+ }
+
+ handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())}
+ if slogSink, ok := handler.sink.(SlogSink); ok {
+ handler.slogSink = slogSink
+ }
+ return handler
+}
+
+// SlogSink is an optional interface that a LogSink can implement to support
+// logging through the slog.Logger or slog.Handler APIs better. It then should
+// also support special slog values like slog.Group. When used as a
+// slog.Handler, the advantages are:
+//
+// - stack unwinding gets avoided in favor of logging the pre-recorded PC,
+// as intended by slog
+// - proper grouping of key/value pairs via WithGroup
+// - verbosity levels > slog.LevelInfo can be recorded
+// - less overhead
+//
+// Both APIs (Logger and slog.Logger/Handler) then are supported equally
+// well. Developers can pick whatever API suits them better and/or mix
+// packages which use either API in the same binary with a common logging
+// implementation.
+//
+// This interface is necessary because the type implementing the LogSink
+// interface cannot also implement the slog.Handler interface due to the
+// different prototype of the common Enabled method.
+//
+// An implementation could support both interfaces in two different types, but then
+// additional interfaces would be needed to convert between those types in FromSlogHandler
+// and ToSlogHandler.
+type SlogSink interface {
+ LogSink
+
+ Handle(ctx context.Context, record slog.Record) error
+ WithAttrs(attrs []slog.Attr) SlogSink
+ WithGroup(name string) SlogSink
+}
diff --git a/vendor/github.com/go-logr/logr/slogsink.go b/vendor/github.com/go-logr/logr/slogsink.go
new file mode 100644
index 0000000..4060fcb
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/slogsink.go
@@ -0,0 +1,120 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+ "context"
+ "log/slog"
+ "runtime"
+ "time"
+)
+
+var (
+ _ LogSink = &slogSink{}
+ _ CallDepthLogSink = &slogSink{}
+ _ Underlier = &slogSink{}
+)
+
+// Underlier is implemented by the LogSink returned by FromSlogHandler.
+type Underlier interface {
+ // GetUnderlying returns the Handler used by the LogSink.
+ GetUnderlying() slog.Handler
+}
+
+const (
+ // nameKey is used to log the `WithName` values as an additional attribute.
+ nameKey = "logger"
+
+ // errKey is used to log the error parameter of Error as an additional attribute.
+ errKey = "err"
+)
+
+type slogSink struct {
+ callDepth int
+ name string
+ handler slog.Handler
+}
+
+func (l *slogSink) Init(info RuntimeInfo) {
+ l.callDepth = info.CallDepth
+}
+
+func (l *slogSink) GetUnderlying() slog.Handler {
+ return l.handler
+}
+
+func (l *slogSink) WithCallDepth(depth int) LogSink {
+ newLogger := *l
+ newLogger.callDepth += depth
+ return &newLogger
+}
+
+func (l *slogSink) Enabled(level int) bool {
+ return l.handler.Enabled(context.Background(), slog.Level(-level))
+}
+
+func (l *slogSink) Info(level int, msg string, kvList ...interface{}) {
+ l.log(nil, msg, slog.Level(-level), kvList...)
+}
+
+func (l *slogSink) Error(err error, msg string, kvList ...interface{}) {
+ l.log(err, msg, slog.LevelError, kvList...)
+}
+
+func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interface{}) {
+ var pcs [1]uintptr
+ // skip runtime.Callers, this function, Info/Error, and all helper functions above that.
+ runtime.Callers(3+l.callDepth, pcs[:])
+
+ record := slog.NewRecord(time.Now(), level, msg, pcs[0])
+ if l.name != "" {
+ record.AddAttrs(slog.String(nameKey, l.name))
+ }
+ if err != nil {
+ record.AddAttrs(slog.Any(errKey, err))
+ }
+ record.Add(kvList...)
+ _ = l.handler.Handle(context.Background(), record)
+}
+
+func (l slogSink) WithName(name string) LogSink {
+ if l.name != "" {
+ l.name += "/"
+ }
+ l.name += name
+ return &l
+}
+
+func (l slogSink) WithValues(kvList ...interface{}) LogSink {
+ l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...))
+ return &l
+}
+
+func kvListToAttrs(kvList ...interface{}) []slog.Attr {
+ // We don't need the record itself, only its Add method.
+ record := slog.NewRecord(time.Time{}, 0, "", 0)
+ record.Add(kvList...)
+ attrs := make([]slog.Attr, 0, record.NumAttrs())
+ record.Attrs(func(attr slog.Attr) bool {
+ attrs = append(attrs, attr)
+ return true
+ })
+ return attrs
+}
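For the reverse direction, a sketch of wrapping a standard-library handler as a Logger via `FromSlogHandler` (referenced above); because `slogSink` negates logr verbosity into slog levels, admitting `V(4)` output requires a handler level of `-4`. The JSON handler and level choice are illustrative assumptions:

```go
package main

import (
	"log/slog"
	"os"

	"github.com/go-logr/logr"
)

func main() {
	// A handler at level -4 accepts everything up to logger.V(4).
	handler := slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{
		Level: slog.Level(-4),
	})
	logger := logr.FromSlogHandler(handler)

	logger.Info("starting", "version", "1.0") // slog level 0
	logger.V(4).Info("debug detail")          // slog level -4, still enabled
	logger.WithName("db").Error(nil, "ping failed")
}
```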
diff --git a/vendor/github.com/go-task/slim-sprig/v3/.editorconfig b/vendor/github.com/go-task/slim-sprig/v3/.editorconfig
new file mode 100644
index 0000000..b0c9536
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/.editorconfig
@@ -0,0 +1,14 @@
+# editorconfig.org
+
+root = true
+
+[*]
+insert_final_newline = true
+charset = utf-8
+trim_trailing_whitespace = true
+indent_style = tab
+indent_size = 8
+
+[*.{md,yml,yaml,json}]
+indent_style = space
+indent_size = 2
diff --git a/vendor/github.com/go-task/slim-sprig/v3/.gitattributes b/vendor/github.com/go-task/slim-sprig/v3/.gitattributes
new file mode 100644
index 0000000..176a458
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/.gitattributes
@@ -0,0 +1 @@
+* text=auto
diff --git a/vendor/github.com/go-task/slim-sprig/v3/.gitignore b/vendor/github.com/go-task/slim-sprig/v3/.gitignore
new file mode 100644
index 0000000..5e3002f
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/.gitignore
@@ -0,0 +1,2 @@
+vendor/
+/.glide
diff --git a/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md b/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md
new file mode 100644
index 0000000..2ce45dd
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md
@@ -0,0 +1,383 @@
+# Changelog
+
+## Release 3.2.3 (2022-11-29)
+
+### Changed
+
+- Updated docs (thanks @book987 @aJetHorn @neelayu @pellizzetti @apricote @SaigyoujiYuyuko233 @AlekSi)
+- #348: Updated huandu/xstrings which fixed a snake case bug (thanks @yxxhero)
+- #353: Updated masterminds/semver which included bug fixes
+- #354: Updated golang.org/x/crypto which included bug fixes
+
+## Release 3.2.2 (2021-02-04)
+
+This is a re-release of 3.2.1 to satisfy a requirement of the Go module system.
+
+## Release 3.2.1 (2021-02-04)
+
+### Changed
+
+- Upgraded `Masterminds/goutils` to `v1.1.1`. see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr)
+
+## Release 3.2.0 (2020-12-14)
+
+### Added
+
+- #211: Added randInt function (thanks @kochurovro)
+- #223: Added fromJson and mustFromJson functions (thanks @mholt)
+- #242: Added a bcrypt function (thanks @robbiet480)
+- #253: Added randBytes function (thanks @MikaelSmith)
+- #254: Added dig function for dicts (thanks @nyarly)
+- #257: Added regexQuoteMeta for quoting regex metadata (thanks @rheaton)
+- #261: Added filepath functions osBase, osDir, osExt, osClean, osIsAbs (thanks @zugl)
+- #268: Added and and all functions for testing conditions (thanks @phuslu)
+- #181: Added float64 arithmetic addf, add1f, subf, divf, mulf, maxf, and minf
+ (thanks @andrewmostello)
+- #265: Added chunk function to split array into smaller arrays (thanks @karelbilek)
+- #270: Extend certificate functions to handle non-RSA keys + add support for
+ ed25519 keys (thanks @misberner)
+
+### Changed
+
+- Removed testing and support for Go 1.12. ed25519 support requires Go 1.13 or newer
+- Using semver 3.1.1 and mergo 0.3.11
+
+### Fixed
+
+- #249: Fix htmlDateInZone example (thanks @spawnia)
+
+NOTE: The dependency github.com/imdario/mergo reverted the breaking change in
+0.3.9 via 0.3.10 release.
+
+## Release 3.1.0 (2020-04-16)
+
+NOTE: The dependency github.com/imdario/mergo made a behavior change in 0.3.9
+that impacts sprig functionality. Do not use sprig with a version newer than 0.3.8.
+
+### Added
+
+- #225: Added support for generating htpasswd hash (thanks @rustycl0ck)
+- #224: Added duration filter (thanks @frebib)
+- #205: Added `seq` function (thanks @thadc23)
+
+### Changed
+
+- #203: Unlambda functions with correct signature (thanks @muesli)
+- #236: Updated the license formatting for GitHub display purposes
+- #238: Updated package dependency versions. Note, mergo not updated to 0.3.9
+ as it causes a breaking change for sprig. That issue is tracked at
+ https://github.com/imdario/mergo/issues/139
+
+### Fixed
+
+- #229: Fix `seq` example in docs (thanks @kalmant)
+
+## Release 3.0.2 (2019-12-13)
+
+### Fixed
+
+- #220: Updating to semver v3.0.3 to fix issue with <= ranges
+- #218: fix typo elyptical->elliptic in ecdsa key description (thanks @laverya)
+
+## Release 3.0.1 (2019-12-08)
+
+### Fixed
+
+- #212: Updated semver fixing broken constraint checking with ^0.0
+
+## Release 3.0.0 (2019-10-02)
+
+### Added
+
+- #187: Added durationRound function (thanks @yjp20)
+- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn)
+- #193: Added toRawJson support (thanks @Dean-Coakley)
+- #197: Added get support to dicts (thanks @Dean-Coakley)
+
+### Changed
+
+- #186: Moving dependency management to Go modules
+- #186: Updated semver to v3. This has changes in the way ^ is handled
+- #194: Updated documentation on merging and how it copies. Added example using deepCopy
+- #196: trunc now supports negative values (thanks @Dean-Coakley)
+
+## Release 2.22.0 (2019-10-02)
+
+### Added
+
+- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos)
+- #195: Added deepCopy function for use with dicts
+
+### Changed
+
+- Updated merge and mergeOverwrite documentation to explain copying and how to
+ use deepCopy with it
+
+## Release 2.21.0 (2019-09-18)
+
+### Added
+
+- #122: Added encryptAES/decryptAES functions (thanks @n0madic)
+- #128: Added toDecimal support (thanks @Dean-Coakley)
+- #169: Added list concat (thanks @astorath)
+- #174: Added deepEqual function (thanks @bonifaido)
+- #170: Added url parse and join functions (thanks @astorath)
+
+### Changed
+
+- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify
+
+### Fixed
+
+- #172: Fix semver wildcard example (thanks @piepmatz)
+- #175: Fix dateInZone doc example (thanks @s3than)
+
+## Release 2.20.0 (2019-06-18)
+
+### Added
+
+- #164: Adding function to get unix epoch for a time (@mattfarina)
+- #166: Adding tests for date_in_zone (@mattfarina)
+
+### Changed
+
+- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam)
+- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19)
+- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan)
+
+### Fixed
+
+## Release 2.19.0 (2019-03-02)
+
+IMPORTANT: This release reverts a change from 2.18.0
+
+In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random.
+
+We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience.
+
+### Changed
+
+- Fix substr panic 35fb796 (Alexey igrychev)
+- Remove extra period 1eb7729 (Matthew Lorimor)
+- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor)
+- README edits/fixes/suggestions 08fe136 (Lauri Apple)
+
+
+## Release 2.18.0 (2019-02-12)
+
+### Added
+
+- Added mergeOverwrite function
+- cryptographic functions that use secure random (see fe1de12)
+
+### Changed
+
+- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer)
+- Handle has for nil list 9c10885 (Daniel Cohen)
+- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder)
+- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic)
+- Replace outdated goutils imports 01893d2 (Matthew Lorimor)
+- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor)
+- Handle untyped nil values as parameters to string functions 2b2ec8f (Morten Torkildsen)
+
+### Fixed
+
+- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder)
+- Fix substr var names and comments d581f80 (Dean Coakley)
+- Fix substr documentation 2737203 (Dean Coakley)
+
+## Release 2.17.1 (2019-01-03)
+
+### Fixed
+
+The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml.
+
+## Release 2.17.0 (2019-01-03)
+
+### Added
+
+- adds adler32sum function and test 6908fc2 (marshallford)
+- Added kebabcase function ca331a1 (Ilyes512)
+
+### Changed
+
+- Update goutils to 1.1.0 4e1125d (Matt Butcher)
+
+### Fixed
+
+- Fix 'has' documentation e3f2a85 (dean-coakley)
+- docs(dict): fix typo in pick example dc424f9 (Dustin Specker)
+- fixes spelling errors... not sure how that happened 4cf188a (marshallford)
+
+## Release 2.16.0 (2018-08-13)
+
+### Added
+
+- add splitn function fccb0b0 (Helgi Þorbjörnsson)
+- Add slice func df28ca7 (gongdo)
+- Generate serial number a3bdffd (Cody Coons)
+- Extract values of dict with values function df39312 (Lawrence Jones)
+
+### Changed
+
+- Modify panic message for list.slice ae38335 (gongdo)
+- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap)
+- Remove duplicated documentation 1d97af1 (Matthew Fisher)
+- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson)
+
+### Fixed
+
+- Fix file permissions c5f40b5 (gongdo)
+- Fix example for buildCustomCert 7779e0d (Tin Lam)
+
+## Release 2.15.0 (2018-04-02)
+
+### Added
+
+- #68 and #69: Add json helpers to docs (thanks @arunvelsriram)
+- #66: Add ternary function (thanks @binoculars)
+- #67: Allow keys function to take multiple dicts (thanks @binoculars)
+- #89: Added sha1sum to crypto function (thanks @benkeil)
+- #81: Allow customizing Root CA that used by genSignedCert (thanks @chenzhiwei)
+- #92: Add travis testing for go 1.10
+- #93: Adding appveyor config for windows testing
+
+### Changed
+
+- #90: Updating to more recent dependencies
+- #73: replace satori/go.uuid with google/uuid (thanks @petterw)
+
+### Fixed
+
+- #76: Fixed documentation typos (thanks @Thiht)
+- Fixed rounding issue on the `ago` function. Note, this removes support for Go 1.8 and older
+
+## Release 2.14.1 (2017-12-01)
+
+### Fixed
+
+- #60: Fix typo in function name documentation (thanks @neil-ca-moore)
+- #61: Removing line with {{ due to blocking GitHub Pages generation
+- #64: Update the list functions to handle int, string, and other slices for compatibility
+
+## Release 2.14.0 (2017-10-06)
+
+This new version of Sprig adds a set of functions for generating and working with SSL certificates.
+
+- `genCA` generates an SSL Certificate Authority
+- `genSelfSignedCert` generates an SSL self-signed certificate
+- `genSignedCert` generates an SSL certificate and key based on a given CA
+
+## Release 2.13.0 (2017-09-18)
+
+This release adds new functions, including:
+
+- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions
+- `floor`, `ceil`, and `round` math functions
+- `toDate` converts a string to a date
+- `nindent` is just like `indent` but also prepends a new line
+- `ago` returns the time from `time.Now`
+
+### Added
+
+- #40: Added basic regex functionality (thanks @alanquillin)
+- #41: Added ceil floor and round functions (thanks @alanquillin)
+- #48: Added toDate function (thanks @andreynering)
+- #50: Added nindent function (thanks @binoculars)
+- #46: Added ago function (thanks @slayer)
+
+### Changed
+
+- #51: Updated godocs to include new string functions (thanks @curtisallen)
+- #49: Added ability to merge multiple dicts (thanks @binoculars)
+
+## Release 2.12.0 (2017-05-17)
+
+- `snakecase`, `camelcase`, and `shuffle` are three new string functions
+- `fail` allows you to bail out of a template render when conditions are not met
+
+## Release 2.11.0 (2017-05-02)
+
+- Added `toJson` and `toPrettyJson`
+- Added `merge`
+- Refactored documentation
+
+## Release 2.10.0 (2017-03-15)
+
+- Added `semver` and `semverCompare` for Semantic Versions
+- `list` replaces `tuple`
+- Fixed issue with `join`
+- Added `first`, `last`, `initial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without`
+
+## Release 2.9.0 (2017-02-23)
+
+- Added `splitList` to split a list
+- Added crypto functions of `genPrivateKey` and `derivePassword`
+
+## Release 2.8.0 (2016-12-21)
+
+- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`)
+- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`)
+
+## Release 2.7.0 (2016-12-01)
+
+- Added `sha256sum` to generate a hash of an input
+- Added functions to convert a numeric or string to `int`, `int64`, `float64`
+
+## Release 2.6.0 (2016-10-03)
+
+- Added a `uuidv4` template function for generating UUIDs inside of a template.
+
+## Release 2.5.0 (2016-08-19)
+
+- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions
+- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`)
+- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0
+
+## Release 2.4.0 (2016-08-16)
+
+- Adds two functions: `until` and `untilStep`
+
+## Release 2.3.0 (2016-06-21)
+
+- cat: Concatenate strings with whitespace separators.
+- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First"
+- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos"
+- indent: Indent blocks of text in a way that is sensitive to "\n" characters.
+
+## Release 2.2.0 (2016-04-21)
+
+- Added a `genPrivateKey` function (Thanks @bacongobbler)
+
+## Release 2.1.0 (2016-03-30)
+
+- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`.
+- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output.
+
+## Release 2.0.0 (2016-03-29)
+
+Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented.
+
+- `min` complements `max` (formerly `biggest`)
+- `empty` indicates that a value is the empty value for its type
+- `tuple` creates a tuple inside of a template: `{{$t := tuple "a", "b" "c"}}`
+- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}`
+- Date formatters have been added for HTML dates (as used in `date` input fields)
+- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`).
+
+## Release 1.2.0 (2016-02-01)
+
+- Added quote and squote
+- Added b32enc and b32dec
+- add now takes varargs
+- biggest now takes varargs
+
+## Release 1.1.0 (2015-12-29)
+
+- Added #4: Added contains function. strings.Contains, but with the arguments
+ switched to simplify common pipelines. (thanks krancour)
+- Added Travis-CI testing support
+
+## Release 1.0.0 (2015-12-23)
+
+- Initial release
diff --git a/vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt b/vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt
new file mode 100644
index 0000000..f311b1e
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (C) 2013-2020 Masterminds
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/go-task/slim-sprig/v3/README.md b/vendor/github.com/go-task/slim-sprig/v3/README.md
new file mode 100644
index 0000000..b5ab564
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/README.md
@@ -0,0 +1,73 @@
+# Slim-Sprig: Template functions for Go templates [](https://pkg.go.dev/github.com/go-task/slim-sprig/v3)
+
+Slim-Sprig is a fork of [Sprig](https://github.com/Masterminds/sprig), but with
+all functions that depend on external (non-standard-library) or crypto packages
+removed.
+The reason for this is to make this library more lightweight. Most of these
+functions (especially the crypto ones) are not needed in most apps, but cost a
+lot in terms of binary size and compilation time.
+
+## Usage
+
+**Template developers**: Please use Slim-Sprig's [function documentation](https://go-task.github.io/slim-sprig/) for
+detailed instructions and code snippets for the >100 template functions available.
+
+**Go developers**: If you'd like to include Slim-Sprig as a library in your program,
+our API documentation is available [at pkg.go.dev](https://pkg.go.dev/github.com/go-task/slim-sprig/v3).
+
+For standard usage, read on.
+
+### Load the Slim-Sprig library
+
+To load the Slim-Sprig `FuncMap`:
+
+```go
+import (
+	"html/template"
+
+	sprig "github.com/go-task/slim-sprig/v3"
+)
+
+// This example illustrates that the FuncMap *must* be set before the
+// templates themselves are loaded.
+tpl := template.Must(
+	template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html"),
+)
+```
+
+### Calling the functions inside of templates
+
+By convention, all functions are lowercase. This seems to follow the Go
+idiom for template functions (as opposed to template methods, which are
+TitleCase). For example, this:
+
+```
+{{ "hello!" | upper | repeat 5 }}
+```
+
+produces this:
+
+```
+HELLO!HELLO!HELLO!HELLO!HELLO!
+```
+
+## Principles Driving Our Function Selection
+
+We followed these principles to decide which functions to add and how to implement them:
+
+- Use template functions to build layout. The following
+ types of operations are within the domain of template functions:
+ - Formatting
+ - Layout
+ - Simple type conversions
+ - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic)
+- Template functions should not return errors unless there is no way to print
+ a sensible value. For example, converting a string to an integer should not
+ produce an error if conversion fails. Instead, it should display a default
+ value.
+- Simple math is necessary for grid layouts, pagers, and so on. Complex math
+ (anything other than arithmetic) should be done outside of templates.
+- Template functions only deal with the data passed into them. They never retrieve
+ data from a source.
+- Finally, do not override core Go template functions.
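A runnable variant of the usage above, as a sketch against the vendored v3 module path (the template text is illustrative):

```go
package main

import (
	"os"
	"text/template"

	sprig "github.com/go-task/slim-sprig/v3"
)

func main() {
	tpl := template.Must(template.New("demo").
		Funcs(sprig.TxtFuncMap()).
		Parse(`{{ "hello!" | upper | repeat 3 }}`))
	_ = tpl.Execute(os.Stdout, nil) // HELLO!HELLO!HELLO!
}
```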
diff --git a/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml b/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml
new file mode 100644
index 0000000..8e6346b
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml
@@ -0,0 +1,12 @@
+# https://taskfile.dev
+
+version: '3'
+
+tasks:
+ default:
+ cmds:
+ - task: test
+
+ test:
+ cmds:
+ - go test -v .
diff --git a/vendor/github.com/go-task/slim-sprig/v3/crypto.go b/vendor/github.com/go-task/slim-sprig/v3/crypto.go
new file mode 100644
index 0000000..d06e516
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/crypto.go
@@ -0,0 +1,24 @@
+package sprig
+
+import (
+ "crypto/sha1"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "hash/adler32"
+)
+
+func sha256sum(input string) string {
+ hash := sha256.Sum256([]byte(input))
+ return hex.EncodeToString(hash[:])
+}
+
+func sha1sum(input string) string {
+ hash := sha1.Sum([]byte(input))
+ return hex.EncodeToString(hash[:])
+}
+
+func adler32sum(input string) string {
+ hash := adler32.Checksum([]byte(input))
+ return fmt.Sprintf("%d", hash)
+}
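These helpers back the `sha1sum`, `sha256sum`, and `adler32sum` template functions registered in functions.go below. A sketch using the standard SHA-256 test vector for "abc":

```go
package main

import (
	"os"
	"text/template"

	sprig "github.com/go-task/slim-sprig/v3"
)

func main() {
	tpl := template.Must(template.New("hash").
		Funcs(sprig.TxtFuncMap()).
		Parse(`{{ "abc" | sha256sum }}`))
	// Prints: ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad
	_ = tpl.Execute(os.Stdout, nil)
}
```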
diff --git a/vendor/github.com/go-task/slim-sprig/v3/date.go b/vendor/github.com/go-task/slim-sprig/v3/date.go
new file mode 100644
index 0000000..ed022dd
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/date.go
@@ -0,0 +1,152 @@
+package sprig
+
+import (
+ "strconv"
+ "time"
+)
+
+// Given a format and a date, format the date string.
+//
+// Date can be a `time.Time` or an `int`, `int32`, or `int64`.
+// In the latter cases, it is treated as seconds since the UNIX
+// epoch.
+func date(fmt string, date interface{}) string {
+ return dateInZone(fmt, date, "Local")
+}
+
+func htmlDate(date interface{}) string {
+ return dateInZone("2006-01-02", date, "Local")
+}
+
+func htmlDateInZone(date interface{}, zone string) string {
+ return dateInZone("2006-01-02", date, zone)
+}
+
+func dateInZone(fmt string, date interface{}, zone string) string {
+ var t time.Time
+ switch date := date.(type) {
+ default:
+ t = time.Now()
+ case time.Time:
+ t = date
+ case *time.Time:
+ t = *date
+ case int64:
+ t = time.Unix(date, 0)
+ case int:
+ t = time.Unix(int64(date), 0)
+ case int32:
+ t = time.Unix(int64(date), 0)
+ }
+
+ loc, err := time.LoadLocation(zone)
+ if err != nil {
+ loc, _ = time.LoadLocation("UTC")
+ }
+
+ return t.In(loc).Format(fmt)
+}
+
+func dateModify(fmt string, date time.Time) time.Time {
+ d, err := time.ParseDuration(fmt)
+ if err != nil {
+ return date
+ }
+ return date.Add(d)
+}
+
+func mustDateModify(fmt string, date time.Time) (time.Time, error) {
+ d, err := time.ParseDuration(fmt)
+ if err != nil {
+ return time.Time{}, err
+ }
+ return date.Add(d), nil
+}
+
+func dateAgo(date interface{}) string {
+ var t time.Time
+
+ switch date := date.(type) {
+ default:
+ t = time.Now()
+ case time.Time:
+ t = date
+ case int64:
+ t = time.Unix(date, 0)
+ case int:
+ t = time.Unix(int64(date), 0)
+ }
+ // Drop resolution to seconds
+ duration := time.Since(t).Round(time.Second)
+ return duration.String()
+}
+
+func duration(sec interface{}) string {
+ var n int64
+ switch value := sec.(type) {
+ default:
+ n = 0
+ case string:
+ n, _ = strconv.ParseInt(value, 10, 64)
+ case int64:
+ n = value
+ }
+ return (time.Duration(n) * time.Second).String()
+}
+
+func durationRound(duration interface{}) string {
+ var d time.Duration
+ switch duration := duration.(type) {
+ default:
+ d = 0
+ case string:
+ d, _ = time.ParseDuration(duration)
+ case int64:
+ d = time.Duration(duration)
+ case time.Time:
+ d = time.Since(duration)
+ }
+
+ u := uint64(d)
+ neg := d < 0
+ if neg {
+ u = -u
+ }
+
+ var (
+ year = uint64(time.Hour) * 24 * 365
+ month = uint64(time.Hour) * 24 * 30
+ day = uint64(time.Hour) * 24
+ hour = uint64(time.Hour)
+ minute = uint64(time.Minute)
+ second = uint64(time.Second)
+ )
+ switch {
+ case u > year:
+ return strconv.FormatUint(u/year, 10) + "y"
+ case u > month:
+ return strconv.FormatUint(u/month, 10) + "mo"
+ case u > day:
+ return strconv.FormatUint(u/day, 10) + "d"
+ case u > hour:
+ return strconv.FormatUint(u/hour, 10) + "h"
+ case u > minute:
+ return strconv.FormatUint(u/minute, 10) + "m"
+ case u > second:
+ return strconv.FormatUint(u/second, 10) + "s"
+ }
+ return "0s"
+}
+
+func toDate(fmt, str string) time.Time {
+ t, _ := time.ParseInLocation(fmt, str, time.Local)
+ return t
+}
+
+func mustToDate(fmt, str string) (time.Time, error) {
+ return time.ParseInLocation(fmt, str, time.Local)
+}
+
+func unixEpoch(date time.Time) string {
+ return strconv.FormatInt(date.Unix(), 10)
+}
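Two behaviors above are easy to miss: `durationRound` keeps only the most significant unit, and `dateInZone` falls back to UTC when the zone cannot be loaded. A sketch (the template text is illustrative):

```go
package main

import (
	"os"
	"text/template"

	sprig "github.com/go-task/slim-sprig/v3"
)

func main() {
	tpl := template.Must(template.New("dates").
		Funcs(sprig.TxtFuncMap()).
		Parse(`{{ durationRound "2h10m5s" }} {{ dateInZone "15:04" now "NoSuchZone" }}`))
	// Prints "2h" followed by the current time rendered in UTC.
	_ = tpl.Execute(os.Stdout, nil)
}
```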
diff --git a/vendor/github.com/go-task/slim-sprig/v3/defaults.go b/vendor/github.com/go-task/slim-sprig/v3/defaults.go
new file mode 100644
index 0000000..b9f9796
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/defaults.go
@@ -0,0 +1,163 @@
+package sprig
+
+import (
+ "bytes"
+ "encoding/json"
+ "math/rand"
+ "reflect"
+ "strings"
+ "time"
+)
+
+func init() {
+ rand.Seed(time.Now().UnixNano())
+}
+
+// dfault checks whether `given` is set, and returns default if not set.
+//
+// This returns `d` if `given` appears not to be set, and `given` otherwise.
+//
+// For numeric types 0 is unset.
+// For strings, maps, arrays, and slices, len() = 0 is considered unset.
+// For bool, false is unset.
+// Structs are never considered unset.
+//
+// For everything else, including pointers, a nil value is unset.
+func dfault(d interface{}, given ...interface{}) interface{} {
+ if empty(given) || empty(given[0]) {
+ return d
+ }
+ return given[0]
+}
+
+// empty returns true if the given value has the zero value for its type.
+func empty(given interface{}) bool {
+ g := reflect.ValueOf(given)
+ if !g.IsValid() {
+ return true
+ }
+
+ // Basically adapted from text/template.isTrue
+ switch g.Kind() {
+ default:
+ return g.IsNil()
+ case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
+ return g.Len() == 0
+ case reflect.Bool:
+ return !g.Bool()
+ case reflect.Complex64, reflect.Complex128:
+ return g.Complex() == 0
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return g.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return g.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return g.Float() == 0
+ case reflect.Struct:
+ return false
+ }
+}
+
+// coalesce returns the first non-empty value.
+func coalesce(v ...interface{}) interface{} {
+ for _, val := range v {
+ if !empty(val) {
+ return val
+ }
+ }
+ return nil
+}
+
+// all returns true if empty(x) is false for all values x in the list.
+// If the list is empty, return true.
+func all(v ...interface{}) bool {
+ for _, val := range v {
+ if empty(val) {
+ return false
+ }
+ }
+ return true
+}
+
+// any returns true if empty(x) is false for any x in the list.
+// If the list is empty, return false.
+func any(v ...interface{}) bool {
+ for _, val := range v {
+ if !empty(val) {
+ return true
+ }
+ }
+ return false
+}
+
+// fromJson decodes JSON into a structured value, ignoring errors.
+func fromJson(v string) interface{} {
+ output, _ := mustFromJson(v)
+ return output
+}
+
+// mustFromJson decodes JSON into a structured value, returning errors.
+func mustFromJson(v string) (interface{}, error) {
+ var output interface{}
+ err := json.Unmarshal([]byte(v), &output)
+ return output, err
+}
+
+// toJson encodes an item into a JSON string
+func toJson(v interface{}) string {
+ output, _ := json.Marshal(v)
+ return string(output)
+}
+
+func mustToJson(v interface{}) (string, error) {
+ output, err := json.Marshal(v)
+ if err != nil {
+ return "", err
+ }
+ return string(output), nil
+}
+
+// toPrettyJson encodes an item into a pretty (indented) JSON string
+func toPrettyJson(v interface{}) string {
+ output, _ := json.MarshalIndent(v, "", " ")
+ return string(output)
+}
+
+func mustToPrettyJson(v interface{}) (string, error) {
+ output, err := json.MarshalIndent(v, "", " ")
+ if err != nil {
+ return "", err
+ }
+ return string(output), nil
+}
+
+// toRawJson encodes an item into a JSON string with no escaping of HTML characters.
+func toRawJson(v interface{}) string {
+ output, err := mustToRawJson(v)
+ if err != nil {
+ panic(err)
+ }
+ return string(output)
+}
+
+// mustToRawJson encodes an item into a JSON string with no escaping of HTML characters.
+func mustToRawJson(v interface{}) (string, error) {
+ buf := new(bytes.Buffer)
+ enc := json.NewEncoder(buf)
+ enc.SetEscapeHTML(false)
+ err := enc.Encode(&v)
+ if err != nil {
+ return "", err
+ }
+ return strings.TrimSuffix(buf.String(), "\n"), nil
+}
+
+// ternary returns the first value if the last value is true, otherwise returns the second value.
+func ternary(vt interface{}, vf interface{}, v bool) interface{} {
+ if v {
+ return vt
+ }
+
+ return vf
+}
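The `empty` rules above drive `default`, `coalesce`, `all`, and `any`. A sketch of the common trio:

```go
package main

import (
	"os"
	"text/template"

	sprig "github.com/go-task/slim-sprig/v3"
)

func main() {
	tpl := template.Must(template.New("defaults").
		Funcs(sprig.TxtFuncMap()).
		Parse(`{{ "" | default "fallback" }} {{ coalesce 0 "" "x" }} {{ ternary "yes" "no" true }}`))
	_ = tpl.Execute(os.Stdout, nil) // fallback x yes
}
```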
diff --git a/vendor/github.com/go-task/slim-sprig/v3/dict.go b/vendor/github.com/go-task/slim-sprig/v3/dict.go
new file mode 100644
index 0000000..77ebc61
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/dict.go
@@ -0,0 +1,118 @@
+package sprig
+
+func get(d map[string]interface{}, key string) interface{} {
+ if val, ok := d[key]; ok {
+ return val
+ }
+ return ""
+}
+
+func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} {
+ d[key] = value
+ return d
+}
+
+func unset(d map[string]interface{}, key string) map[string]interface{} {
+ delete(d, key)
+ return d
+}
+
+func hasKey(d map[string]interface{}, key string) bool {
+ _, ok := d[key]
+ return ok
+}
+
+func pluck(key string, d ...map[string]interface{}) []interface{} {
+ res := []interface{}{}
+ for _, dict := range d {
+ if val, ok := dict[key]; ok {
+ res = append(res, val)
+ }
+ }
+ return res
+}
+
+func keys(dicts ...map[string]interface{}) []string {
+ k := []string{}
+ for _, dict := range dicts {
+ for key := range dict {
+ k = append(k, key)
+ }
+ }
+ return k
+}
+
+func pick(dict map[string]interface{}, keys ...string) map[string]interface{} {
+ res := map[string]interface{}{}
+ for _, k := range keys {
+ if v, ok := dict[k]; ok {
+ res[k] = v
+ }
+ }
+ return res
+}
+
+func omit(dict map[string]interface{}, keys ...string) map[string]interface{} {
+ res := map[string]interface{}{}
+
+ omit := make(map[string]bool, len(keys))
+ for _, k := range keys {
+ omit[k] = true
+ }
+
+ for k, v := range dict {
+ if _, ok := omit[k]; !ok {
+ res[k] = v
+ }
+ }
+ return res
+}
+
+func dict(v ...interface{}) map[string]interface{} {
+ dict := map[string]interface{}{}
+ lenv := len(v)
+ for i := 0; i < lenv; i += 2 {
+ key := strval(v[i])
+ if i+1 >= lenv {
+ dict[key] = ""
+ continue
+ }
+ dict[key] = v[i+1]
+ }
+ return dict
+}
+
+func values(dict map[string]interface{}) []interface{} {
+ values := []interface{}{}
+ for _, value := range dict {
+ values = append(values, value)
+ }
+
+ return values
+}
+
+func dig(ps ...interface{}) (interface{}, error) {
+ if len(ps) < 3 {
+ panic("dig needs at least three arguments")
+ }
+ dict := ps[len(ps)-1].(map[string]interface{})
+ def := ps[len(ps)-2]
+ ks := make([]string, len(ps)-2)
+ for i := 0; i < len(ks); i++ {
+ ks[i] = ps[i].(string)
+ }
+
+ return digFromDict(dict, def, ks)
+}
+
+func digFromDict(dict map[string]interface{}, d interface{}, ks []string) (interface{}, error) {
+ k, ns := ks[0], ks[1:len(ks)]
+ step, has := dict[k]
+ if !has {
+ return d, nil
+ }
+ if len(ns) == 0 {
+ return step, nil
+ }
+ return digFromDict(step.(map[string]interface{}), d, ns)
+}
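`dig` takes its keys first, then a default, then the dict itself, returning the default whenever a key along the path is missing. A sketch:

```go
package main

import (
	"os"
	"text/template"

	sprig "github.com/go-task/slim-sprig/v3"
)

func main() {
	tpl := template.Must(template.New("dig").
		Funcs(sprig.TxtFuncMap()).
		Parse(`{{ $d := dict "a" (dict "b" 1) }}{{ dig "a" "b" 0 $d }} {{ dig "a" "zz" 42 $d }}`))
	_ = tpl.Execute(os.Stdout, nil) // 1 42
}
```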
diff --git a/vendor/github.com/go-task/slim-sprig/v3/doc.go b/vendor/github.com/go-task/slim-sprig/v3/doc.go
new file mode 100644
index 0000000..aabb9d4
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/doc.go
@@ -0,0 +1,19 @@
+/*
+Package sprig provides template functions for Go.
+
+This package contains a number of utility functions for working with data
+inside of Go `html/template` and `text/template` files.
+
+To add these functions, use the `template.Funcs()` method:
+
+	t := template.New("foo").Funcs(sprig.FuncMap())
+
+Note that you should add the function map before you parse any template files.
+
+In several cases, Sprig reverses the order of arguments from the way they
+appear in the standard library. This is to make it easier to pipe
+arguments into functions.
+
+See http://masterminds.github.io/sprig/ for more detailed documentation on each of the available functions.
+*/
+package sprig
diff --git a/vendor/github.com/go-task/slim-sprig/v3/functions.go b/vendor/github.com/go-task/slim-sprig/v3/functions.go
new file mode 100644
index 0000000..5ea74f8
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/functions.go
@@ -0,0 +1,317 @@
+package sprig
+
+import (
+ "errors"
+ "html/template"
+ "math/rand"
+ "os"
+ "path"
+ "path/filepath"
+ "reflect"
+ "strconv"
+ "strings"
+ ttemplate "text/template"
+ "time"
+)
+
+// FuncMap produces the function map.
+//
+// Use this to pass the functions into the template engine:
+//
+//	tpl := template.New("foo").Funcs(sprig.FuncMap())
+//
+func FuncMap() template.FuncMap {
+ return HtmlFuncMap()
+}
+
+// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions.
+func HermeticTxtFuncMap() ttemplate.FuncMap {
+ r := TxtFuncMap()
+ for _, name := range nonhermeticFunctions {
+ delete(r, name)
+ }
+ return r
+}
+
+// HermeticHtmlFuncMap returns an 'html/template'.FuncMap with only repeatable functions.
+func HermeticHtmlFuncMap() template.FuncMap {
+ r := HtmlFuncMap()
+ for _, name := range nonhermeticFunctions {
+ delete(r, name)
+ }
+ return r
+}
+
+// TxtFuncMap returns a 'text/template'.FuncMap
+func TxtFuncMap() ttemplate.FuncMap {
+ return ttemplate.FuncMap(GenericFuncMap())
+}
+
+// HtmlFuncMap returns an 'html/template'.FuncMap
+func HtmlFuncMap() template.FuncMap {
+ return template.FuncMap(GenericFuncMap())
+}
+
+// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}.
+func GenericFuncMap() map[string]interface{} {
+ gfm := make(map[string]interface{}, len(genericMap))
+ for k, v := range genericMap {
+ gfm[k] = v
+ }
+ return gfm
+}
+
+// These functions are not guaranteed to evaluate to the same result for a
+// given input, because they refer to the environment or global state.
+var nonhermeticFunctions = []string{
+ // Date functions
+ "date",
+ "date_in_zone",
+ "date_modify",
+ "now",
+ "htmlDate",
+ "htmlDateInZone",
+ "dateInZone",
+ "dateModify",
+
+ // Strings
+ "randAlphaNum",
+ "randAlpha",
+ "randAscii",
+ "randNumeric",
+ "randBytes",
+ "uuidv4",
+
+ // OS
+ "env",
+ "expandenv",
+
+ // Network
+ "getHostByName",
+}
+
+var genericMap = map[string]interface{}{
+ "hello": func() string { return "Hello!" },
+
+ // Date functions
+ "ago": dateAgo,
+ "date": date,
+ "date_in_zone": dateInZone,
+ "date_modify": dateModify,
+ "dateInZone": dateInZone,
+ "dateModify": dateModify,
+ "duration": duration,
+ "durationRound": durationRound,
+ "htmlDate": htmlDate,
+ "htmlDateInZone": htmlDateInZone,
+ "must_date_modify": mustDateModify,
+ "mustDateModify": mustDateModify,
+ "mustToDate": mustToDate,
+ "now": time.Now,
+ "toDate": toDate,
+ "unixEpoch": unixEpoch,
+
+ // Strings
+ "trunc": trunc,
+ "trim": strings.TrimSpace,
+ "upper": strings.ToUpper,
+ "lower": strings.ToLower,
+ "title": strings.Title,
+ "substr": substring,
+ // Switch order so that "foo" | repeat 5
+ "repeat": func(count int, str string) string { return strings.Repeat(str, count) },
+ // Deprecated: Use trimAll.
+ "trimall": func(a, b string) string { return strings.Trim(b, a) },
+ // Switch order so that "$foo" | trimall "$"
+ "trimAll": func(a, b string) string { return strings.Trim(b, a) },
+ "trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) },
+ "trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) },
+ // Switch order so that "foobar" | contains "foo"
+ "contains": func(substr string, str string) bool { return strings.Contains(str, substr) },
+ "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) },
+ "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) },
+ "quote": quote,
+ "squote": squote,
+ "cat": cat,
+ "indent": indent,
+ "nindent": nindent,
+ "replace": replace,
+ "plural": plural,
+ "sha1sum": sha1sum,
+ "sha256sum": sha256sum,
+ "adler32sum": adler32sum,
+ "toString": strval,
+
+ // Wrap Atoi to stop errors.
+ "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i },
+ "int64": toInt64,
+ "int": toInt,
+ "float64": toFloat64,
+ "seq": seq,
+ "toDecimal": toDecimal,
+
+ //"gt": func(a, b int) bool {return a > b},
+ //"gte": func(a, b int) bool {return a >= b},
+ //"lt": func(a, b int) bool {return a < b},
+ //"lte": func(a, b int) bool {return a <= b},
+
+	// split "/" "foo/bar" returns map[string]string{"_0": "foo", "_1": "bar"}
+ "split": split,
+ "splitList": func(sep, orig string) []string { return strings.Split(orig, sep) },
+	// splitn "/" 2 "foo/bar/fuu" returns map[string]string{"_0": "foo", "_1": "bar/fuu"}
+ "splitn": splitn,
+ "toStrings": strslice,
+
+ "until": until,
+ "untilStep": untilStep,
+
+ // VERY basic arithmetic.
+ "add1": func(i interface{}) int64 { return toInt64(i) + 1 },
+ "add": func(i ...interface{}) int64 {
+ var a int64 = 0
+ for _, b := range i {
+ a += toInt64(b)
+ }
+ return a
+ },
+ "sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) },
+ "div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) },
+ "mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) },
+ "mul": func(a interface{}, v ...interface{}) int64 {
+ val := toInt64(a)
+ for _, b := range v {
+ val = val * toInt64(b)
+ }
+ return val
+ },
+ "randInt": func(min, max int) int { return rand.Intn(max-min) + min },
+ "biggest": max,
+ "max": max,
+ "min": min,
+ "maxf": maxf,
+ "minf": minf,
+ "ceil": ceil,
+ "floor": floor,
+ "round": round,
+
+ // string slices. Note that we reverse the order b/c that's better
+ // for template processing.
+ "join": join,
+ "sortAlpha": sortAlpha,
+
+ // Defaults
+ "default": dfault,
+ "empty": empty,
+ "coalesce": coalesce,
+ "all": all,
+ "any": any,
+ "compact": compact,
+ "mustCompact": mustCompact,
+ "fromJson": fromJson,
+ "toJson": toJson,
+ "toPrettyJson": toPrettyJson,
+ "toRawJson": toRawJson,
+ "mustFromJson": mustFromJson,
+ "mustToJson": mustToJson,
+ "mustToPrettyJson": mustToPrettyJson,
+ "mustToRawJson": mustToRawJson,
+ "ternary": ternary,
+
+ // Reflection
+ "typeOf": typeOf,
+ "typeIs": typeIs,
+ "typeIsLike": typeIsLike,
+ "kindOf": kindOf,
+ "kindIs": kindIs,
+ "deepEqual": reflect.DeepEqual,
+
+ // OS:
+ "env": os.Getenv,
+ "expandenv": os.ExpandEnv,
+
+ // Network:
+ "getHostByName": getHostByName,
+
+ // Paths:
+ "base": path.Base,
+ "dir": path.Dir,
+ "clean": path.Clean,
+ "ext": path.Ext,
+ "isAbs": path.IsAbs,
+
+ // Filepaths:
+ "osBase": filepath.Base,
+ "osClean": filepath.Clean,
+ "osDir": filepath.Dir,
+ "osExt": filepath.Ext,
+ "osIsAbs": filepath.IsAbs,
+
+ // Encoding:
+ "b64enc": base64encode,
+ "b64dec": base64decode,
+ "b32enc": base32encode,
+ "b32dec": base32decode,
+
+ // Data Structures:
+ "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable.
+ "list": list,
+ "dict": dict,
+ "get": get,
+ "set": set,
+ "unset": unset,
+ "hasKey": hasKey,
+ "pluck": pluck,
+ "keys": keys,
+ "pick": pick,
+ "omit": omit,
+ "values": values,
+
+ "append": push, "push": push,
+ "mustAppend": mustPush, "mustPush": mustPush,
+ "prepend": prepend,
+ "mustPrepend": mustPrepend,
+ "first": first,
+ "mustFirst": mustFirst,
+ "rest": rest,
+ "mustRest": mustRest,
+ "last": last,
+ "mustLast": mustLast,
+ "initial": initial,
+ "mustInitial": mustInitial,
+ "reverse": reverse,
+ "mustReverse": mustReverse,
+ "uniq": uniq,
+ "mustUniq": mustUniq,
+ "without": without,
+ "mustWithout": mustWithout,
+ "has": has,
+ "mustHas": mustHas,
+ "slice": slice,
+ "mustSlice": mustSlice,
+ "concat": concat,
+ "dig": dig,
+ "chunk": chunk,
+ "mustChunk": mustChunk,
+
+ // Flow Control:
+ "fail": func(msg string) (string, error) { return "", errors.New(msg) },
+
+ // Regex
+ "regexMatch": regexMatch,
+ "mustRegexMatch": mustRegexMatch,
+ "regexFindAll": regexFindAll,
+ "mustRegexFindAll": mustRegexFindAll,
+ "regexFind": regexFind,
+ "mustRegexFind": mustRegexFind,
+ "regexReplaceAll": regexReplaceAll,
+ "mustRegexReplaceAll": mustRegexReplaceAll,
+ "regexReplaceAllLiteral": regexReplaceAllLiteral,
+ "mustRegexReplaceAllLiteral": mustRegexReplaceAllLiteral,
+ "regexSplit": regexSplit,
+ "mustRegexSplit": mustRegexSplit,
+ "regexQuoteMeta": regexQuoteMeta,
+
+ // URLs:
+ "urlParse": urlParse,
+ "urlJoin": urlJoin,
+}
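A sketch of the hermetic/non-hermetic split defined above; the hermetic maps drop the environment- and time-dependent entries such as `env` and `now`:

```go
package main

import (
	"fmt"

	sprig "github.com/go-task/slim-sprig/v3"
)

func main() {
	full := sprig.TxtFuncMap()
	hermetic := sprig.HermeticTxtFuncMap()

	_, fullHasEnv := full["env"]
	_, hermeticHasEnv := hermetic["env"]
	fmt.Println(fullHasEnv, hermeticHasEnv) // true false
	fmt.Println(len(full) - len(hermetic))  // count of stripped functions
}
```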
diff --git a/vendor/github.com/go-task/slim-sprig/v3/list.go b/vendor/github.com/go-task/slim-sprig/v3/list.go
new file mode 100644
index 0000000..ca0fbb7
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/list.go
@@ -0,0 +1,464 @@
+package sprig
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+)
+
+// Reflection is used in these functions so that slices and arrays of strings,
+// ints, and other concrete types (not just []interface{}) can be worked with.
+// For example, this is useful if you need to work on the output of regexps.
+
+func list(v ...interface{}) []interface{} {
+ return v
+}
+
+func push(list interface{}, v interface{}) []interface{} {
+ l, err := mustPush(list, v)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustPush(list interface{}, v interface{}) ([]interface{}, error) {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ nl := make([]interface{}, l)
+ for i := 0; i < l; i++ {
+ nl[i] = l2.Index(i).Interface()
+ }
+
+ return append(nl, v), nil
+
+ default:
+ return nil, fmt.Errorf("Cannot push on type %s", tp)
+ }
+}
+
+func prepend(list interface{}, v interface{}) []interface{} {
+ l, err := mustPrepend(list, v)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustPrepend(list interface{}, v interface{}) ([]interface{}, error) {
+ //return append([]interface{}{v}, list...)
+
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ nl := make([]interface{}, l)
+ for i := 0; i < l; i++ {
+ nl[i] = l2.Index(i).Interface()
+ }
+
+ return append([]interface{}{v}, nl...), nil
+
+ default:
+ return nil, fmt.Errorf("Cannot prepend on type %s", tp)
+ }
+}
+
+func chunk(size int, list interface{}) [][]interface{} {
+ l, err := mustChunk(size, list)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustChunk(size int, list interface{}) ([][]interface{}, error) {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+
+ cs := int(math.Floor(float64(l-1)/float64(size)) + 1)
+ nl := make([][]interface{}, cs)
+
+ for i := 0; i < cs; i++ {
+ clen := size
+ if i == cs-1 {
+ clen = int(math.Floor(math.Mod(float64(l), float64(size))))
+ if clen == 0 {
+ clen = size
+ }
+ }
+
+ nl[i] = make([]interface{}, clen)
+
+ for j := 0; j < clen; j++ {
+ ix := i*size + j
+ nl[i][j] = l2.Index(ix).Interface()
+ }
+ }
+
+ return nl, nil
+
+ default:
+ return nil, fmt.Errorf("Cannot chunk type %s", tp)
+ }
+}
+
+func last(list interface{}) interface{} {
+ l, err := mustLast(list)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustLast(list interface{}) (interface{}, error) {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ if l == 0 {
+ return nil, nil
+ }
+
+ return l2.Index(l - 1).Interface(), nil
+ default:
+ return nil, fmt.Errorf("Cannot find last on type %s", tp)
+ }
+}
+
+func first(list interface{}) interface{} {
+ l, err := mustFirst(list)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustFirst(list interface{}) (interface{}, error) {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ if l == 0 {
+ return nil, nil
+ }
+
+ return l2.Index(0).Interface(), nil
+ default:
+ return nil, fmt.Errorf("Cannot find first on type %s", tp)
+ }
+}
+
+func rest(list interface{}) []interface{} {
+ l, err := mustRest(list)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustRest(list interface{}) ([]interface{}, error) {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ if l == 0 {
+ return nil, nil
+ }
+
+ nl := make([]interface{}, l-1)
+ for i := 1; i < l; i++ {
+ nl[i-1] = l2.Index(i).Interface()
+ }
+
+ return nl, nil
+ default:
+ return nil, fmt.Errorf("Cannot find rest on type %s", tp)
+ }
+}
+
+func initial(list interface{}) []interface{} {
+ l, err := mustInitial(list)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustInitial(list interface{}) ([]interface{}, error) {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ if l == 0 {
+ return nil, nil
+ }
+
+ nl := make([]interface{}, l-1)
+ for i := 0; i < l-1; i++ {
+ nl[i] = l2.Index(i).Interface()
+ }
+
+ return nl, nil
+ default:
+ return nil, fmt.Errorf("Cannot find initial on type %s", tp)
+ }
+}
+
+func sortAlpha(list interface{}) []string {
+ k := reflect.Indirect(reflect.ValueOf(list)).Kind()
+ switch k {
+ case reflect.Slice, reflect.Array:
+ a := strslice(list)
+ s := sort.StringSlice(a)
+ s.Sort()
+ return s
+ }
+ return []string{strval(list)}
+}
+
+func reverse(v interface{}) []interface{} {
+ l, err := mustReverse(v)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustReverse(v interface{}) ([]interface{}, error) {
+ tp := reflect.TypeOf(v).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(v)
+
+ l := l2.Len()
+		// We do not reverse in place because the incoming array should not be altered.
+ nl := make([]interface{}, l)
+ for i := 0; i < l; i++ {
+ nl[l-i-1] = l2.Index(i).Interface()
+ }
+
+ return nl, nil
+ default:
+ return nil, fmt.Errorf("Cannot find reverse on type %s", tp)
+ }
+}
+
+func compact(list interface{}) []interface{} {
+ l, err := mustCompact(list)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustCompact(list interface{}) ([]interface{}, error) {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ nl := []interface{}{}
+ var item interface{}
+ for i := 0; i < l; i++ {
+ item = l2.Index(i).Interface()
+ if !empty(item) {
+ nl = append(nl, item)
+ }
+ }
+
+ return nl, nil
+ default:
+ return nil, fmt.Errorf("Cannot compact on type %s", tp)
+ }
+}
+
+func uniq(list interface{}) []interface{} {
+ l, err := mustUniq(list)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustUniq(list interface{}) ([]interface{}, error) {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ dest := []interface{}{}
+ var item interface{}
+ for i := 0; i < l; i++ {
+ item = l2.Index(i).Interface()
+ if !inList(dest, item) {
+ dest = append(dest, item)
+ }
+ }
+
+ return dest, nil
+ default:
+ return nil, fmt.Errorf("Cannot find uniq on type %s", tp)
+ }
+}
+
+func inList(haystack []interface{}, needle interface{}) bool {
+ for _, h := range haystack {
+ if reflect.DeepEqual(needle, h) {
+ return true
+ }
+ }
+ return false
+}
+
+func without(list interface{}, omit ...interface{}) []interface{} {
+ l, err := mustWithout(list, omit...)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustWithout(list interface{}, omit ...interface{}) ([]interface{}, error) {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ res := []interface{}{}
+ var item interface{}
+ for i := 0; i < l; i++ {
+ item = l2.Index(i).Interface()
+ if !inList(omit, item) {
+ res = append(res, item)
+ }
+ }
+
+ return res, nil
+ default:
+ return nil, fmt.Errorf("Cannot find without on type %s", tp)
+ }
+}
+
+func has(needle interface{}, haystack interface{}) bool {
+ l, err := mustHas(needle, haystack)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustHas(needle interface{}, haystack interface{}) (bool, error) {
+ if haystack == nil {
+ return false, nil
+ }
+ tp := reflect.TypeOf(haystack).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(haystack)
+ var item interface{}
+ l := l2.Len()
+ for i := 0; i < l; i++ {
+ item = l2.Index(i).Interface()
+ if reflect.DeepEqual(needle, item) {
+ return true, nil
+ }
+ }
+
+ return false, nil
+ default:
+ return false, fmt.Errorf("Cannot find has on type %s", tp)
+ }
+}
+
+// $list := [1, 2, 3, 4, 5]
+// slice $list -> list[0:5] = list[:]
+// slice $list 0 3 -> list[0:3] = list[:3]
+// slice $list 3 5 -> list[3:5]
+// slice $list 3 -> list[3:5] = list[3:]
+func slice(list interface{}, indices ...interface{}) interface{} {
+ l, err := mustSlice(list, indices...)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustSlice(list interface{}, indices ...interface{}) (interface{}, error) {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ if l == 0 {
+ return nil, nil
+ }
+
+ var start, end int
+ if len(indices) > 0 {
+ start = toInt(indices[0])
+ }
+ if len(indices) < 2 {
+ end = l
+ } else {
+ end = toInt(indices[1])
+ }
+
+ return l2.Slice(start, end).Interface(), nil
+ default:
+ return nil, fmt.Errorf("list should be type of slice or array but %s", tp)
+ }
+}
+
+func concat(lists ...interface{}) interface{} {
+ var res []interface{}
+ for _, list := range lists {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+ for i := 0; i < l2.Len(); i++ {
+ res = append(res, l2.Index(i).Interface())
+ }
+ default:
+ panic(fmt.Sprintf("Cannot concat type %s as list", tp))
+ }
+ }
+ return res
+}
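Because these helpers reflect over their input, they work on typed slices as well as `list` results. A sketch of `slice` and `chunk` matching the comments above:

```go
package main

import (
	"os"
	"text/template"

	sprig "github.com/go-task/slim-sprig/v3"
)

func main() {
	tpl := template.Must(template.New("lists").
		Funcs(sprig.TxtFuncMap()).
		Parse(`{{ $l := list 1 2 3 4 5 }}{{ slice $l 1 3 }} {{ chunk 2 $l }}`))
	_ = tpl.Execute(os.Stdout, nil) // [2 3] [[1 2] [3 4] [5]]
}
```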
diff --git a/vendor/github.com/go-task/slim-sprig/v3/network.go b/vendor/github.com/go-task/slim-sprig/v3/network.go
new file mode 100644
index 0000000..108d78a
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/network.go
@@ -0,0 +1,12 @@
+package sprig
+
+import (
+ "math/rand"
+ "net"
+)
+
+func getHostByName(name string) string {
+ addrs, _ := net.LookupHost(name)
+	// TODO: add error handling when release v3 comes out
+ return addrs[rand.Intn(len(addrs))]
+}
diff --git a/vendor/github.com/go-task/slim-sprig/v3/numeric.go b/vendor/github.com/go-task/slim-sprig/v3/numeric.go
new file mode 100644
index 0000000..98cbb37
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/numeric.go
@@ -0,0 +1,228 @@
+package sprig
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// toFloat64 converts numeric types (and numeric strings) to a float64.
+func toFloat64(v interface{}) float64 {
+ if str, ok := v.(string); ok {
+ iv, err := strconv.ParseFloat(str, 64)
+ if err != nil {
+ return 0
+ }
+ return iv
+ }
+
+ val := reflect.Indirect(reflect.ValueOf(v))
+ switch val.Kind() {
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return float64(val.Int())
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32:
+ return float64(val.Uint())
+ case reflect.Uint, reflect.Uint64:
+ return float64(val.Uint())
+ case reflect.Float32, reflect.Float64:
+ return val.Float()
+ case reflect.Bool:
+ if val.Bool() {
+ return 1
+ }
+ return 0
+ default:
+ return 0
+ }
+}
+
+func toInt(v interface{}) int {
+	// It's not optimal, but we don't want to duplicate the toInt64 code.
+ return int(toInt64(v))
+}
+
+// toInt64 converts integer types to 64-bit integers
+func toInt64(v interface{}) int64 {
+ if str, ok := v.(string); ok {
+ iv, err := strconv.ParseInt(str, 10, 64)
+ if err != nil {
+ return 0
+ }
+ return iv
+ }
+
+ val := reflect.Indirect(reflect.ValueOf(v))
+ switch val.Kind() {
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return val.Int()
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32:
+ return int64(val.Uint())
+ case reflect.Uint, reflect.Uint64:
+ tv := val.Uint()
+ if tv <= math.MaxInt64 {
+ return int64(tv)
+ }
+ // TODO: What is the sensible thing to do here?
+ return math.MaxInt64
+ case reflect.Float32, reflect.Float64:
+ return int64(val.Float())
+ case reflect.Bool:
+ if val.Bool() {
+ return 1
+ }
+ return 0
+ default:
+ return 0
+ }
+}
+
+func max(a interface{}, i ...interface{}) int64 {
+ aa := toInt64(a)
+ for _, b := range i {
+ bb := toInt64(b)
+ if bb > aa {
+ aa = bb
+ }
+ }
+ return aa
+}
+
+func maxf(a interface{}, i ...interface{}) float64 {
+ aa := toFloat64(a)
+ for _, b := range i {
+ bb := toFloat64(b)
+ aa = math.Max(aa, bb)
+ }
+ return aa
+}
+
+func min(a interface{}, i ...interface{}) int64 {
+ aa := toInt64(a)
+ for _, b := range i {
+ bb := toInt64(b)
+ if bb < aa {
+ aa = bb
+ }
+ }
+ return aa
+}
+
+func minf(a interface{}, i ...interface{}) float64 {
+ aa := toFloat64(a)
+ for _, b := range i {
+ bb := toFloat64(b)
+ aa = math.Min(aa, bb)
+ }
+ return aa
+}
+
+func until(count int) []int {
+ step := 1
+ if count < 0 {
+ step = -1
+ }
+ return untilStep(0, count, step)
+}
+
+func untilStep(start, stop, step int) []int {
+ v := []int{}
+
+ if stop < start {
+ if step >= 0 {
+ return v
+ }
+ for i := start; i > stop; i += step {
+ v = append(v, i)
+ }
+ return v
+ }
+
+ if step <= 0 {
+ return v
+ }
+ for i := start; i < stop; i += step {
+ v = append(v, i)
+ }
+ return v
+}
+
+func floor(a interface{}) float64 {
+ aa := toFloat64(a)
+ return math.Floor(aa)
+}
+
+func ceil(a interface{}) float64 {
+ aa := toFloat64(a)
+ return math.Ceil(aa)
+}
+
+func round(a interface{}, p int, rOpt ...float64) float64 {
+ roundOn := .5
+ if len(rOpt) > 0 {
+ roundOn = rOpt[0]
+ }
+ val := toFloat64(a)
+ places := toFloat64(p)
+
+ var round float64
+ pow := math.Pow(10, places)
+ digit := pow * val
+ _, div := math.Modf(digit)
+ if div >= roundOn {
+ round = math.Ceil(digit)
+ } else {
+ round = math.Floor(digit)
+ }
+ return round / pow
+}
+
+// toDecimal converts a Unix octal value to a decimal int64.
+func toDecimal(v interface{}) int64 {
+ result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64)
+ if err != nil {
+ return 0
+ }
+ return result
+}
+
+func seq(params ...int) string {
+ increment := 1
+ switch len(params) {
+ case 0:
+ return ""
+ case 1:
+ start := 1
+ end := params[0]
+ if end < start {
+ increment = -1
+ }
+ return intArrayToString(untilStep(start, end+increment, increment), " ")
+ case 3:
+ start := params[0]
+ end := params[2]
+ step := params[1]
+ if end < start {
+ increment = -1
+ if step > 0 {
+ return ""
+ }
+ }
+ return intArrayToString(untilStep(start, end+increment, step), " ")
+ case 2:
+ start := params[0]
+ end := params[1]
+ step := 1
+ if end < start {
+ step = -1
+ }
+ return intArrayToString(untilStep(start, end+step, step), " ")
+ default:
+ return ""
+ }
+}
+
+func intArrayToString(slice []int, delimiter string) string {
+	return strings.Trim(strings.Join(strings.Fields(fmt.Sprint(slice)), delimiter), "[]")
+}
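`seq` mimics the GNU coreutils tool of the same name and is built on `untilStep`. A sketch of both:

```go
package main

import (
	"os"
	"text/template"

	sprig "github.com/go-task/slim-sprig/v3"
)

func main() {
	tpl := template.Must(template.New("nums").
		Funcs(sprig.TxtFuncMap()).
		Parse(`{{ seq 1 5 }} | {{ seq 5 -2 1 }} | {{ untilStep 0 10 3 }}`))
	_ = tpl.Execute(os.Stdout, nil) // 1 2 3 4 5 | 5 3 1 | [0 3 6 9]
}
```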
diff --git a/vendor/github.com/go-task/slim-sprig/v3/reflect.go b/vendor/github.com/go-task/slim-sprig/v3/reflect.go
new file mode 100644
index 0000000..8a65c13
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/reflect.go
@@ -0,0 +1,28 @@
+package sprig
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// typeIs returns true if the src is the type named in target.
+func typeIs(target string, src interface{}) bool {
+ return target == typeOf(src)
+}
+
+func typeIsLike(target string, src interface{}) bool {
+ t := typeOf(src)
+ return target == t || "*"+target == t
+}
+
+func typeOf(src interface{}) string {
+ return fmt.Sprintf("%T", src)
+}
+
+func kindIs(target string, src interface{}) bool {
+ return target == kindOf(src)
+}
+
+func kindOf(src interface{}) string {
+ return reflect.ValueOf(src).Kind().String()
+}
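
A small sketch of how these reflection helpers read from a template, again assuming they are registered under their usual sprig names in the FuncMap:

```go
package main

import (
	"os"
	"text/template"

	sprig "github.com/go-task/slim-sprig/v3"
)

func main() {
	// typeOf formats with %T, so a slice of ints reports as "[]int";
	// kindOf reports the reflect.Kind name, e.g. "slice".
	tpl := template.Must(template.New("demo").
		Funcs(sprig.TxtFuncMap()).
		Parse(`{{ typeOf .S }} {{ kindOf .L }} {{ typeIs "[]int" .L }}`))
	_ = tpl.Execute(os.Stdout, map[string]any{"S": "hi", "L": []int{1, 2}})
	// Output: string slice true
}
```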
diff --git a/vendor/github.com/go-task/slim-sprig/v3/regex.go b/vendor/github.com/go-task/slim-sprig/v3/regex.go
new file mode 100644
index 0000000..fab5510
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/regex.go
@@ -0,0 +1,83 @@
+package sprig
+
+import (
+ "regexp"
+)
+
+func regexMatch(regex string, s string) bool {
+ match, _ := regexp.MatchString(regex, s)
+ return match
+}
+
+func mustRegexMatch(regex string, s string) (bool, error) {
+ return regexp.MatchString(regex, s)
+}
+
+func regexFindAll(regex string, s string, n int) []string {
+ r := regexp.MustCompile(regex)
+ return r.FindAllString(s, n)
+}
+
+func mustRegexFindAll(regex string, s string, n int) ([]string, error) {
+ r, err := regexp.Compile(regex)
+ if err != nil {
+ return []string{}, err
+ }
+ return r.FindAllString(s, n), nil
+}
+
+func regexFind(regex string, s string) string {
+ r := regexp.MustCompile(regex)
+ return r.FindString(s)
+}
+
+func mustRegexFind(regex string, s string) (string, error) {
+ r, err := regexp.Compile(regex)
+ if err != nil {
+ return "", err
+ }
+ return r.FindString(s), nil
+}
+
+func regexReplaceAll(regex string, s string, repl string) string {
+ r := regexp.MustCompile(regex)
+ return r.ReplaceAllString(s, repl)
+}
+
+func mustRegexReplaceAll(regex string, s string, repl string) (string, error) {
+ r, err := regexp.Compile(regex)
+ if err != nil {
+ return "", err
+ }
+ return r.ReplaceAllString(s, repl), nil
+}
+
+func regexReplaceAllLiteral(regex string, s string, repl string) string {
+ r := regexp.MustCompile(regex)
+ return r.ReplaceAllLiteralString(s, repl)
+}
+
+func mustRegexReplaceAllLiteral(regex string, s string, repl string) (string, error) {
+ r, err := regexp.Compile(regex)
+ if err != nil {
+ return "", err
+ }
+ return r.ReplaceAllLiteralString(s, repl), nil
+}
+
+func regexSplit(regex string, s string, n int) []string {
+ r := regexp.MustCompile(regex)
+ return r.Split(s, n)
+}
+
+func mustRegexSplit(regex string, s string, n int) ([]string, error) {
+ r, err := regexp.Compile(regex)
+ if err != nil {
+ return []string{}, err
+ }
+ return r.Split(s, n), nil
+}
+
+func regexQuoteMeta(s string) string {
+ return regexp.QuoteMeta(s)
+}
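
Note the split between the plain functions, which panic via regexp.MustCompile on a bad pattern, and the must* variants, which return the compile error so the template engine can surface it. A hedged sketch assuming the usual registration names:

```go
package main

import (
	"os"
	"text/template"

	sprig "github.com/go-task/slim-sprig/v3"
)

func main() {
	// regexReplaceAll supports ${1}-style group expansion in the
	// replacement string, exactly as regexp.ReplaceAllString does.
	tpl := template.Must(template.New("demo").
		Funcs(sprig.TxtFuncMap()).
		Parse(`{{ regexReplaceAll "a(x*)b" "-ab-axxb-" "${1}W" }} {{ regexMatch "^[a-z]+$" "abc" }}`))
	_ = tpl.Execute(os.Stdout, nil) // -W-xxW- true
}
```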
diff --git a/vendor/github.com/go-task/slim-sprig/v3/strings.go b/vendor/github.com/go-task/slim-sprig/v3/strings.go
new file mode 100644
index 0000000..3c62d6b
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/strings.go
@@ -0,0 +1,189 @@
+package sprig
+
+import (
+ "encoding/base32"
+ "encoding/base64"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+func base64encode(v string) string {
+ return base64.StdEncoding.EncodeToString([]byte(v))
+}
+
+func base64decode(v string) string {
+ data, err := base64.StdEncoding.DecodeString(v)
+ if err != nil {
+ return err.Error()
+ }
+ return string(data)
+}
+
+func base32encode(v string) string {
+ return base32.StdEncoding.EncodeToString([]byte(v))
+}
+
+func base32decode(v string) string {
+ data, err := base32.StdEncoding.DecodeString(v)
+ if err != nil {
+ return err.Error()
+ }
+ return string(data)
+}
+
+func quote(str ...interface{}) string {
+ out := make([]string, 0, len(str))
+ for _, s := range str {
+ if s != nil {
+ out = append(out, fmt.Sprintf("%q", strval(s)))
+ }
+ }
+ return strings.Join(out, " ")
+}
+
+func squote(str ...interface{}) string {
+ out := make([]string, 0, len(str))
+ for _, s := range str {
+ if s != nil {
+ out = append(out, fmt.Sprintf("'%v'", s))
+ }
+ }
+ return strings.Join(out, " ")
+}
+
+func cat(v ...interface{}) string {
+ v = removeNilElements(v)
+ r := strings.TrimSpace(strings.Repeat("%v ", len(v)))
+ return fmt.Sprintf(r, v...)
+}
+
+func indent(spaces int, v string) string {
+ pad := strings.Repeat(" ", spaces)
+ return pad + strings.Replace(v, "\n", "\n"+pad, -1)
+}
+
+func nindent(spaces int, v string) string {
+ return "\n" + indent(spaces, v)
+}
+
+func replace(old, new, src string) string {
+ return strings.Replace(src, old, new, -1)
+}
+
+func plural(one, many string, count int) string {
+ if count == 1 {
+ return one
+ }
+ return many
+}
+
+func strslice(v interface{}) []string {
+ switch v := v.(type) {
+ case []string:
+ return v
+ case []interface{}:
+ b := make([]string, 0, len(v))
+ for _, s := range v {
+ if s != nil {
+ b = append(b, strval(s))
+ }
+ }
+ return b
+ default:
+ val := reflect.ValueOf(v)
+ switch val.Kind() {
+ case reflect.Array, reflect.Slice:
+ l := val.Len()
+ b := make([]string, 0, l)
+ for i := 0; i < l; i++ {
+ value := val.Index(i).Interface()
+ if value != nil {
+ b = append(b, strval(value))
+ }
+ }
+ return b
+ default:
+ if v == nil {
+ return []string{}
+ }
+
+ return []string{strval(v)}
+ }
+ }
+}
+
+func removeNilElements(v []interface{}) []interface{} {
+ newSlice := make([]interface{}, 0, len(v))
+ for _, i := range v {
+ if i != nil {
+ newSlice = append(newSlice, i)
+ }
+ }
+ return newSlice
+}
+
+func strval(v interface{}) string {
+ switch v := v.(type) {
+ case string:
+ return v
+ case []byte:
+ return string(v)
+ case error:
+ return v.Error()
+ case fmt.Stringer:
+ return v.String()
+ default:
+ return fmt.Sprintf("%v", v)
+ }
+}
+
+func trunc(c int, s string) string {
+ if c < 0 && len(s)+c > 0 {
+ return s[len(s)+c:]
+ }
+ if c >= 0 && len(s) > c {
+ return s[:c]
+ }
+ return s
+}
+
+func join(sep string, v interface{}) string {
+ return strings.Join(strslice(v), sep)
+}
+
+func split(sep, orig string) map[string]string {
+ parts := strings.Split(orig, sep)
+ res := make(map[string]string, len(parts))
+ for i, v := range parts {
+ res["_"+strconv.Itoa(i)] = v
+ }
+ return res
+}
+
+func splitn(sep string, n int, orig string) map[string]string {
+ parts := strings.SplitN(orig, sep, n)
+ res := make(map[string]string, len(parts))
+ for i, v := range parts {
+ res["_"+strconv.Itoa(i)] = v
+ }
+ return res
+}
+
+// substring creates a substring of the given string.
+//
+// If start is < 0, this calls string[:end].
+//
+// If start is >= 0 and end is < 0 or greater than the length of s, this calls string[start:].
+//
+// Otherwise, this calls string[start:end].
+func substring(start, end int, s string) string {
+ if start < 0 {
+ return s[:end]
+ }
+ if end < 0 || end > len(s) {
+ return s[start:]
+ }
+ return s[start:end]
+}
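
A sketch of the truncation and substring helpers from a template, assuming the conventional sprig names (`trunc`, and `substr` for the substring function above):

```go
package main

import (
	"os"
	"text/template"

	sprig "github.com/go-task/slim-sprig/v3"
)

func main() {
	// trunc keeps the first c characters, or the last -c when c is
	// negative; substr takes start and end offsets before the string.
	tpl := template.Must(template.New("demo").
		Funcs(sprig.TxtFuncMap()).
		Parse(`{{ trunc 5 "hello world" }}|{{ trunc -5 "hello world" }}|{{ substr 6 11 "hello world" }}`))
	_ = tpl.Execute(os.Stdout, nil) // hello|world|world
}
```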
diff --git a/vendor/github.com/go-task/slim-sprig/v3/url.go b/vendor/github.com/go-task/slim-sprig/v3/url.go
new file mode 100644
index 0000000..b8e120e
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/url.go
@@ -0,0 +1,66 @@
+package sprig
+
+import (
+ "fmt"
+ "net/url"
+ "reflect"
+)
+
+func dictGetOrEmpty(dict map[string]interface{}, key string) string {
+ value, ok := dict[key]
+ if !ok {
+ return ""
+ }
+ tp := reflect.TypeOf(value).Kind()
+ if tp != reflect.String {
+ panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, tp.String()))
+ }
+ return reflect.ValueOf(value).String()
+}
+
+// urlParse parses the given URL and returns its components as a dict.
+func urlParse(v string) map[string]interface{} {
+ dict := map[string]interface{}{}
+ parsedURL, err := url.Parse(v)
+ if err != nil {
+ panic(fmt.Sprintf("unable to parse url: %s", err))
+ }
+ dict["scheme"] = parsedURL.Scheme
+ dict["host"] = parsedURL.Host
+ dict["hostname"] = parsedURL.Hostname()
+ dict["path"] = parsedURL.Path
+ dict["query"] = parsedURL.RawQuery
+ dict["opaque"] = parsedURL.Opaque
+ dict["fragment"] = parsedURL.Fragment
+ if parsedURL.User != nil {
+ dict["userinfo"] = parsedURL.User.String()
+ } else {
+ dict["userinfo"] = ""
+ }
+
+ return dict
+}
+
+// urlJoin joins the given dict back into a URL string.
+func urlJoin(d map[string]interface{}) string {
+ resURL := url.URL{
+ Scheme: dictGetOrEmpty(d, "scheme"),
+ Host: dictGetOrEmpty(d, "host"),
+ Path: dictGetOrEmpty(d, "path"),
+ RawQuery: dictGetOrEmpty(d, "query"),
+ Opaque: dictGetOrEmpty(d, "opaque"),
+ Fragment: dictGetOrEmpty(d, "fragment"),
+ }
+ userinfo := dictGetOrEmpty(d, "userinfo")
+ var user *url.Userinfo
+ if userinfo != "" {
+ tempURL, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo))
+ if err != nil {
+ panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err))
+ }
+ user = tempURL.User
+ }
+
+ resURL.User = user
+ return resURL.String()
+}
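
The two URL helpers are designed to round-trip: urlParse explodes a URL into the dict keys set above, and urlJoin reassembles the same dict into a URL string. A hedged template sketch, assuming the usual registration names:

```go
package main

import (
	"os"
	"text/template"

	sprig "github.com/go-task/slim-sprig/v3"
)

func main() {
	// The dict keys are the lowercase names assigned in urlParse:
	// scheme, host, path, query, fragment, userinfo, opaque, hostname.
	tpl := template.Must(template.New("demo").
		Funcs(sprig.TxtFuncMap()).
		Parse(`{{ $u := urlParse "https://user@example.com/path?q=1#frag" }}{{ $u.host }} {{ $u.query }} {{ urlJoin $u }}`))
	_ = tpl.Execute(os.Stdout, nil)
	// Output: example.com q=1 https://user@example.com/path?q=1#frag
}
```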
diff --git a/vendor/github.com/google/cel-go/LICENSE b/vendor/github.com/google/cel-go/LICENSE
new file mode 100644
index 0000000..2493ed2
--- /dev/null
+++ b/vendor/github.com/google/cel-go/LICENSE
@@ -0,0 +1,233 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+===========================================================================
+The common/types/pb/equal.go modification of proto.Equal logic
+===========================================================================
+Copyright (c) 2018 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/google/cel-go/cel/BUILD.bazel b/vendor/github.com/google/cel-go/cel/BUILD.bazel
new file mode 100644
index 0000000..0905f63
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/BUILD.bazel
@@ -0,0 +1,83 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "cel.go",
+ "decls.go",
+ "env.go",
+ "io.go",
+ "library.go",
+ "macro.go",
+ "options.go",
+ "program.go",
+ "validator.go",
+ ],
+ importpath = "github.com/google/cel-go/cel",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//checker:go_default_library",
+ "//checker/decls:go_default_library",
+ "//common:go_default_library",
+ "//common/ast:go_default_library",
+ "//common/containers:go_default_library",
+ "//common/decls:go_default_library",
+ "//common/functions:go_default_library",
+ "//common/operators:go_default_library",
+ "//common/overloads:go_default_library",
+ "//common/stdlib:go_default_library",
+ "//common/types:go_default_library",
+ "//common/types/pb:go_default_library",
+ "//common/types/ref:go_default_library",
+ "//common/types/traits:go_default_library",
+ "//interpreter:go_default_library",
+ "//parser:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//reflect/protodesc:go_default_library",
+ "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
+ "@org_golang_google_protobuf//reflect/protoregistry:go_default_library",
+ "@org_golang_google_protobuf//types/descriptorpb:go_default_library",
+ "@org_golang_google_protobuf//types/dynamicpb:go_default_library",
+ "@org_golang_google_protobuf//types/known/anypb:go_default_library",
+ "@org_golang_google_protobuf//types/known/durationpb:go_default_library",
+ "@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
+ ],
+)
+
+go_test(
+ name = "go_default_test",
+ srcs = [
+ "cel_example_test.go",
+ "cel_test.go",
+ "decls_test.go",
+ "env_test.go",
+ "io_test.go",
+ ],
+ data = [
+ "//cel/testdata:gen_test_fds",
+ ],
+ embed = [
+ ":go_default_library",
+ ],
+ deps = [
+ "//common/operators:go_default_library",
+ "//common/overloads:go_default_library",
+ "//common/types:go_default_library",
+ "//common/types/ref:go_default_library",
+ "//common/types/traits:go_default_library",
+ "//test:go_default_library",
+ "//test/proto2pb:go_default_library",
+ "//test/proto3pb:go_default_library",
+ "@io_bazel_rules_go//proto/wkt:descriptor_go_proto",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//encoding/prototext:go_default_library",
+ "@org_golang_google_protobuf//types/known/structpb:go_default_library",
+ "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/cel/cel.go b/vendor/github.com/google/cel-go/cel/cel.go
new file mode 100644
index 0000000..eb5a9f4
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/cel.go
@@ -0,0 +1,19 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package cel defines the top-level interface for the Common Expression Language (CEL).
+//
+// CEL is a non-Turing complete expression language designed to parse, check, and evaluate
+// expressions against user-defined environments.
+package cel
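
As orientation for the package, a minimal parse/check/evaluate round trip through the public API looks like the following; the variable name and expression are illustrative:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	// Declare the variables the expression may reference, compile the
	// source, plan a Program, and evaluate it against concrete inputs.
	env, err := cel.NewEnv(cel.Variable("name", cel.StringType))
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`"Hello, " + name + "!"`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]any{"name": "world"})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // Hello, world!
}
```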
diff --git a/vendor/github.com/google/cel-go/cel/decls.go b/vendor/github.com/google/cel-go/cel/decls.go
new file mode 100644
index 0000000..0f95013
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/decls.go
@@ -0,0 +1,395 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "fmt"
+
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/decls"
+ "github.com/google/cel-go/common/functions"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Kind indicates a CEL type's kind, which is used to differentiate quickly between simple and complex types.
+type Kind = types.Kind
+
+const (
+ // DynKind represents a dynamic type. This kind only exists at type-check time.
+ DynKind Kind = types.DynKind
+
+ // AnyKind represents a google.protobuf.Any type. This kind only exists at type-check time.
+ AnyKind = types.AnyKind
+
+ // BoolKind represents a boolean type.
+ BoolKind = types.BoolKind
+
+ // BytesKind represents a bytes type.
+ BytesKind = types.BytesKind
+
+ // DoubleKind represents a double type.
+ DoubleKind = types.DoubleKind
+
+ // DurationKind represents a CEL duration type.
+ DurationKind = types.DurationKind
+
+ // IntKind represents an integer type.
+ IntKind = types.IntKind
+
+ // ListKind represents a list type.
+ ListKind = types.ListKind
+
+ // MapKind represents a map type.
+ MapKind = types.MapKind
+
+ // NullTypeKind represents a null type.
+ NullTypeKind = types.NullTypeKind
+
+ // OpaqueKind represents an abstract type which has no accessible fields.
+ OpaqueKind = types.OpaqueKind
+
+ // StringKind represents a string type.
+ StringKind = types.StringKind
+
+ // StructKind represents a structured object with typed fields.
+ StructKind = types.StructKind
+
+ // TimestampKind represents a CEL time type.
+ TimestampKind = types.TimestampKind
+
+ // TypeKind represents the CEL type.
+ TypeKind = types.TypeKind
+
+ // TypeParamKind represents a parameterized type whose type name will be resolved at type-check time, if possible.
+ TypeParamKind = types.TypeParamKind
+
+ // UintKind represents a uint type.
+ UintKind = types.UintKind
+)
+
+var (
+ // AnyType represents the google.protobuf.Any type.
+ AnyType = types.AnyType
+ // BoolType represents the bool type.
+ BoolType = types.BoolType
+ // BytesType represents the bytes type.
+ BytesType = types.BytesType
+ // DoubleType represents the double type.
+ DoubleType = types.DoubleType
+ // DurationType represents the CEL duration type.
+ DurationType = types.DurationType
+ // DynType represents a dynamic CEL type whose type will be determined at runtime from context.
+ DynType = types.DynType
+ // IntType represents the int type.
+ IntType = types.IntType
+ // NullType represents the type of a null value.
+ NullType = types.NullType
+ // StringType represents the string type.
+ StringType = types.StringType
+ // TimestampType represents the time type.
+ TimestampType = types.TimestampType
+ // TypeType represents a CEL type
+ TypeType = types.TypeType
+ // UintType represents a uint type.
+ UintType = types.UintType
+
+ // function references for instantiating new types.
+
+ // ListType creates an instance of a list type value with the provided element type.
+ ListType = types.NewListType
+ // MapType creates an instance of a map type value with the provided key and value types.
+ MapType = types.NewMapType
+ // NullableType creates an instance of a nullable type with the provided wrapped type.
+ //
+ // Note: only primitive types are supported as wrapped types.
+ NullableType = types.NewNullableType
+ // OptionalType creates an abstract parameterized type instance corresponding to CEL's notion of optional.
+ OptionalType = types.NewOptionalType
+ // OpaqueType creates an abstract parameterized type with a given name.
+ OpaqueType = types.NewOpaqueType
+ // ObjectType creates a type reference to an externally defined type, e.g. a protobuf message type.
+ ObjectType = types.NewObjectType
+ // TypeParamType creates a parameterized type instance.
+ TypeParamType = types.NewTypeParamType
+)
+
+// Type holds a reference to a runtime type with an optional type-checked set of type parameters.
+type Type = types.Type
+
+// Constant creates an instance of an identifier declaration with a variable name, type, and value.
+func Constant(name string, t *Type, v ref.Val) EnvOption {
+ return func(e *Env) (*Env, error) {
+ e.variables = append(e.variables, decls.NewConstant(name, t, v))
+ return e, nil
+ }
+}
+
+// Variable creates an instance of a variable declaration with a variable name and type.
+func Variable(name string, t *Type) EnvOption {
+ return func(e *Env) (*Env, error) {
+ e.variables = append(e.variables, decls.NewVariable(name, t))
+ return e, nil
+ }
+}
+
+// Function defines a function and overloads with optional singleton or per-overload bindings.
+//
+// Using Function is roughly equivalent to calling Declarations() to declare the function signatures
+// and Functions() to define the function bindings, if they have been defined. Specifying the
+// same function name more than once will result in the aggregation of the function overloads. If any
+// signatures conflict between the existing and new function definition an error will be raised.
+// However, if the signatures are identical and the overload ids are the same, the redefinition will
+// be considered a no-op.
+//
+// One key difference when using Function() is that each FunctionDecl provided will handle dynamic
+// dispatch based on the type-signatures of the overloads provided, which means overload resolution
+// at runtime is handled out of the box rather than requiring a custom binding for overload
+// resolution via Functions():
+//
+// - Overloads are searched in the order they are declared
+// - Dynamic dispatch for lists and maps is limited by inspection of the list and map contents
+//   at runtime; empty lists and maps will result in a 'default dispatch'
+//
+// - In the event that a default dispatch occurs, the first overload provided is the one invoked
+//
+// If you intend to use overloads which differentiate based on the key or element type of a list or
+// map, consider using a generic function instead: e.g. func(list(T)) or func(map(K, V)) as this
+// will allow your implementation to determine how best to handle dispatch and the default behavior
+// for empty lists and maps whose contents cannot be inspected.
+//
+// For functions which use parameterized opaque types (abstract types), consider using a singleton
+// function which is capable of inspecting the contents of the type and resolving the appropriate
+// overload as CEL can only make inferences by type-name regarding such types.
+func Function(name string, opts ...FunctionOpt) EnvOption {
+ return func(e *Env) (*Env, error) {
+ fn, err := decls.NewFunction(name, opts...)
+ if err != nil {
+ return nil, err
+ }
+ if existing, found := e.functions[fn.Name()]; found {
+ fn, err = existing.Merge(fn)
+ if err != nil {
+ return nil, err
+ }
+ }
+ e.functions[fn.Name()] = fn
+ return e, nil
+ }
+}
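
A sketch of the Function, MemberOverload, and BinaryBinding options defined below, declaring an illustrative member function greet on strings; the function name and overload id are made up for the example:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/ref"
)

func main() {
	// Declare string.greet(string) -> string with a per-overload
	// binary binding; dispatch is handled by the FunctionDecl.
	env, err := cel.NewEnv(
		cel.Function("greet",
			cel.MemberOverload("string_greet_string",
				[]*cel.Type{cel.StringType, cel.StringType},
				cel.StringType,
				cel.BinaryBinding(func(lhs, rhs ref.Val) ref.Val {
					return types.String(fmt.Sprintf("%s greets %s", lhs, rhs))
				}))))
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`'cel'.greet('world')`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, _ := env.Program(ast)
	out, _, _ := prg.Eval(cel.NoVars())
	fmt.Println(out) // cel greets world
}
```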
+
+// FunctionOpt defines a functional option for configuring a function declaration.
+type FunctionOpt = decls.FunctionOpt
+
+// SingletonUnaryBinding creates a singleton function definition to be used for all function overloads.
+//
+// Note, this approach works well if the operand is expected to have a specific trait which it implements,
+// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
+func SingletonUnaryBinding(fn functions.UnaryOp, traits ...int) FunctionOpt {
+ return decls.SingletonUnaryBinding(fn, traits...)
+}
+
+// SingletonBinaryImpl creates a singleton function definition to be used with all function overloads.
+//
+// Note, this approach works well if the operand is expected to have a specific trait which it implements,
+// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
+//
+// Deprecated: use SingletonBinaryBinding
+func SingletonBinaryImpl(fn functions.BinaryOp, traits ...int) FunctionOpt {
+ return decls.SingletonBinaryBinding(fn, traits...)
+}
+
+// SingletonBinaryBinding creates a singleton function definition to be used with all function overloads.
+//
+// Note, this approach works well if the operand is expected to have a specific trait which it implements,
+// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
+func SingletonBinaryBinding(fn functions.BinaryOp, traits ...int) FunctionOpt {
+ return decls.SingletonBinaryBinding(fn, traits...)
+}
+
+// SingletonFunctionImpl creates a singleton function definition to be used with all function overloads.
+//
+// Note, this approach works well if the operand is expected to have a specific trait which it implements,
+// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
+//
+// Deprecated: use SingletonFunctionBinding
+func SingletonFunctionImpl(fn functions.FunctionOp, traits ...int) FunctionOpt {
+ return decls.SingletonFunctionBinding(fn, traits...)
+}
+
+// SingletonFunctionBinding creates a singleton function definition to be used with all function overloads.
+//
+// Note, this approach works well if the operand is expected to have a specific trait which it implements,
+// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
+func SingletonFunctionBinding(fn functions.FunctionOp, traits ...int) FunctionOpt {
+ return decls.SingletonFunctionBinding(fn, traits...)
+}
+
+// DisableDeclaration disables the function signatures, effectively removing them from the type-check
+// environment while preserving the runtime bindings.
+func DisableDeclaration(value bool) FunctionOpt {
+ return decls.DisableDeclaration(value)
+}
+
+// Overload defines a new global overload with an overload id, argument types, and result type. Through the
+// use of OverloadOpt options, the overload may also be configured with a binding, an operand trait, and to
+// be non-strict.
+//
+// Note: function bindings should be commonly configured with Overload instances whereas operand traits and
+// strict-ness should be rare occurrences.
+func Overload(overloadID string, args []*Type, resultType *Type, opts ...OverloadOpt) FunctionOpt {
+ return decls.Overload(overloadID, args, resultType, opts...)
+}
+
+// MemberOverload defines a new receiver-style overload (or member function) with an overload id, argument types,
+// and result type. Through the use of OverloadOpt options, the overload may also be configured with a binding,
+// an operand trait, and to be non-strict.
+//
+// Note: function bindings should be commonly configured with Overload instances whereas operand traits and
+// strict-ness should be rare occurrences.
+func MemberOverload(overloadID string, args []*Type, resultType *Type, opts ...OverloadOpt) FunctionOpt {
+ return decls.MemberOverload(overloadID, args, resultType, opts...)
+}
+
+// OverloadOpt is a functional option for configuring a function overload.
+type OverloadOpt = decls.OverloadOpt
+
+// UnaryBinding provides the implementation of a unary overload. The provided function is protected by a runtime
+// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
+func UnaryBinding(binding functions.UnaryOp) OverloadOpt {
+ return decls.UnaryBinding(binding)
+}
+
+// BinaryBinding provides the implementation of a binary overload. The provided function is protected by a runtime
+// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
+func BinaryBinding(binding functions.BinaryOp) OverloadOpt {
+ return decls.BinaryBinding(binding)
+}
+
+// FunctionBinding provides the implementation of a variadic overload. The provided function is protected by a runtime
+// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
+func FunctionBinding(binding functions.FunctionOp) OverloadOpt {
+ return decls.FunctionBinding(binding)
+}
+
+// OverloadIsNonStrict enables the function to be called with error and unknown argument values.
+//
+// Note: do not use this option unless absolutely necessary as it should be an uncommon feature.
+func OverloadIsNonStrict() OverloadOpt {
+ return decls.OverloadIsNonStrict()
+}
+
+// OverloadOperandTrait configures a set of traits which the first argument to the overload must implement in order to be
+// successfully invoked.
+func OverloadOperandTrait(trait int) OverloadOpt {
+ return decls.OverloadOperandTrait(trait)
+}
+
+// TypeToExprType converts a CEL-native type representation to a protobuf CEL Type representation.
+func TypeToExprType(t *Type) (*exprpb.Type, error) {
+ return types.TypeToExprType(t)
+}
+
+// ExprTypeToType converts a protobuf CEL type representation to a CEL-native type representation.
+func ExprTypeToType(t *exprpb.Type) (*Type, error) {
+ return types.ExprTypeToType(t)
+}
+
+// ExprDeclToDeclaration converts a protobuf CEL declaration to a CEL-native declaration, either a Variable or Function.
+func ExprDeclToDeclaration(d *exprpb.Decl) (EnvOption, error) {
+ switch d.GetDeclKind().(type) {
+ case *exprpb.Decl_Function:
+ overloads := d.GetFunction().GetOverloads()
+ opts := make([]FunctionOpt, len(overloads))
+ for i, o := range overloads {
+ args := make([]*Type, len(o.GetParams()))
+ for j, p := range o.GetParams() {
+ a, err := types.ExprTypeToType(p)
+ if err != nil {
+ return nil, err
+ }
+ args[j] = a
+ }
+ res, err := types.ExprTypeToType(o.GetResultType())
+ if err != nil {
+ return nil, err
+ }
+ if o.IsInstanceFunction {
+ opts[i] = decls.MemberOverload(o.GetOverloadId(), args, res)
+ } else {
+ opts[i] = decls.Overload(o.GetOverloadId(), args, res)
+ }
+ }
+ return Function(d.GetName(), opts...), nil
+ case *exprpb.Decl_Ident:
+ t, err := types.ExprTypeToType(d.GetIdent().GetType())
+ if err != nil {
+ return nil, err
+ }
+ if d.GetIdent().GetValue() == nil {
+ return Variable(d.GetName(), t), nil
+ }
+ val, err := ast.ConstantToVal(d.GetIdent().GetValue())
+ if err != nil {
+ return nil, err
+ }
+ return Constant(d.GetName(), t, val), nil
+ default:
+ return nil, fmt.Errorf("unsupported decl: %v", d)
+ }
+}
+
+func typeValueToKind(tv ref.Type) (Kind, error) {
+ switch tv {
+ case types.BoolType:
+ return BoolKind, nil
+ case types.DoubleType:
+ return DoubleKind, nil
+ case types.IntType:
+ return IntKind, nil
+ case types.UintType:
+ return UintKind, nil
+ case types.ListType:
+ return ListKind, nil
+ case types.MapType:
+ return MapKind, nil
+ case types.StringType:
+ return StringKind, nil
+ case types.BytesType:
+ return BytesKind, nil
+ case types.DurationType:
+ return DurationKind, nil
+ case types.TimestampType:
+ return TimestampKind, nil
+ case types.NullType:
+ return NullTypeKind, nil
+ case types.TypeType:
+ return TypeKind, nil
+ default:
+ switch tv.TypeName() {
+ case "dyn":
+ return DynKind, nil
+ case "google.protobuf.Any":
+ return AnyKind, nil
+ case "optional":
+ return OpaqueKind, nil
+ default:
+ return 0, fmt.Errorf("no known conversion for type of %s", tv.TypeName())
+ }
+ }
+}
diff --git a/vendor/github.com/google/cel-go/cel/env.go b/vendor/github.com/google/cel-go/cel/env.go
new file mode 100644
index 0000000..b5c3b4c
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/env.go
@@ -0,0 +1,893 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "errors"
+ "sync"
+
+ "github.com/google/cel-go/checker"
+ chkdecls "github.com/google/cel-go/checker/decls"
+ "github.com/google/cel-go/common"
+ celast "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/containers"
+ "github.com/google/cel-go/common/decls"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/interpreter"
+ "github.com/google/cel-go/parser"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Source interface representing a user-provided expression.
+type Source = common.Source
+
+// Ast representing the checked or unchecked expression, its source, and related metadata such as
+// source position information.
+type Ast struct {
+ expr *exprpb.Expr
+ info *exprpb.SourceInfo
+ source Source
+ refMap map[int64]*celast.ReferenceInfo
+ typeMap map[int64]*types.Type
+}
+
+// Expr returns the proto serializable instance of the parsed/checked expression.
+func (ast *Ast) Expr() *exprpb.Expr {
+ return ast.expr
+}
+
+// IsChecked returns whether the Ast value has been successfully type-checked.
+func (ast *Ast) IsChecked() bool {
+ return ast.typeMap != nil && len(ast.typeMap) > 0
+}
+
+// SourceInfo returns character offset and newline position information about expression elements.
+func (ast *Ast) SourceInfo() *exprpb.SourceInfo {
+ return ast.info
+}
+
+// ResultType returns the output type of the expression if the Ast has been type-checked, else
+// returns chkdecls.Dyn as the parse step cannot infer the type.
+//
+// Deprecated: use OutputType
+func (ast *Ast) ResultType() *exprpb.Type {
+ if !ast.IsChecked() {
+ return chkdecls.Dyn
+ }
+ out := ast.OutputType()
+ t, err := TypeToExprType(out)
+ if err != nil {
+ return chkdecls.Dyn
+ }
+ return t
+}
+
+// OutputType returns the output type of the expression if the Ast has been type-checked, else
+// returns cel.DynType as the parse step cannot infer types.
+func (ast *Ast) OutputType() *Type {
+ t, found := ast.typeMap[ast.expr.GetId()]
+ if !found {
+ return DynType
+ }
+ return t
+}
+
+// Source returns a view of the input used to create the Ast. This source may be complete or
+// constructed from the SourceInfo.
+func (ast *Ast) Source() Source {
+ return ast.source
+}
+
+// FormatType converts a type message into a string representation.
+//
+// Deprecated: prefer FormatCELType
+func FormatType(t *exprpb.Type) string {
+ return checker.FormatCheckedType(t)
+}
+
+// FormatCELType formats a cel.Type value to a string representation.
+//
+// The type formatting is identical to FormatType.
+func FormatCELType(t *Type) string {
+ return checker.FormatCELType(t)
+}
+
+// Env encapsulates the context necessary to perform parsing, type checking, or generation of
+// evaluable programs for different expressions.
+type Env struct {
+ Container *containers.Container
+ variables []*decls.VariableDecl
+ functions map[string]*decls.FunctionDecl
+ macros []parser.Macro
+ adapter types.Adapter
+ provider types.Provider
+ features map[int]bool
+ appliedFeatures map[int]bool
+ libraries map[string]bool
+ validators []ASTValidator
+ costOptions []checker.CostOption
+
+ // Internal parser representation
+ prsr *parser.Parser
+ prsrOpts []parser.Option
+
+ // Internal checker representation
+ chkMutex sync.Mutex
+ chk *checker.Env
+ chkErr error
+ chkOnce sync.Once
+ chkOpts []checker.Option
+
+ // Program options tied to the environment
+ progOpts []ProgramOption
+}
+
+// NewEnv creates a program environment configured with the standard library of CEL functions and
+// macros. The Env value returned can parse and check any CEL program which builds upon the core
+// features documented in the CEL specification.
+//
+// See the EnvOption helper functions for the options that can be used to configure the
+// environment.
+func NewEnv(opts ...EnvOption) (*Env, error) {
+ // Extend the statically configured standard environment, disabling eager validation to ensure
+ // the cost of setup for the environment is still just as cheap as it is in v0.11.x and earlier
+ // releases. The user provided options can easily re-enable the eager validation as they are
+ // processed after this default option.
+ stdOpts := append([]EnvOption{EagerlyValidateDeclarations(false)}, opts...)
+ env, err := getStdEnv()
+ if err != nil {
+ return nil, err
+ }
+ return env.Extend(stdOpts...)
+}
+
+// NewCustomEnv creates a custom program environment which is not automatically configured with the
+// standard library of functions and macros documented in the CEL spec.
+//
+// The purpose for using a custom environment might be for subsetting the standard library produced
+// by the cel.StdLib() function. Subsetting CEL is a core aspect of its design that allows users to
+// limit the compute and memory impact of a CEL program by controlling the functions and macros
+// that may appear in a given expression.
+//
+// See the EnvOption helper functions for the options that can be used to configure the
+// environment.
+func NewCustomEnv(opts ...EnvOption) (*Env, error) {
+ registry, err := types.NewRegistry()
+ if err != nil {
+ return nil, err
+ }
+ return (&Env{
+ variables: []*decls.VariableDecl{},
+ functions: map[string]*decls.FunctionDecl{},
+ macros: []parser.Macro{},
+ Container: containers.DefaultContainer,
+ adapter: registry,
+ provider: registry,
+ features: map[int]bool{},
+ appliedFeatures: map[int]bool{},
+ libraries: map[string]bool{},
+ validators: []ASTValidator{},
+ progOpts: []ProgramOption{},
+ costOptions: []checker.CostOption{},
+ }).configure(opts)
+}
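
A minimal sketch of the subsetting idea described above: a custom environment starts empty, so the standard library (or any subset of it) must be opted into explicitly:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	// Without cel.StdLib() this environment would reject even `x + 1`,
	// since no operators, functions, or macros are configured.
	env, err := cel.NewCustomEnv(cel.StdLib(), cel.Variable("x", cel.IntType))
	if err != nil {
		panic(err)
	}
	_, iss := env.Compile(`x + 1`)
	fmt.Println(iss.Err() == nil) // true
}
```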
+
+// Check performs type-checking on the input Ast and yields a checked Ast and/or set of Issues.
+// If any `ASTValidators` are configured on the environment, they will be applied after a valid
+// type-check result. If any issues are detected, the validators will provide them on the
+// output Issues object.
+//
+// Either checking or validation has failed if the returned Issues value and its Issues.Err()
+// value are non-nil. Issues should be inspected if they are non-nil, but may not represent a
+// fatal error.
+//
+// It is possible to have both non-nil Ast and Issues values returned from this call: however,
+// the mere presence of an Ast does not imply that it is valid for use.
+func (e *Env) Check(ast *Ast) (*Ast, *Issues) {
+ // Note, errors aren't currently possible on the Ast to ParsedExpr conversion.
+ pe, _ := AstToParsedExpr(ast)
+
+ // Construct the internal checker env, erroring if there is an issue adding the declarations.
+ chk, err := e.initChecker()
+ if err != nil {
+ errs := common.NewErrors(ast.Source())
+ errs.ReportError(common.NoLocation, err.Error())
+ return nil, NewIssuesWithSourceInfo(errs, ast.SourceInfo())
+ }
+
+ res, errs := checker.Check(pe, ast.Source(), chk)
+ if len(errs.GetErrors()) > 0 {
+ return nil, NewIssuesWithSourceInfo(errs, ast.SourceInfo())
+ }
+ // Manually create the Ast to ensure that the Ast source information (which may be more
+ // detailed than the information provided by Check), is returned to the caller.
+ ast = &Ast{
+ source: ast.Source(),
+ expr: res.Expr,
+ info: res.SourceInfo,
+ refMap: res.ReferenceMap,
+ typeMap: res.TypeMap}
+
+ // Generate a validator configuration from the set of configured validators.
+ vConfig := newValidatorConfig()
+ for _, v := range e.validators {
+ if cv, ok := v.(ASTValidatorConfigurer); ok {
+ cv.Configure(vConfig)
+ }
+ }
+ // Apply additional validators on the type-checked result.
+ iss := NewIssuesWithSourceInfo(errs, ast.SourceInfo())
+ for _, v := range e.validators {
+ v.Validate(e, vConfig, res, iss)
+ }
+ if iss.Err() != nil {
+ return nil, iss
+ }
+ return ast, nil
+}
+
+// Compile combines the Parse and Check phases of CEL program compilation to produce an Ast and
+// associated issues.
+//
+// If an error is encountered during parsing the Compile step will not continue with the Check
+// phase. If non-error issues are encountered during Parse, they may be combined with any issues
+// discovered during Check.
+//
+// Note, for parse-only uses of CEL use Parse.
+func (e *Env) Compile(txt string) (*Ast, *Issues) {
+ return e.CompileSource(common.NewTextSource(txt))
+}
+
+// CompileSource combines the Parse and Check phases of CEL program compilation to produce an Ast and
+// associated issues.
+//
+// If an error is encountered during parsing the CompileSource step will not continue with the
+// Check phase. If non-error issues are encountered during Parse, they may be combined with any
+// issues discovered during Check.
+//
+// Note, for parse-only uses of CEL use Parse.
+func (e *Env) CompileSource(src Source) (*Ast, *Issues) {
+ ast, iss := e.ParseSource(src)
+ if iss.Err() != nil {
+ return nil, iss
+ }
+ checked, iss2 := e.Check(ast)
+ if iss2.Err() != nil {
+ return nil, iss2
+ }
+ return checked, iss2
+}
+
+// Extend the current environment with additional options to produce a new Env.
+//
+// Note, the extended Env value should not share memory with the original. It is possible, however,
+// that a CustomTypeAdapter or CustomTypeProvider options could provide values which are mutable.
+// To ensure separation of state between extended environments either make sure the TypeAdapter and
+// TypeProvider are immutable, or that their underlying implementations are based on the
+// ref.TypeRegistry which provides a Copy method which will be invoked by this method.
+func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
+ chk, chkErr := e.getCheckerOrError()
+ if chkErr != nil {
+ return nil, chkErr
+ }
+
+ prsrOptsCopy := make([]parser.Option, len(e.prsrOpts))
+ copy(prsrOptsCopy, e.prsrOpts)
+
+ // The type-checker is configured with Declarations. The declarations may either be provided
+ // as options which have not yet been validated, or may come from a previous checker instance
+ // whose types have already been validated.
+ chkOptsCopy := make([]checker.Option, len(e.chkOpts))
+ copy(chkOptsCopy, e.chkOpts)
+
+ // Copy the declarations if needed.
+ varsCopy := []*decls.VariableDecl{}
+ if chk != nil {
+ // If the type-checker has already been instantiated, then the e.declarations have been
+ // validated within the chk instance.
+ chkOptsCopy = append(chkOptsCopy, checker.ValidatedDeclarations(chk))
+ } else {
+ // If the type-checker has not been instantiated, ensure the unvalidated declarations are
+ // provided to the extended Env instance.
+ varsCopy = make([]*decls.VariableDecl, len(e.variables))
+ copy(varsCopy, e.variables)
+ }
+
+ // Copy macros and program options
+ macsCopy := make([]parser.Macro, len(e.macros))
+ progOptsCopy := make([]ProgramOption, len(e.progOpts))
+ copy(macsCopy, e.macros)
+ copy(progOptsCopy, e.progOpts)
+
+ // Copy the adapter / provider if they appear to be mutable.
+ adapter := e.adapter
+ provider := e.provider
+ adapterReg, isAdapterReg := e.adapter.(*types.Registry)
+ providerReg, isProviderReg := e.provider.(*types.Registry)
+ // In most cases the provider and adapter will be a ref.TypeRegistry;
+ // however, in the rare cases where they are not, they are assumed to
+ // be immutable. Since it is possible to set the TypeProvider separately
+ // from the TypeAdapter, the possible configurations which could use a
+ // TypeRegistry as the base implementation are captured below.
+ if isAdapterReg && isProviderReg {
+ reg := providerReg.Copy()
+ provider = reg
+ // If the adapter and provider are the same object, set the adapter
+ // to the same ref.TypeRegistry as the provider.
+ if adapterReg == providerReg {
+ adapter = reg
+ } else {
+ // Otherwise, make a copy of the adapter.
+ adapter = adapterReg.Copy()
+ }
+ } else if isProviderReg {
+ provider = providerReg.Copy()
+ } else if isAdapterReg {
+ adapter = adapterReg.Copy()
+ }
+
+ featuresCopy := make(map[int]bool, len(e.features))
+ for k, v := range e.features {
+ featuresCopy[k] = v
+ }
+ appliedFeaturesCopy := make(map[int]bool, len(e.appliedFeatures))
+ for k, v := range e.appliedFeatures {
+ appliedFeaturesCopy[k] = v
+ }
+ funcsCopy := make(map[string]*decls.FunctionDecl, len(e.functions))
+ for k, v := range e.functions {
+ funcsCopy[k] = v
+ }
+ libsCopy := make(map[string]bool, len(e.libraries))
+ for k, v := range e.libraries {
+ libsCopy[k] = v
+ }
+ validatorsCopy := make([]ASTValidator, len(e.validators))
+ copy(validatorsCopy, e.validators)
+ costOptsCopy := make([]checker.CostOption, len(e.costOptions))
+ copy(costOptsCopy, e.costOptions)
+
+ ext := &Env{
+ Container: e.Container,
+ variables: varsCopy,
+ functions: funcsCopy,
+ macros: macsCopy,
+ progOpts: progOptsCopy,
+ adapter: adapter,
+ features: featuresCopy,
+ appliedFeatures: appliedFeaturesCopy,
+ libraries: libsCopy,
+ validators: validatorsCopy,
+ provider: provider,
+ chkOpts: chkOptsCopy,
+ prsrOpts: prsrOptsCopy,
+ costOptions: costOptsCopy,
+ }
+ return ext.configure(opts)
+}
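
A short sketch of Extend's copy-on-extend behavior; the variable names are illustrative:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	// Extend returns a new Env; the base environment is left untouched,
	// so "b" is only declared in the extended one.
	base, _ := cel.NewEnv(cel.Variable("a", cel.IntType))
	ext, _ := base.Extend(cel.Variable("b", cel.IntType))
	_, issBase := base.Compile(`a + b`) // undeclared reference to 'b'
	_, issExt := ext.Compile(`a + b`)   // compiles cleanly
	fmt.Println(issBase.Err() != nil, issExt.Err() == nil) // true true
}
```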
+
+// HasFeature checks whether the environment enables the given feature
+// flag, as enumerated in options.go.
+func (e *Env) HasFeature(flag int) bool {
+ enabled, has := e.features[flag]
+ return has && enabled
+}
+
+// HasLibrary returns whether a specific SingletonLibrary has been configured in the environment.
+func (e *Env) HasLibrary(libName string) bool {
+ configured, exists := e.libraries[libName]
+ return exists && configured
+}
+
+// Libraries returns a list of SingletonLibrary that have been configured in the environment.
+func (e *Env) Libraries() []string {
+ libraries := make([]string, 0, len(e.libraries))
+ for libName := range e.libraries {
+ libraries = append(libraries, libName)
+ }
+ return libraries
+}
+
+// HasValidator returns whether a specific ASTValidator has been configured in the environment.
+func (e *Env) HasValidator(name string) bool {
+ for _, v := range e.validators {
+ if v.Name() == name {
+ return true
+ }
+ }
+ return false
+}
+
+// Parse parses the input expression value `txt` to a Ast and/or a set of Issues.
+//
+// This form of Parse creates a Source value for the input `txt` and forwards to the
+// ParseSource method.
+func (e *Env) Parse(txt string) (*Ast, *Issues) {
+ src := common.NewTextSource(txt)
+ return e.ParseSource(src)
+}
+
+// ParseSource parses the input source to an Ast and/or set of Issues.
+//
+// Parsing has failed if the returned Issues value and its Issues.Err() value are non-nil.
+// Issues should be inspected if they are non-nil, but may not represent a fatal error.
+//
+// It is possible to have both non-nil Ast and Issues values returned from this call; however,
+// the mere presence of an Ast does not imply that it is valid for use.
+func (e *Env) ParseSource(src Source) (*Ast, *Issues) {
+ res, errs := e.prsr.Parse(src)
+ if len(errs.GetErrors()) > 0 {
+ return nil, &Issues{errs: errs}
+ }
+ // Manually create the Ast to ensure that the text source information is propagated on
+ // subsequent calls to Check.
+ return &Ast{
+ source: src,
+ expr: res.GetExpr(),
+ info: res.GetSourceInfo()}, nil
+}
+
+// Program generates an evaluable instance of the Ast within the environment (Env).
+func (e *Env) Program(ast *Ast, opts ...ProgramOption) (Program, error) {
+ optSet := e.progOpts
+ if len(opts) != 0 {
+ mergedOpts := []ProgramOption{}
+ mergedOpts = append(mergedOpts, e.progOpts...)
+ mergedOpts = append(mergedOpts, opts...)
+ optSet = mergedOpts
+ }
+ return newProgram(e, ast, optSet)
+}
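+
+// exampleCompileAndEval is a minimal sketch, not part of the upstream cel-go
+// sources, of the typical Parse -> Check -> Program -> Eval flow; the `name`
+// variable and its binding are illustrative assumptions.
+func exampleCompileAndEval() (ref.Val, error) {
+ env, err := NewEnv(Variable("name", StringType))
+ if err != nil {
+ return nil, err
+ }
+ ast, iss := env.Parse(`"hello " + name`)
+ if iss.Err() != nil {
+ return nil, iss.Err()
+ }
+ checked, iss := env.Check(ast)
+ if iss.Err() != nil {
+ return nil, iss.Err()
+ }
+ prg, err := env.Program(checked)
+ if err != nil {
+ return nil, err
+ }
+ out, _, err := prg.Eval(map[string]any{"name": "CEL"})
+ return out, err
+}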
+
+// CELTypeAdapter returns the `types.Adapter` configured for the environment.
+func (e *Env) CELTypeAdapter() types.Adapter {
+ return e.adapter
+}
+
+// CELTypeProvider returns the `types.Provider` configured for the environment.
+func (e *Env) CELTypeProvider() types.Provider {
+ return e.provider
+}
+
+// TypeAdapter returns the `ref.TypeAdapter` configured for the environment.
+//
+// Deprecated: use CELTypeAdapter()
+func (e *Env) TypeAdapter() ref.TypeAdapter {
+ return e.adapter
+}
+
+// TypeProvider returns the `ref.TypeProvider` configured for the environment.
+//
+// Deprecated: use CELTypeProvider()
+func (e *Env) TypeProvider() ref.TypeProvider {
+ if legacyProvider, ok := e.provider.(ref.TypeProvider); ok {
+ return legacyProvider
+ }
+ return &interopLegacyTypeProvider{Provider: e.provider}
+}
+
+// UnknownVars returns an interpreter.PartialActivation which marks all variables declared in the
+// Env as unknown AttributePattern values.
+//
+// Note, the UnknownVars will behave the same as an interpreter.EmptyActivation unless the
+// PartialAttributes option is provided as a ProgramOption.
+func (e *Env) UnknownVars() interpreter.PartialActivation {
+ act := interpreter.EmptyActivation()
+ part, _ := PartialVars(act, e.computeUnknownVars(act)...)
+ return part
+}
+
+// PartialVars returns an interpreter.PartialActivation where all variables not in the input variable
+// set, but which have been configured in the environment, are marked as unknown.
+//
+// The `vars` value may either be an interpreter.Activation or any valid input to the
+// interpreter.NewActivation call.
+//
+// Note, this is equivalent to calling cel.PartialVars and manually configuring the set of unknown
+// variables. For more advanced use cases of partial state, where portions of an object graph
+// rather than top-level variables are missing, manually constructed AttributePattern values
+// passed to cel.PartialVars may be a more suitable choice.
+//
+// Note, the PartialVars will behave the same as an interpreter.EmptyActivation unless the
+// PartialAttributes option is provided as a ProgramOption.
+func (e *Env) PartialVars(vars any) (interpreter.PartialActivation, error) {
+ act, err := interpreter.NewActivation(vars)
+ if err != nil {
+ return nil, err
+ }
+ return PartialVars(act, e.computeUnknownVars(act)...)
+}
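+
+// examplePartialEval is a minimal sketch, not part of the upstream cel-go
+// sources, of marking unbound variables as unknown during evaluation; the
+// environment and Ast are assumed to be prepared elsewhere.
+func examplePartialEval(env *Env, ast *Ast) (ref.Val, error) {
+ // OptPartialEval enables the partial attribute support which UnknownVars
+ // and PartialVars rely upon.
+ prg, err := env.Program(ast, EvalOptions(OptPartialEval))
+ if err != nil {
+ return nil, err
+ }
+ // An empty input marks every variable declared in the Env as unknown.
+ vars, err := env.PartialVars(map[string]any{})
+ if err != nil {
+ return nil, err
+ }
+ out, _, err := prg.Eval(vars)
+ return out, err
+}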
+
+// ResidualAst takes an Ast and its EvalDetails to produce a new Ast which only contains the
+// attribute references which are unknown.
+//
+// Residual expressions are beneficial in a few scenarios:
+//
+// - Optimizing constant expression evaluations away.
+// - Indexing and pruning expressions based on known input arguments.
+// - Surfacing additional requirements that are needed in order to complete an evaluation.
+// - Sharing the evaluation of an expression across multiple machines/nodes.
+//
+// For example, if an expression targets a 'resource' and 'request' attribute and the possible
+// values for the resource are known, a PartialActivation could mark the 'request' as an unknown
+// interpreter.AttributePattern and the resulting ResidualAst would be reduced to only the parts
+// of the expression that reference the 'request'.
+//
+// Note, the expression ids within the residual AST generated through this method have no
+// correlation to the expression ids of the original AST.
+//
+// See the PartialVars helper for how to construct a PartialActivation.
+//
+// TODO: Consider adding an option to generate a Program.Residual to avoid round-tripping to an
+// Ast format and then Program again.
+func (e *Env) ResidualAst(a *Ast, details *EvalDetails) (*Ast, error) {
+ pruned := interpreter.PruneAst(a.Expr(), a.SourceInfo().GetMacroCalls(), details.State())
+ expr, err := AstToString(ParsedExprToAst(pruned))
+ if err != nil {
+ return nil, err
+ }
+ parsed, iss := e.Parse(expr)
+ if iss != nil && iss.Err() != nil {
+ return nil, iss.Err()
+ }
+ if !a.IsChecked() {
+ return parsed, nil
+ }
+ checked, iss := e.Check(parsed)
+ if iss != nil && iss.Err() != nil {
+ return nil, iss.Err()
+ }
+ return checked, nil
+}
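+
+// exampleResidual is a minimal sketch, not part of the upstream cel-go
+// sources, of producing a residual expression from a partial evaluation with
+// state tracking, following the scenario described above.
+func exampleResidual(env *Env, ast *Ast, vars interpreter.PartialActivation) (string, error) {
+ // Both state tracking and partial evaluation are required for pruning.
+ prg, err := env.Program(ast, EvalOptions(OptTrackState, OptPartialEval))
+ if err != nil {
+ return "", err
+ }
+ _, details, err := prg.Eval(vars)
+ if err != nil {
+ return "", err
+ }
+ residual, err := env.ResidualAst(ast, details)
+ if err != nil {
+ return "", err
+ }
+ return AstToString(residual)
+}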
+
+// EstimateCost estimates the cost of a type checked CEL expression using the length estimates of input data and
+// extension functions provided by estimator.
+func (e *Env) EstimateCost(ast *Ast, estimator checker.CostEstimator, opts ...checker.CostOption) (checker.CostEstimate, error) {
+ checked := &celast.CheckedAST{
+ Expr: ast.Expr(),
+ SourceInfo: ast.SourceInfo(),
+ TypeMap: ast.typeMap,
+ ReferenceMap: ast.refMap,
+ }
+ extendedOpts := make([]checker.CostOption, 0, len(e.costOptions))
+ extendedOpts = append(extendedOpts, opts...)
+ extendedOpts = append(extendedOpts, e.costOptions...)
+ return checker.Cost(checked, estimator, extendedOpts...)
+}
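+
+// exampleNilEstimator is an illustrative CostEstimator sketch, not part of the
+// upstream cel-go sources, which declines to provide size or call-cost
+// estimates so that the checker falls back to its default heuristics.
+type exampleNilEstimator struct{}
+
+func (exampleNilEstimator) EstimateSize(element checker.AstNode) *checker.SizeEstimate {
+ return nil
+}
+
+func (exampleNilEstimator) EstimateCallCost(function, overloadID string, target *checker.AstNode, args []checker.AstNode) *checker.CallEstimate {
+ return nil
+}
+
+func exampleEstimateCost(env *Env, checked *Ast) (checker.CostEstimate, error) {
+ return env.EstimateCost(checked, exampleNilEstimator{})
+}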
+
+// configure applies a series of EnvOptions to the current environment.
+func (e *Env) configure(opts []EnvOption) (*Env, error) {
+ // Customize the environment using the provided EnvOption values. If an error is
+ // generated at any step, it will be returned as a nil Env with a non-nil error.
+ var err error
+ for _, opt := range opts {
+ e, err = opt(e)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // If the default UTC timezone fix has been enabled, make sure the library is configured
+ e, err = e.maybeApplyFeature(featureDefaultUTCTimeZone, Lib(timeUTCLibrary{}))
+ if err != nil {
+ return nil, err
+ }
+
+ // Configure the parser.
+ prsrOpts := []parser.Option{}
+ prsrOpts = append(prsrOpts, e.prsrOpts...)
+ prsrOpts = append(prsrOpts, parser.Macros(e.macros...))
+
+ if e.HasFeature(featureEnableMacroCallTracking) {
+ prsrOpts = append(prsrOpts, parser.PopulateMacroCalls(true))
+ }
+ if e.HasFeature(featureVariadicLogicalASTs) {
+ prsrOpts = append(prsrOpts, parser.EnableVariadicOperatorASTs(true))
+ }
+ e.prsr, err = parser.NewParser(prsrOpts...)
+ if err != nil {
+ return nil, err
+ }
+
+ // Ensure that the checker init happens eagerly rather than lazily.
+ if e.HasFeature(featureEagerlyValidateDeclarations) {
+ _, err := e.initChecker()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return e, nil
+}
+
+func (e *Env) initChecker() (*checker.Env, error) {
+ e.chkOnce.Do(func() {
+ chkOpts := []checker.Option{}
+ chkOpts = append(chkOpts, e.chkOpts...)
+ chkOpts = append(chkOpts,
+ checker.CrossTypeNumericComparisons(
+ e.HasFeature(featureCrossTypeNumericComparisons)))
+
+ ce, err := checker.NewEnv(e.Container, e.provider, chkOpts...)
+ if err != nil {
+ e.setCheckerOrError(nil, err)
+ return
+ }
+ // Add the statically configured declarations.
+ err = ce.AddIdents(e.variables...)
+ if err != nil {
+ e.setCheckerOrError(nil, err)
+ return
+ }
+ // Add the function declarations which are derived from the FunctionDecl instances.
+ for _, fn := range e.functions {
+ if fn.IsDeclarationDisabled() {
+ continue
+ }
+ err = ce.AddFunctions(fn)
+ if err != nil {
+ e.setCheckerOrError(nil, err)
+ return
+ }
+ }
+ // Store the initialized checker for reuse.
+ e.setCheckerOrError(ce, nil)
+ })
+ return e.getCheckerOrError()
+}
+
+// setCheckerOrError sets the checker.Env or error state in a concurrency-safe manner
+func (e *Env) setCheckerOrError(chk *checker.Env, chkErr error) {
+ e.chkMutex.Lock()
+ e.chk = chk
+ e.chkErr = chkErr
+ e.chkMutex.Unlock()
+}
+
+// getCheckerOrError gets the checker.Env or error state in a concurrency-safe manner
+func (e *Env) getCheckerOrError() (*checker.Env, error) {
+ e.chkMutex.Lock()
+ defer e.chkMutex.Unlock()
+ return e.chk, e.chkErr
+}
+
+// maybeApplyFeature determines whether the feature-guarded option is enabled, and if so applies
+// the option if it has not already been applied.
+func (e *Env) maybeApplyFeature(feature int, option EnvOption) (*Env, error) {
+ if !e.HasFeature(feature) {
+ return e, nil
+ }
+ _, applied := e.appliedFeatures[feature]
+ if applied {
+ return e, nil
+ }
+ e, err := option(e)
+ if err != nil {
+ return nil, err
+ }
+ // record that the feature has been applied since it will generate declarations
+ // and functions which will be propagated on Extend() calls and which should only
+ // be registered once.
+ e.appliedFeatures[feature] = true
+ return e, nil
+}
+
+// computeUnknownVars determines a set of missing variables based on the input activation and the
+// environment's configured declaration set.
+func (e *Env) computeUnknownVars(vars interpreter.Activation) []*interpreter.AttributePattern {
+ var unknownPatterns []*interpreter.AttributePattern
+ for _, v := range e.variables {
+ varName := v.Name()
+ if _, found := vars.ResolveName(varName); found {
+ continue
+ }
+ unknownPatterns = append(unknownPatterns, interpreter.NewAttributePattern(varName))
+ }
+ return unknownPatterns
+}
+
+// Error type which references an expression id, a location within source, and a message.
+type Error = common.Error
+
+// Issues defines methods for inspecting the error details of parse and check calls.
+//
+// Note: in the future, non-fatal warnings and notices may be inspectable via the Issues struct.
+type Issues struct {
+ errs *common.Errors
+ info *exprpb.SourceInfo
+}
+
+// NewIssues returns an Issues struct from a common.Errors object.
+func NewIssues(errs *common.Errors) *Issues {
+ return NewIssuesWithSourceInfo(errs, nil)
+}
+
+// NewIssuesWithSourceInfo returns an Issues struct from a common.Errors object with SourceInfo metadata
+// which can be used with the `ReportErrorAtID` method for additional error reports within the context
+// information that's inferred from an expression id.
+func NewIssuesWithSourceInfo(errs *common.Errors, info *exprpb.SourceInfo) *Issues {
+ return &Issues{
+ errs: errs,
+ info: info,
+ }
+}
+
+// Err returns an error value if the issues list contains one or more errors.
+func (i *Issues) Err() error {
+ if i == nil {
+ return nil
+ }
+ if len(i.Errors()) > 0 {
+ return errors.New(i.String())
+ }
+ return nil
+}
+
+// Errors returns the collection of errors encountered in more granular detail.
+func (i *Issues) Errors() []*Error {
+ if i == nil {
+ return []*Error{}
+ }
+ return i.errs.GetErrors()
+}
+
+// Append collects the issues from another Issues struct into a new Issues object.
+func (i *Issues) Append(other *Issues) *Issues {
+ if i == nil {
+ return other
+ }
+ if other == nil {
+ return i
+ }
+ return NewIssues(i.errs.Append(other.errs.GetErrors()))
+}
+
+// String converts the issues to a suitable display string.
+func (i *Issues) String() string {
+ if i == nil {
+ return ""
+ }
+ return i.errs.ToDisplayString()
+}
+
+// ReportErrorAtID reports an error message with an optional set of formatting arguments.
+//
+// The source metadata for the expression at `id`, if present, is attached to the error report.
+// To ensure that source metadata is attached to error reports, use NewIssuesWithSourceInfo.
+func (i *Issues) ReportErrorAtID(id int64, message string, args ...any) {
+ i.errs.ReportErrorAtID(id, locationByID(id, i.info), message, args...)
+}
+
+// locationByID returns a common.Location given an expression id.
+//
+// TODO: move this functionality into the native SourceInfo and an overhaul of the common.Source
+// as this implementation relies on the abstractions present in the protobuf SourceInfo object,
+// and is replicated in the checker.
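+//
+// As a worked example, given line offsets [5, 12], where each entry marks the
+// character offset at which the next line begins, an expression at offset 8
+// resolves to line 2, column 8-5 = 3. The offsets here are illustrative only.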
+func locationByID(id int64, sourceInfo *exprpb.SourceInfo) common.Location {
+ positions := sourceInfo.GetPositions()
+ var line = 1
+ if offset, found := positions[id]; found {
+ col := int(offset)
+ for _, lineOffset := range sourceInfo.GetLineOffsets() {
+ if lineOffset < offset {
+ line++
+ col = int(offset - lineOffset)
+ } else {
+ break
+ }
+ }
+ return common.NewLocation(line, col)
+ }
+ return common.NoLocation
+}
+
+// getStdEnv lazy initializes the CEL standard environment.
+func getStdEnv() (*Env, error) {
+ stdEnvInit.Do(func() {
+ stdEnv, stdEnvErr = NewCustomEnv(StdLib(), EagerlyValidateDeclarations(true))
+ })
+ return stdEnv, stdEnvErr
+}
+
+// interopCELTypeProvider layers support for the types.Provider interface on top of a ref.TypeProvider.
+type interopCELTypeProvider struct {
+ ref.TypeProvider
+}
+
+// FindStructType returns a types.Type instance for the given fully-qualified typeName if one exists.
+//
+// This method proxies to the underlying ref.TypeProvider's FindType method and converts the protobuf type
+// into a native type representation. If the conversion fails, the type is listed as not found.
+func (p *interopCELTypeProvider) FindStructType(typeName string) (*types.Type, bool) {
+ if et, found := p.FindType(typeName); found {
+ t, err := types.ExprTypeToType(et)
+ if err != nil {
+ return nil, false
+ }
+ return t, true
+ }
+ return nil, false
+}
+
+// FindStructFieldType returns a types.FieldType instance for the given fully-qualified typeName and field
+// name, if one exists.
+//
+// This method proxies to the underlying ref.TypeProvider's FindFieldType method and converts the protobuf type
+// into a native type representation. If the conversion fails, the type is listed as not found.
+func (p *interopCELTypeProvider) FindStructFieldType(structType, fieldName string) (*types.FieldType, bool) {
+ if ft, found := p.FindFieldType(structType, fieldName); found {
+ t, err := types.ExprTypeToType(ft.Type)
+ if err != nil {
+ return nil, false
+ }
+ return &types.FieldType{
+ Type: t,
+ IsSet: ft.IsSet,
+ GetFrom: ft.GetFrom,
+ }, true
+ }
+ return nil, false
+}
+
+// interopLegacyTypeProvider layers support for the ref.TypeProvider interface on top of a types.Provider.
+type interopLegacyTypeProvider struct {
+ types.Provider
+}
+
+// FindType returns the protobuf Type representation for the input type name if one exists.
+//
+// This method proxies to the underlying types.Provider FindStructType method and converts the types.Type
+// value to a protobuf Type representation.
+//
+// Failure to convert the type will result in the type not being found.
+func (p *interopLegacyTypeProvider) FindType(typeName string) (*exprpb.Type, bool) {
+ if t, found := p.FindStructType(typeName); found {
+ et, err := types.TypeToExprType(t)
+ if err != nil {
+ return nil, false
+ }
+ return et, true
+ }
+ return nil, false
+}
+
+// FindFieldType returns the protobuf-based FieldType representation for the input type name and field,
+// if one exists.
+//
+// This call proxies to the types.Provider FindStructFieldType method and converts the types.FieldType
+// value to a protobuf-based ref.FieldType representation if found.
+//
+// Failure to convert the FieldType will result in the field not being found.
+func (p *interopLegacyTypeProvider) FindFieldType(structType, fieldName string) (*ref.FieldType, bool) {
+ if cft, found := p.FindStructFieldType(structType, fieldName); found {
+ et, err := types.TypeToExprType(cft.Type)
+ if err != nil {
+ return nil, false
+ }
+ return &ref.FieldType{
+ Type: et,
+ IsSet: cft.IsSet,
+ GetFrom: cft.GetFrom,
+ }, true
+ }
+ return nil, false
+}
+
+var (
+ stdEnvInit sync.Once
+ stdEnv *Env
+ stdEnvErr error
+)
diff --git a/vendor/github.com/google/cel-go/cel/io.go b/vendor/github.com/google/cel-go/cel/io.go
new file mode 100644
index 0000000..80f6314
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/io.go
@@ -0,0 +1,272 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+
+ "google.golang.org/protobuf/proto"
+
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+ "github.com/google/cel-go/parser"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+)
+
+// CheckedExprToAst converts a checked expression proto message to an Ast.
+func CheckedExprToAst(checkedExpr *exprpb.CheckedExpr) *Ast {
+ checked, _ := CheckedExprToAstWithSource(checkedExpr, nil)
+ return checked
+}
+
+// CheckedExprToAstWithSource converts a checked expression proto message to an Ast,
+// using the provided Source as the textual contents.
+//
+// In general the source is not necessary unless the AST has been modified between the
+// `Parse` and `Check` calls, as an `Ast` created from the `Parse` step will carry the source
+// through future calls.
+//
+// Prefer CheckedExprToAst if loading expressions from storage.
+func CheckedExprToAstWithSource(checkedExpr *exprpb.CheckedExpr, src Source) (*Ast, error) {
+ checkedAST, err := ast.CheckedExprToCheckedAST(checkedExpr)
+ if err != nil {
+ return nil, err
+ }
+ return &Ast{
+ expr: checkedAST.Expr,
+ info: checkedAST.SourceInfo,
+ source: src,
+ refMap: checkedAST.ReferenceMap,
+ typeMap: checkedAST.TypeMap,
+ }, nil
+}
+
+// AstToCheckedExpr converts an Ast to a protobuf CheckedExpr value.
+//
+// If the Ast.IsChecked() returns false, this conversion method will return an error.
+func AstToCheckedExpr(a *Ast) (*exprpb.CheckedExpr, error) {
+ if !a.IsChecked() {
+ return nil, fmt.Errorf("cannot convert unchecked ast")
+ }
+ cAst := &ast.CheckedAST{
+ Expr: a.expr,
+ SourceInfo: a.info,
+ ReferenceMap: a.refMap,
+ TypeMap: a.typeMap,
+ }
+ return ast.CheckedASTToCheckedExpr(cAst)
+}
+
+// ParsedExprToAst converts a parsed expression proto message to an Ast.
+func ParsedExprToAst(parsedExpr *exprpb.ParsedExpr) *Ast {
+ return ParsedExprToAstWithSource(parsedExpr, nil)
+}
+
+// ParsedExprToAstWithSource converts a parsed expression proto message to an Ast,
+// using the provided Source as the textual contents.
+//
+// In general you only need this if you need to recheck a previously checked
+// expression, or if you need to separately check a subset of an expression.
+//
+// Prefer ParsedExprToAst if loading expressions from storage.
+func ParsedExprToAstWithSource(parsedExpr *exprpb.ParsedExpr, src Source) *Ast {
+ si := parsedExpr.GetSourceInfo()
+ if si == nil {
+ si = &exprpb.SourceInfo{}
+ }
+ if src == nil {
+ src = common.NewInfoSource(si)
+ }
+ return &Ast{
+ expr: parsedExpr.GetExpr(),
+ info: si,
+ source: src,
+ }
+}
+
+// AstToParsedExpr converts an Ast to a protobuf ParsedExpr value.
+func AstToParsedExpr(a *Ast) (*exprpb.ParsedExpr, error) {
+ return &exprpb.ParsedExpr{
+ Expr: a.Expr(),
+ SourceInfo: a.SourceInfo(),
+ }, nil
+}
+
+// AstToString converts an Ast back to a string if possible.
+//
+// Note, the conversion may not be an exact replica of the original expression, but will produce
+// a string that is semantically equivalent and whose textual representation is stable.
+func AstToString(a *Ast) (string, error) {
+ expr := a.Expr()
+ info := a.SourceInfo()
+ return parser.Unparse(expr, info)
+}
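+
+// exampleRoundTrip is a minimal sketch, not part of the upstream cel-go
+// sources, of serializing a checked Ast to its proto form and restoring it,
+// as might be done when caching checked expressions between processes.
+func exampleRoundTrip(checked *Ast) (*Ast, error) {
+ expr, err := AstToCheckedExpr(checked)
+ if err != nil {
+ return nil, err
+ }
+ // The CheckedExpr could be persisted with proto.Marshal and reloaded later.
+ return CheckedExprToAst(expr), nil
+}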
+
+// RefValueToValue converts between ref.Val and api.expr.Value.
+// The result Value is the serialized proto form. The ref.Val must not be an error or unknown value.
+func RefValueToValue(res ref.Val) (*exprpb.Value, error) {
+ switch res.Type() {
+ case types.BoolType:
+ return &exprpb.Value{
+ Kind: &exprpb.Value_BoolValue{BoolValue: res.Value().(bool)}}, nil
+ case types.BytesType:
+ return &exprpb.Value{
+ Kind: &exprpb.Value_BytesValue{BytesValue: res.Value().([]byte)}}, nil
+ case types.DoubleType:
+ return &exprpb.Value{
+ Kind: &exprpb.Value_DoubleValue{DoubleValue: res.Value().(float64)}}, nil
+ case types.IntType:
+ return &exprpb.Value{
+ Kind: &exprpb.Value_Int64Value{Int64Value: res.Value().(int64)}}, nil
+ case types.ListType:
+ l := res.(traits.Lister)
+ sz := l.Size().(types.Int)
+ elts := make([]*exprpb.Value, 0, int64(sz))
+ for i := types.Int(0); i < sz; i++ {
+ v, err := RefValueToValue(l.Get(i))
+ if err != nil {
+ return nil, err
+ }
+ elts = append(elts, v)
+ }
+ return &exprpb.Value{
+ Kind: &exprpb.Value_ListValue{
+ ListValue: &exprpb.ListValue{Values: elts}}}, nil
+ case types.MapType:
+ mapper := res.(traits.Mapper)
+ sz := mapper.Size().(types.Int)
+ entries := make([]*exprpb.MapValue_Entry, 0, int64(sz))
+ for it := mapper.Iterator(); it.HasNext().(types.Bool); {
+ k := it.Next()
+ v := mapper.Get(k)
+ kv, err := RefValueToValue(k)
+ if err != nil {
+ return nil, err
+ }
+ vv, err := RefValueToValue(v)
+ if err != nil {
+ return nil, err
+ }
+ entries = append(entries, &exprpb.MapValue_Entry{Key: kv, Value: vv})
+ }
+ return &exprpb.Value{
+ Kind: &exprpb.Value_MapValue{
+ MapValue: &exprpb.MapValue{Entries: entries}}}, nil
+ case types.NullType:
+ return &exprpb.Value{
+ Kind: &exprpb.Value_NullValue{}}, nil
+ case types.StringType:
+ return &exprpb.Value{
+ Kind: &exprpb.Value_StringValue{StringValue: res.Value().(string)}}, nil
+ case types.TypeType:
+ typeName := res.(ref.Type).TypeName()
+ return &exprpb.Value{Kind: &exprpb.Value_TypeValue{TypeValue: typeName}}, nil
+ case types.UintType:
+ return &exprpb.Value{
+ Kind: &exprpb.Value_Uint64Value{Uint64Value: res.Value().(uint64)}}, nil
+ default:
+ any, err := res.ConvertToNative(anyPbType)
+ if err != nil {
+ return nil, err
+ }
+ return &exprpb.Value{
+ Kind: &exprpb.Value_ObjectValue{ObjectValue: any.(*anypb.Any)}}, nil
+ }
+}
+
+var (
+ typeNameToTypeValue = map[string]ref.Val{
+ "bool": types.BoolType,
+ "bytes": types.BytesType,
+ "double": types.DoubleType,
+ "null_type": types.NullType,
+ "int": types.IntType,
+ "list": types.ListType,
+ "map": types.MapType,
+ "string": types.StringType,
+ "type": types.TypeType,
+ "uint": types.UintType,
+ }
+
+ anyPbType = reflect.TypeOf(&anypb.Any{})
+)
+
+// ValueToRefValue converts between exprpb.Value and ref.Val.
+func ValueToRefValue(adapter types.Adapter, v *exprpb.Value) (ref.Val, error) {
+ switch v.Kind.(type) {
+ case *exprpb.Value_NullValue:
+ return types.NullValue, nil
+ case *exprpb.Value_BoolValue:
+ return types.Bool(v.GetBoolValue()), nil
+ case *exprpb.Value_Int64Value:
+ return types.Int(v.GetInt64Value()), nil
+ case *exprpb.Value_Uint64Value:
+ return types.Uint(v.GetUint64Value()), nil
+ case *exprpb.Value_DoubleValue:
+ return types.Double(v.GetDoubleValue()), nil
+ case *exprpb.Value_StringValue:
+ return types.String(v.GetStringValue()), nil
+ case *exprpb.Value_BytesValue:
+ return types.Bytes(v.GetBytesValue()), nil
+ case *exprpb.Value_ObjectValue:
+ any := v.GetObjectValue()
+ msg, err := anypb.UnmarshalNew(any, proto.UnmarshalOptions{DiscardUnknown: true})
+ if err != nil {
+ return nil, err
+ }
+ return adapter.NativeToValue(msg), nil
+ case *exprpb.Value_MapValue:
+ m := v.GetMapValue()
+ entries := make(map[ref.Val]ref.Val)
+ for _, entry := range m.Entries {
+ key, err := ValueToRefValue(adapter, entry.Key)
+ if err != nil {
+ return nil, err
+ }
+ pb, err := ValueToRefValue(adapter, entry.Value)
+ if err != nil {
+ return nil, err
+ }
+ entries[key] = pb
+ }
+ return adapter.NativeToValue(entries), nil
+ case *exprpb.Value_ListValue:
+ l := v.GetListValue()
+ elts := make([]ref.Val, len(l.Values))
+ for i, e := range l.Values {
+ rv, err := ValueToRefValue(adapter, e)
+ if err != nil {
+ return nil, err
+ }
+ elts[i] = rv
+ }
+ return adapter.NativeToValue(elts), nil
+ case *exprpb.Value_TypeValue:
+ typeName := v.GetTypeValue()
+ tv, ok := typeNameToTypeValue[typeName]
+ if ok {
+ return tv, nil
+ }
+ return types.NewObjectTypeValue(typeName), nil
+ }
+ return nil, errors.New("unknown value")
+}
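+
+// exampleValueRoundTrip is a minimal sketch, not part of the upstream cel-go
+// sources, of round-tripping an evaluation result through its proto Value
+// form, as might be done to ship results across an RPC boundary.
+func exampleValueRoundTrip(adapter types.Adapter, res ref.Val) (ref.Val, error) {
+ pbVal, err := RefValueToValue(res)
+ if err != nil {
+ return nil, err
+ }
+ return ValueToRefValue(adapter, pbVal)
+}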
diff --git a/vendor/github.com/google/cel-go/cel/library.go b/vendor/github.com/google/cel-go/cel/library.go
new file mode 100644
index 0000000..4d23208
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/library.go
@@ -0,0 +1,785 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "math"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/stdlib"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+ "github.com/google/cel-go/interpreter"
+ "github.com/google/cel-go/parser"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+const (
+ optMapMacro = "optMap"
+ optFlatMapMacro = "optFlatMap"
+ hasValueFunc = "hasValue"
+ optionalNoneFunc = "optional.none"
+ optionalOfFunc = "optional.of"
+ optionalOfNonZeroValueFunc = "optional.ofNonZeroValue"
+ valueFunc = "value"
+ unusedIterVar = "#unused"
+)
+
+// Library provides a collection of EnvOption and ProgramOption values used to configure a CEL
+// environment for a particular use case or with a related set of functionality.
+//
+// Note, the ProgramOption values provided by a library are expected to be static and not vary
+// between calls to Env.Program(). If there is a need for such dynamic configuration, prefer to
+// configure these options outside the Library and within the Env.Program() call directly.
+type Library interface {
+ // CompileOptions returns a collection of functional options for configuring the Parse / Check
+ // environment.
+ CompileOptions() []EnvOption
+
+ // ProgramOptions returns a collection of functional options which should be included in every
+ // Program generated from the Env.Program() call.
+ ProgramOptions() []ProgramOption
+}
+
+// SingletonLibrary refines the Library interface to ensure that libraries in this format are only
+// configured once within the environment.
+type SingletonLibrary interface {
+ Library
+
+ // LibraryName provides a namespaced name which is used to check whether the library has already
+ // been configured in the environment.
+ LibraryName() string
+}
+
+// Lib creates an EnvOption out of a Library, allowing libraries to be provided as functional args,
+// and to be linked to each other.
+func Lib(l Library) EnvOption {
+ singleton, isSingleton := l.(SingletonLibrary)
+ return func(e *Env) (*Env, error) {
+ if isSingleton {
+ if e.HasLibrary(singleton.LibraryName()) {
+ return e, nil
+ }
+ e.libraries[singleton.LibraryName()] = true
+ }
+ var err error
+ for _, opt := range l.CompileOptions() {
+ e, err = opt(e)
+ if err != nil {
+ return nil, err
+ }
+ }
+ e.progOpts = append(e.progOpts, l.ProgramOptions()...)
+ return e, nil
+ }
+}
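+
+// exampleGreetLib is a minimal SingletonLibrary sketch, not part of the
+// upstream cel-go sources; the `greet` function and library name are
+// illustrative assumptions. It would be installed via Lib(exampleGreetLib{}).
+type exampleGreetLib struct{}
+
+// LibraryName implements the SingletonLibrary interface method.
+func (exampleGreetLib) LibraryName() string {
+ return "example.lib.greet"
+}
+
+// CompileOptions declares the hypothetical greet function and its binding.
+func (exampleGreetLib) CompileOptions() []EnvOption {
+ return []EnvOption{
+ Function("greet",
+ Overload("greet_string", []*Type{StringType}, StringType,
+ UnaryBinding(func(arg ref.Val) ref.Val {
+ return types.String("hello " + string(arg.(types.String)))
+ }))),
+ }
+}
+
+// ProgramOptions returns no evaluation-time options for this sketch.
+func (exampleGreetLib) ProgramOptions() []ProgramOption {
+ return []ProgramOption{}
+}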
+
+// StdLib returns an EnvOption for the standard library of CEL functions and macros.
+func StdLib() EnvOption {
+ return Lib(stdLibrary{})
+}
+
+// stdLibrary implements the Library interface and provides functional options for the core CEL
+// features documented in the specification.
+type stdLibrary struct{}
+
+// LibraryName implements the SingletonLibrary interface method.
+func (stdLibrary) LibraryName() string {
+ return "cel.lib.std"
+}
+
+// CompileOptions returns options for the standard CEL function declarations and macros.
+func (stdLibrary) CompileOptions() []EnvOption {
+ return []EnvOption{
+ func(e *Env) (*Env, error) {
+ var err error
+ for _, fn := range stdlib.Functions() {
+ existing, found := e.functions[fn.Name()]
+ if found {
+ fn, err = existing.Merge(fn)
+ if err != nil {
+ return nil, err
+ }
+ }
+ e.functions[fn.Name()] = fn
+ }
+ return e, nil
+ },
+ func(e *Env) (*Env, error) {
+ e.variables = append(e.variables, stdlib.Types()...)
+ return e, nil
+ },
+ Macros(StandardMacros...),
+ }
+}
+
+// ProgramOptions returns function implementations for the standard CEL functions.
+func (stdLibrary) ProgramOptions() []ProgramOption {
+ return []ProgramOption{}
+}
+
+// OptionalTypes enable support for optional syntax and types in CEL.
+//
+// The optional value type makes it possible to express whether variables have
+// been provided, whether a result has been computed, and in the future whether
+// an object field path, map key value, or list index has a value.
+//
+// # Syntax Changes
+//
+// OptionalTypes are unlike other CEL extensions because they modify the CEL
+// syntax itself, notably through the use of a `?` preceding a field name or
+// index value.
+//
+// ## Field Selection
+//
+// The optional syntax in field selection is denoted as `obj.?field`. In other
+// words, if a field is set, return `optional.of(obj.field)`, else
+// `optional.none()`. The optional field selection is viral in the sense that
+// after the first optional selection all subsequent selections or indices
+// are treated as optional, i.e. the following expressions are equivalent:
+//
+// obj.?field.subfield
+// obj.?field.?subfield
+//
+// ## Indexing
+//
+// Similar to field selection, the optional syntax can be used in index
+// expressions on maps and lists:
+//
+// list[?0]
+// map[?key]
+//
+// ## Optional Field Setting
+//
+// When creating map or message literals, if a field may be optionally set
+// based on its presence, then placing a `?` before the field name or key
+// will ensure the type on the right-hand side must be optional(T) where T
+// is the type of the field or key-value.
+//
+// The following returns a map with the key expression set only if the
+// subfield is present, otherwise an empty map is created:
+//
+// {?key: obj.?field.subfield}
+//
+// ## Optional Element Setting
+//
+// When creating list literals, an element in the list may be optionally added
+// when the element expression is preceded by a `?`:
+//
+// [a, ?b, ?c] // return a list with either [a], [a, b], [a, b, c], or [a, c]
+//
+// # Optional.Of
+//
+// Create an optional(T) value of a given value with type T.
+//
+// optional.of(10)
+//
+// # Optional.OfNonZeroValue
+//
+// Create an optional(T) value of a given value with type T if it is not a
+// zero-value. A zero-value is the default empty value for any given CEL type,
+// including empty protobuf message types. If the value is empty, the result
+// of this call will be optional.none().
+//
+// optional.ofNonZeroValue([1, 2, 3]) // optional(list(int))
+// optional.ofNonZeroValue([]) // optional.none()
+// optional.ofNonZeroValue(0) // optional.none()
+// optional.ofNonZeroValue("") // optional.none()
+//
+// # Optional.None
+//
+// Create an empty optional value.
+//
+// # HasValue
+//
+// Determine whether the optional contains a value.
+//
+// optional.of(b'hello').hasValue() // true
+// optional.ofNonZeroValue({}).hasValue() // false
+//
+// # Value
+//
+// Get the value contained by the optional. If the optional does not have a
+// value, the result will be a CEL error.
+//
+// optional.of(b'hello').value() // b'hello'
+// optional.ofNonZeroValue({}).value() // error
+//
+// # Or
+//
+// If the value on the left-hand side is optional.none(), the optional value
+// on the right-hand side is returned. If the value on the left-hand side is
+// valued, then it is returned. This operation is short-circuiting and will
+// only evaluate as many links in the `or` chain as are needed to return a
+// non-empty optional value.
+//
+// obj.?field.or(m[?key])
+// l[?index].or(obj.?field.subfield).or(obj.?other)
+//
+// # OrValue
+//
+// Either return the value contained within the optional on the left-hand side
+// or return the alternative value on the right hand side.
+//
+// m[?key].orValue("none")
+//
+// # OptMap
+//
+// Apply a transformation to the optional's underlying value if it is not empty
+// and return an optional typed result based on the transformation. The
+// transformation expression type must return a type T which is wrapped into
+// an optional.
+//
+// msg.?elements.optMap(e, e.size()).orValue(0)
+//
+// # OptFlatMap
+//
+// Introduced in version: 1
+//
+// Apply a transformation to the optional's underlying value if it is not empty
+// and return the result. The transform expression must return an optional(T)
+// rather than type T. This can be useful when dealing with zero values and
+// conditionally generating an empty or non-empty result in ways which cannot
+// be expressed with `optMap`.
+//
+// msg.?elements.optFlatMap(e, e[?0]) // return the first element if present.
+func OptionalTypes(opts ...OptionalTypesOption) EnvOption {
+ lib := &optionalLib{version: math.MaxUint32}
+ for _, opt := range opts {
+ lib = opt(lib)
+ }
+ return Lib(lib)
+}
+
+type optionalLib struct {
+ version uint32
+}
+
+// OptionalTypesOption is a functional interface for configuring the optional types library.
+type OptionalTypesOption func(*optionalLib) *optionalLib
+
+// OptionalTypesVersion configures the version of the optional type library.
+//
+// The version limits which functions are available. Only functions introduced
+// at or below the given version are included in the library. If this option
+// is not set, all functions are available.
+//
+// See the library documentation to determine the version in which a function was introduced.
+// If the documentation does not state a version, the function can be assumed to have been
+// introduced at version 0, when the library was first created.
+func OptionalTypesVersion(version uint32) OptionalTypesOption {
+ return func(lib *optionalLib) *optionalLib {
+ lib.version = version
+ return lib
+ }
+}
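+
+// exampleOptionalEnv is a minimal sketch, not part of the upstream cel-go
+// sources, of enabling optional types pinned to version 0 so that later
+// additions such as optFlatMap stay disabled; the `m` variable is an
+// illustrative assumption.
+func exampleOptionalEnv() (*Env, error) {
+ return NewEnv(
+ Variable("m", MapType(StringType, StringType)),
+ OptionalTypes(OptionalTypesVersion(0)),
+ )
+}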
+
+// LibraryName implements the SingletonLibrary interface method.
+func (lib *optionalLib) LibraryName() string {
+ return "cel.lib.optional"
+}
+
+// CompileOptions implements the Library interface method.
+func (lib *optionalLib) CompileOptions() []EnvOption {
+ paramTypeK := TypeParamType("K")
+ paramTypeV := TypeParamType("V")
+ optionalTypeV := OptionalType(paramTypeV)
+ listTypeV := ListType(paramTypeV)
+ mapTypeKV := MapType(paramTypeK, paramTypeV)
+
+ opts := []EnvOption{
+ // Enable the optional syntax in the parser.
+ enableOptionalSyntax(),
+
+ // Introduce the optional type.
+ Types(types.OptionalType),
+
+ // Configure the optMap and optFlatMap macros.
+ Macros(NewReceiverMacro(optMapMacro, 2, optMap)),
+
+ // Global and member functions for working with optional values.
+ Function(optionalOfFunc,
+ Overload("optional_of", []*Type{paramTypeV}, optionalTypeV,
+ UnaryBinding(func(value ref.Val) ref.Val {
+ return types.OptionalOf(value)
+ }))),
+ Function(optionalOfNonZeroValueFunc,
+ Overload("optional_ofNonZeroValue", []*Type{paramTypeV}, optionalTypeV,
+ UnaryBinding(func(value ref.Val) ref.Val {
+ v, isZeroer := value.(traits.Zeroer)
+ if !isZeroer || !v.IsZeroValue() {
+ return types.OptionalOf(value)
+ }
+ return types.OptionalNone
+ }))),
+ Function(optionalNoneFunc,
+ Overload("optional_none", []*Type{}, optionalTypeV,
+ FunctionBinding(func(values ...ref.Val) ref.Val {
+ return types.OptionalNone
+ }))),
+ Function(valueFunc,
+ MemberOverload("optional_value", []*Type{optionalTypeV}, paramTypeV,
+ UnaryBinding(func(value ref.Val) ref.Val {
+ opt := value.(*types.Optional)
+ return opt.GetValue()
+ }))),
+ Function(hasValueFunc,
+ MemberOverload("optional_hasValue", []*Type{optionalTypeV}, BoolType,
+ UnaryBinding(func(value ref.Val) ref.Val {
+ opt := value.(*types.Optional)
+ return types.Bool(opt.HasValue())
+ }))),
+
+ // Implementation of 'or' and 'orValue' are special-cased to support short-circuiting in the
+ // evaluation chain.
+ Function("or",
+ MemberOverload("optional_or_optional", []*Type{optionalTypeV, optionalTypeV}, optionalTypeV)),
+ Function("orValue",
+ MemberOverload("optional_orValue_value", []*Type{optionalTypeV, paramTypeV}, paramTypeV)),
+
+ // OptSelect is handled specially by the type-checker, so the receiver's field type is used to determine the
+ // output type.
+ Function(operators.OptSelect,
+ Overload("select_optional_field", []*Type{DynType, StringType}, optionalTypeV)),
+
+ // OptIndex is handled mostly like any other indexing operation on a list or map, so the type-checker can use
+ // these signatures to determine type-agreement without any special handling.
+ Function(operators.OptIndex,
+ Overload("list_optindex_optional_int", []*Type{listTypeV, IntType}, optionalTypeV),
+ Overload("optional_list_optindex_optional_int", []*Type{OptionalType(listTypeV), IntType}, optionalTypeV),
+ Overload("map_optindex_optional_value", []*Type{mapTypeKV, paramTypeK}, optionalTypeV),
+ Overload("optional_map_optindex_optional_value", []*Type{OptionalType(mapTypeKV), paramTypeK}, optionalTypeV)),
+
+ // Index overloads to accommodate using an optional value as the operand.
+ Function(operators.Index,
+ Overload("optional_list_index_int", []*Type{OptionalType(listTypeV), IntType}, optionalTypeV),
+ Overload("optional_map_index_value", []*Type{OptionalType(mapTypeKV), paramTypeK}, optionalTypeV)),
+ }
+ if lib.version >= 1 {
+ opts = append(opts, Macros(NewReceiverMacro(optFlatMapMacro, 2, optFlatMap)))
+ }
+ return opts
+}
+
+// ProgramOptions implements the Library interface method.
+func (lib *optionalLib) ProgramOptions() []ProgramOption {
+ return []ProgramOption{
+ CustomDecorator(decorateOptionalOr),
+ }
+}
+
+func optMap(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
+ varIdent := args[0]
+ varName := ""
+ switch varIdent.GetExprKind().(type) {
+ case *exprpb.Expr_IdentExpr:
+ varName = varIdent.GetIdentExpr().GetName()
+ default:
+ return nil, meh.NewError(varIdent.GetId(), "optMap() variable name must be a simple identifier")
+ }
+ mapExpr := args[1]
+ return meh.GlobalCall(
+ operators.Conditional,
+ meh.ReceiverCall(hasValueFunc, target),
+ meh.GlobalCall(optionalOfFunc,
+ meh.Fold(
+ unusedIterVar,
+ meh.NewList(),
+ varName,
+ meh.ReceiverCall(valueFunc, target),
+ meh.LiteralBool(false),
+ meh.Ident(varName),
+ mapExpr,
+ ),
+ ),
+ meh.GlobalCall(optionalNoneFunc),
+ ), nil
+}
+
+func optFlatMap(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
+ varIdent := args[0]
+ varName := ""
+ switch varIdent.GetExprKind().(type) {
+ case *exprpb.Expr_IdentExpr:
+ varName = varIdent.GetIdentExpr().GetName()
+ default:
+ return nil, meh.NewError(varIdent.GetId(), "optFlatMap() variable name must be a simple identifier")
+ }
+ mapExpr := args[1]
+ return meh.GlobalCall(
+ operators.Conditional,
+ meh.ReceiverCall(hasValueFunc, target),
+ meh.Fold(
+ unusedIterVar,
+ meh.NewList(),
+ varName,
+ meh.ReceiverCall(valueFunc, target),
+ meh.LiteralBool(false),
+ meh.Ident(varName),
+ mapExpr,
+ ),
+ meh.GlobalCall(optionalNoneFunc),
+ ), nil
+}
+
+func enableOptionalSyntax() EnvOption {
+ return func(e *Env) (*Env, error) {
+ e.prsrOpts = append(e.prsrOpts, parser.EnableOptionalSyntax(true))
+ return e, nil
+ }
+}
+
+func decorateOptionalOr(i interpreter.Interpretable) (interpreter.Interpretable, error) {
+ call, ok := i.(interpreter.InterpretableCall)
+ if !ok {
+ return i, nil
+ }
+ args := call.Args()
+ if len(args) != 2 {
+ return i, nil
+ }
+ switch call.Function() {
+ case "or":
+ if call.OverloadID() != "" && call.OverloadID() != "optional_or_optional" {
+ return i, nil
+ }
+ return &evalOptionalOr{
+ id: call.ID(),
+ lhs: args[0],
+ rhs: args[1],
+ }, nil
+ case "orValue":
+ if call.OverloadID() != "" && call.OverloadID() != "optional_orValue_value" {
+ return i, nil
+ }
+ return &evalOptionalOrValue{
+ id: call.ID(),
+ lhs: args[0],
+ rhs: args[1],
+ }, nil
+ default:
+ return i, nil
+ }
+}
+
+// evalOptionalOr selects between two optional values: the first if it has a value, otherwise
+// the second optional expression is evaluated and its result returned.
+type evalOptionalOr struct {
+ id int64
+ lhs interpreter.Interpretable
+ rhs interpreter.Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (opt *evalOptionalOr) ID() int64 {
+ return opt.id
+}
+
+// Eval evaluates the left-hand side optional to determine whether it contains a value, else
+// proceeds with the right-hand side evaluation.
+func (opt *evalOptionalOr) Eval(ctx interpreter.Activation) ref.Val {
+ // short-circuit lhs.
+ optLHS := opt.lhs.Eval(ctx)
+ optVal, ok := optLHS.(*types.Optional)
+ if !ok {
+ return optLHS
+ }
+ if optVal.HasValue() {
+ return optVal
+ }
+ return opt.rhs.Eval(ctx)
+}
+
+// evalOptionalOrValue selects between an optional or a concrete value. If the optional has a value,
+// its value is returned, otherwise the alternative value expression is evaluated and returned.
+type evalOptionalOrValue struct {
+ id int64
+ lhs interpreter.Interpretable
+ rhs interpreter.Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (opt *evalOptionalOrValue) ID() int64 {
+ return opt.id
+}
+
+// Eval evaluates the left-hand side optional to determine whether it contains a value, else
+// proceeds with the right-hand side evaluation.
+func (opt *evalOptionalOrValue) Eval(ctx interpreter.Activation) ref.Val {
+ // short-circuit lhs.
+ optLHS := opt.lhs.Eval(ctx)
+ optVal, ok := optLHS.(*types.Optional)
+ if !ok {
+ return optLHS
+ }
+ if optVal.HasValue() {
+ return optVal.GetValue()
+ }
+ return opt.rhs.Eval(ctx)
+}
+
+type timeUTCLibrary struct{}
+
+func (timeUTCLibrary) CompileOptions() []EnvOption {
+ return timeOverloadDeclarations
+}
+
+func (timeUTCLibrary) ProgramOptions() []ProgramOption {
+ return []ProgramOption{}
+}
+
+// Declarations and functions which enable using UTC on time.Time inputs when the timezone is unspecified
+// in the CEL expression.
+var (
+ utcTZ = types.String("UTC")
+
+ timeOverloadDeclarations = []EnvOption{
+ Function(overloads.TimeGetHours,
+ MemberOverload(overloads.DurationToHours, []*Type{DurationType}, IntType,
+ UnaryBinding(types.DurationGetHours))),
+ Function(overloads.TimeGetMinutes,
+ MemberOverload(overloads.DurationToMinutes, []*Type{DurationType}, IntType,
+ UnaryBinding(types.DurationGetMinutes))),
+ Function(overloads.TimeGetSeconds,
+ MemberOverload(overloads.DurationToSeconds, []*Type{DurationType}, IntType,
+ UnaryBinding(types.DurationGetSeconds))),
+ Function(overloads.TimeGetMilliseconds,
+ MemberOverload(overloads.DurationToMilliseconds, []*Type{DurationType}, IntType,
+ UnaryBinding(types.DurationGetMilliseconds))),
+ Function(overloads.TimeGetFullYear,
+ MemberOverload(overloads.TimestampToYear, []*Type{TimestampType}, IntType,
+ UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetFullYear(ts, utcTZ)
+ }),
+ ),
+ MemberOverload(overloads.TimestampToYearWithTz, []*Type{TimestampType, StringType}, IntType,
+ BinaryBinding(timestampGetFullYear),
+ ),
+ ),
+ Function(overloads.TimeGetMonth,
+ MemberOverload(overloads.TimestampToMonth, []*Type{TimestampType}, IntType,
+ UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetMonth(ts, utcTZ)
+ }),
+ ),
+ MemberOverload(overloads.TimestampToMonthWithTz, []*Type{TimestampType, StringType}, IntType,
+ BinaryBinding(timestampGetMonth),
+ ),
+ ),
+ Function(overloads.TimeGetDayOfYear,
+ MemberOverload(overloads.TimestampToDayOfYear, []*Type{TimestampType}, IntType,
+ UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetDayOfYear(ts, utcTZ)
+ }),
+ ),
+ MemberOverload(overloads.TimestampToDayOfYearWithTz, []*Type{TimestampType, StringType}, IntType,
+ BinaryBinding(func(ts, tz ref.Val) ref.Val {
+ return timestampGetDayOfYear(ts, tz)
+ }),
+ ),
+ ),
+ Function(overloads.TimeGetDayOfMonth,
+ MemberOverload(overloads.TimestampToDayOfMonthZeroBased, []*Type{TimestampType}, IntType,
+ UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetDayOfMonthZeroBased(ts, utcTZ)
+ }),
+ ),
+ MemberOverload(overloads.TimestampToDayOfMonthZeroBasedWithTz, []*Type{TimestampType, StringType}, IntType,
+ BinaryBinding(timestampGetDayOfMonthZeroBased),
+ ),
+ ),
+ Function(overloads.TimeGetDate,
+ MemberOverload(overloads.TimestampToDayOfMonthOneBased, []*Type{TimestampType}, IntType,
+ UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetDayOfMonthOneBased(ts, utcTZ)
+ }),
+ ),
+ MemberOverload(overloads.TimestampToDayOfMonthOneBasedWithTz, []*Type{TimestampType, StringType}, IntType,
+ BinaryBinding(timestampGetDayOfMonthOneBased),
+ ),
+ ),
+ Function(overloads.TimeGetDayOfWeek,
+ MemberOverload(overloads.TimestampToDayOfWeek, []*Type{TimestampType}, IntType,
+ UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetDayOfWeek(ts, utcTZ)
+ }),
+ ),
+ MemberOverload(overloads.TimestampToDayOfWeekWithTz, []*Type{TimestampType, StringType}, IntType,
+ BinaryBinding(timestampGetDayOfWeek),
+ ),
+ ),
+ Function(overloads.TimeGetHours,
+ MemberOverload(overloads.TimestampToHours, []*Type{TimestampType}, IntType,
+ UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetHours(ts, utcTZ)
+ }),
+ ),
+ MemberOverload(overloads.TimestampToHoursWithTz, []*Type{TimestampType, StringType}, IntType,
+ BinaryBinding(timestampGetHours),
+ ),
+ ),
+ Function(overloads.TimeGetMinutes,
+ MemberOverload(overloads.TimestampToMinutes, []*Type{TimestampType}, IntType,
+ UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetMinutes(ts, utcTZ)
+ }),
+ ),
+ MemberOverload(overloads.TimestampToMinutesWithTz, []*Type{TimestampType, StringType}, IntType,
+ BinaryBinding(timestampGetMinutes),
+ ),
+ ),
+ Function(overloads.TimeGetSeconds,
+ MemberOverload(overloads.TimestampToSeconds, []*Type{TimestampType}, IntType,
+ UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetSeconds(ts, utcTZ)
+ }),
+ ),
+ MemberOverload(overloads.TimestampToSecondsWithTz, []*Type{TimestampType, StringType}, IntType,
+ BinaryBinding(timestampGetSeconds),
+ ),
+ ),
+ Function(overloads.TimeGetMilliseconds,
+ MemberOverload(overloads.TimestampToMilliseconds, []*Type{TimestampType}, IntType,
+ UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetMilliseconds(ts, utcTZ)
+ }),
+ ),
+ MemberOverload(overloads.TimestampToMillisecondsWithTz, []*Type{TimestampType, StringType}, IntType,
+ BinaryBinding(timestampGetMilliseconds),
+ ),
+ ),
+ }
+)
+
+func timestampGetFullYear(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ return types.Int(t.Year())
+}
+
+func timestampGetMonth(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ // CEL spec indicates that the month should be 0-based, but the Time value
+ // for Month() is 1-based.
+ return types.Int(t.Month() - 1)
+}
+
+func timestampGetDayOfYear(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ return types.Int(t.YearDay() - 1)
+}
+
+func timestampGetDayOfMonthZeroBased(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ return types.Int(t.Day() - 1)
+}
+
+func timestampGetDayOfMonthOneBased(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ return types.Int(t.Day())
+}
+
+func timestampGetDayOfWeek(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ return types.Int(t.Weekday())
+}
+
+func timestampGetHours(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ return types.Int(t.Hour())
+}
+
+func timestampGetMinutes(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ return types.Int(t.Minute())
+}
+
+func timestampGetSeconds(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ return types.Int(t.Second())
+}
+
+func timestampGetMilliseconds(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ return types.Int(t.Nanosecond() / 1000000)
+}
+
+func inTimeZone(ts, tz ref.Val) (time.Time, error) {
+ t := ts.(types.Timestamp)
+ val := string(tz.(types.String))
+ ind := strings.Index(val, ":")
+ if ind == -1 {
+ loc, err := time.LoadLocation(val)
+ if err != nil {
+ return time.Time{}, err
+ }
+ return t.In(loc), nil
+ }
+
+ // If the input is not the name of a timezone (for example, 'US/Central'), it should be a numerical offset from UTC
+ // in the format ^(+|-)(0[0-9]|1[0-4]):[0-5][0-9]$. The numerical input is parsed in terms of hours and minutes.
+ hr, err := strconv.Atoi(string(val[0:ind]))
+ if err != nil {
+ return time.Time{}, err
+ }
+ min, err := strconv.Atoi(string(val[ind+1:]))
+ if err != nil {
+ return time.Time{}, err
+ }
+ var offset int
+ if string(val[0]) == "-" {
+ offset = hr*60 - min
+ } else {
+ offset = hr*60 + min
+ }
+ secondsEastOfUTC := int((time.Duration(offset) * time.Minute).Seconds())
+ timezone := time.FixedZone("", secondsEastOfUTC)
+ return t.In(timezone), nil
+}
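+
+// As a worked example of the offset form above, "-08:00" parses to hr = -8 and
+// min = 0, giving offset = -8*60 - 0 = -480 minutes, i.e. 28800 seconds west
+// of UTC, equivalent to a fixed Pacific Standard Time zone.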
diff --git a/vendor/github.com/google/cel-go/cel/macro.go b/vendor/github.com/google/cel-go/cel/macro.go
new file mode 100644
index 0000000..1eb414c
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/macro.go
@@ -0,0 +1,144 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "github.com/google/cel-go/parser"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Macro describes a function signature to match and the MacroExpander to apply.
+//
+// Note: when a Macro should apply to multiple overloads (based on arg count) of a given function,
+// create one Macro per arg count or use a var-arg macro.
+type Macro = parser.Macro
+
+// MacroExpander converts a call and its associated arguments into a new CEL abstract syntax tree.
+//
+// If the MacroExpander determines within the implementation that an expansion is not needed it may return
+// a nil Expr value to indicate a non-match. However, if an expansion is to be performed, but the arguments
+// are not well-formed, the result of the expansion will be an error.
+//
+// The MacroExpander accepts as arguments a MacroExprHelper as well as the arguments used in the function call
+// and produces as output an Expr ast node.
+//
+// Note: when the Macro.IsReceiverStyle() method returns true, the target argument will be nil.
+type MacroExpander = parser.MacroExpander
+
+// MacroExprHelper exposes helper methods for creating new expressions within a CEL abstract syntax tree.
+type MacroExprHelper = parser.ExprHelper
+
+// NewGlobalMacro creates a Macro for a global function with the specified arg count.
+func NewGlobalMacro(function string, argCount int, expander MacroExpander) Macro {
+ return parser.NewGlobalMacro(function, argCount, expander)
+}
+
+// NewReceiverMacro creates a Macro for a receiver function matching the specified arg count.
+func NewReceiverMacro(function string, argCount int, expander MacroExpander) Macro {
+ return parser.NewReceiverMacro(function, argCount, expander)
+}
+
+// NewGlobalVarArgMacro creates a Macro for a global function with a variable arg count.
+func NewGlobalVarArgMacro(function string, expander MacroExpander) Macro {
+ return parser.NewGlobalVarArgMacro(function, expander)
+}
+
+// NewReceiverVarArgMacro creates a Macro for a receiver function matching a variable arg count.
+func NewReceiverVarArgMacro(function string, expander MacroExpander) Macro {
+ return parser.NewReceiverVarArgMacro(function, expander)
+}
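+
+// exampleSquareMacro is a minimal sketch, not part of the upstream cel-go
+// sources, of a custom global macro; the `square` function name is an
+// illustrative assumption. The macro expands square(x) into x * x and would
+// be installed with the Macros(...) EnvOption.
+func exampleSquareMacro() Macro {
+ return NewGlobalMacro("square", 1,
+ func(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
+ // Copy the argument so both operands receive distinct expression ids.
+ return meh.GlobalCall("_*_", args[0], meh.Copy(args[0])), nil
+ })
+}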
+
+// HasMacroExpander expands the input call arguments into a presence test, e.g. has(<operand>.field)
+func HasMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
+ return parser.MakeHas(meh, target, args)
+}
+
+// ExistsMacroExpander expands the input call arguments into a comprehension that returns true if any of the
+// elements in the range match the predicate expression:
+// <iterRange>.exists(<iterVar>, <predicate>)
+func ExistsMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
+ return parser.MakeExists(meh, target, args)
+}
+
+// ExistsOneMacroExpander expands the input call arguments into a comprehension that returns true if exactly
+// one of the elements in the range matches the predicate expression:
+// <iterRange>.exists_one(<iterVar>, <predicate>)
+func ExistsOneMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
+ return parser.MakeExistsOne(meh, target, args)
+}
+
+// MapMacroExpander expands the input call arguments into a comprehension that transforms each element in the
+// input to produce an output list.
+//
+// There are two call patterns supported by map:
+//
+// <iterRange>.map(<iterVar>, <transform>)
+// <iterRange>.map(<iterVar>, <predicate>, <transform>)
+//
+// In the second form only iterVar values which return true when provided to the predicate expression
+// are transformed.
+func MapMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
+ return parser.MakeMap(meh, target, args)
+}
+
+// FilterMacroExpander expands the input call arguments into a comprehension which produces a list which contains
+// only elements which match the provided predicate expression:
+// <iterRange>.filter(<iterVar>, <predicate>)
+func FilterMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
+ return parser.MakeFilter(meh, target, args)
+}
+
+var (
+ // Aliases to each macro in the CEL standard environment.
+ // Note: reassigning these macro variables may result in undefined behavior.
+
+ // HasMacro expands "has(m.f)" which tests the presence of a field, avoiding the need to
+ // specify the field as a string.
+ HasMacro = parser.HasMacro
+
+ // AllMacro expands "range.all(var, predicate)" into a comprehension which ensures that all
+ // elements in the range satisfy the predicate.
+ AllMacro = parser.AllMacro
+
+ // ExistsMacro expands "range.exists(var, predicate)" into a comprehension which ensures that
+ // some element in the range satisfies the predicate.
+ ExistsMacro = parser.ExistsMacro
+
+ // ExistsOneMacro expands "range.exists_one(var, predicate)", which is true if for exactly one
+ // element in range the predicate holds.
+ ExistsOneMacro = parser.ExistsOneMacro
+
+ // MapMacro expands "range.map(var, function)" into a comprehension which applies the function
+ // to each element in the range to produce a new list.
+ MapMacro = parser.MapMacro
+
+ // MapFilterMacro expands "range.map(var, predicate, function)" into a comprehension which
+ // first filters the elements in the range by the predicate, then applies the transform function
+ // to produce a new list.
+ MapFilterMacro = parser.MapFilterMacro
+
+ // FilterMacro expands "range.filter(var, predicate)" into a comprehension which filters
+ // elements in the range, producing a new list from the elements that satisfy the predicate.
+ FilterMacro = parser.FilterMacro
+
+ // StandardMacros provides an alias to all the CEL macros defined in the standard environment.
+ StandardMacros = []Macro{
+ HasMacro, AllMacro, ExistsMacro, ExistsOneMacro, MapMacro, MapFilterMacro, FilterMacro,
+ }
+
+ // NoMacros provides an alias to an empty list of macros
+ NoMacros = []Macro{}
+)
diff --git a/vendor/github.com/google/cel-go/cel/options.go b/vendor/github.com/google/cel-go/cel/options.go
new file mode 100644
index 0000000..0586773
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/options.go
@@ -0,0 +1,659 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "fmt"
+
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protodesc"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/types/dynamicpb"
+
+ "github.com/google/cel-go/checker"
+ "github.com/google/cel-go/common/containers"
+ "github.com/google/cel-go/common/functions"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/pb"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/interpreter"
+ "github.com/google/cel-go/parser"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ descpb "google.golang.org/protobuf/types/descriptorpb"
+)
+
+// These constants beginning with "Feature" enable optional behavior in
+// the library. See the documentation for each constant to see its
+// effects, compatibility restrictions, and standard conformance.
+const (
+ _ = iota
+
+ // Enable the tracking of function call expressions replaced by macros.
+ featureEnableMacroCallTracking
+
+ // Enable the use of cross-type numeric comparisons at the type-checker.
+ featureCrossTypeNumericComparisons
+
+ // Enable eager validation of declarations to ensure that Env values created
+ // with `Extend` inherit a validated list of declarations from the parent Env.
+ featureEagerlyValidateDeclarations
+
+ // Enable the use of the default UTC timezone when a timezone is not specified
+ // on a CEL timestamp operation. This fixes the scenario where the input time
+ // is not already in UTC.
+ featureDefaultUTCTimeZone
+
+ // Enable the serialization of logical operator ASTs as variadic calls, thus
+ // compressing the logic graph to a single call when multiple like-operator
+ // expressions occur: e.g. a && b && c && d -> call(_&&_, [a, b, c, d])
+ featureVariadicLogicalASTs
+)
+
+// EnvOption is a functional interface for configuring the environment.
+type EnvOption func(e *Env) (*Env, error)
+
+// ClearMacros option clears all parser macros.
+//
+// Clearing macros will ensure CEL expressions can only contain linear evaluation paths, as
+// comprehensions such as `all` and `exists` are enabled only via macros.
+func ClearMacros() EnvOption {
+ return func(e *Env) (*Env, error) {
+ e.macros = NoMacros
+ return e, nil
+ }
+}
+
+// CustomTypeAdapter swaps the default types.Adapter implementation with a custom one.
+//
+// Note: This option must be specified before the Types and TypeDescs options when used together.
+func CustomTypeAdapter(adapter types.Adapter) EnvOption {
+ return func(e *Env) (*Env, error) {
+ e.adapter = adapter
+ return e, nil
+ }
+}
+
+// CustomTypeProvider replaces the types.Provider implementation with a custom one.
+//
+// The `provider` variable type may either be types.Provider or ref.TypeProvider (deprecated)
+//
+// Note: This option must be specified before the Types and TypeDescs options when used together.
+func CustomTypeProvider(provider any) EnvOption {
+ return func(e *Env) (*Env, error) {
+ var err error
+ e.provider, err = maybeInteropProvider(provider)
+ return e, err
+ }
+}
+
+// Declarations option extends the declaration set configured in the environment.
+//
+// Note: Declarations will by default be appended to the pre-existing declaration set configured
+// for the environment. The NewEnv call builds on top of the standard CEL declarations. For a
+// purely custom set of declarations use NewCustomEnv.
+func Declarations(decls ...*exprpb.Decl) EnvOption {
+ declOpts := []EnvOption{}
+ var err error
+ var opt EnvOption
+ // Convert the declarations to `EnvOption` values ahead of time.
+ // Surface any errors in conversion when the options are applied.
+ for _, d := range decls {
+ opt, err = ExprDeclToDeclaration(d)
+ if err != nil {
+ break
+ }
+ declOpts = append(declOpts, opt)
+ }
+ return func(e *Env) (*Env, error) {
+ if err != nil {
+ return nil, err
+ }
+ for _, o := range declOpts {
+ e, err = o(e)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return e, nil
+ }
+}
+
+// EagerlyValidateDeclarations ensures that any collisions between configured declarations are caught
+// at the time of the `NewEnv` call.
+//
+// Eagerly validating declarations is also useful for bootstrapping a base `cel.Env` value.
+// Calls to base `Env.Extend()` will be significantly faster when declarations are eagerly validated
+// as declarations will be collision-checked at most once and only incrementally by way of `Extend`
+//
+// Disabled by default as not all environments are used for type-checking.
+func EagerlyValidateDeclarations(enabled bool) EnvOption {
+ return features(featureEagerlyValidateDeclarations, enabled)
+}
+
+// HomogeneousAggregateLiterals disables mixed type list and map literal values.
+//
+// Note, it is still possible to have heterogeneous aggregates when provided as variables to the
+// expression, as well as via conversion of well-known dynamic types, or with unchecked
+// expressions.
+func HomogeneousAggregateLiterals() EnvOption {
+ return ASTValidators(ValidateHomogeneousAggregateLiterals())
+}
+
+// variadicLogicalOperatorASTs flattens like-operator chained logical expressions into a single
+// variadic call with N-terms. This behavior is useful when serializing to a protocol buffer as
+// it will reduce the number of recursive calls needed to deserialize the AST later.
+//
+// For example, given the following expression the call graph will be rendered accordingly:
+//
+// expression: a && b && c && (d || e)
+// ast: call(_&&_, [a, b, c, call(_||_, [d, e])])
+func variadicLogicalOperatorASTs() EnvOption {
+ return features(featureVariadicLogicalASTs, true)
+}
+
+// Macros option extends the macro set configured in the environment.
+//
+// Note: This option must be specified after ClearMacros if used together.
+func Macros(macros ...Macro) EnvOption {
+ return func(e *Env) (*Env, error) {
+ e.macros = append(e.macros, macros...)
+ return e, nil
+ }
+}
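+
+// A minimal usage sketch of the ordering noted above, enabling only the `has`
+// macro (error handling elided):
+//
+// env, err := NewEnv(ClearMacros(), Macros(HasMacro))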
+
+// Container sets the container for resolving variable names. Defaults to an empty container.
+//
+// If all references within an expression are relative to a protocol buffer package, then
+// specifying a container of `google.type` would make it possible to write expressions such as
+// `Expr{expression: 'a < b'}` instead of having to write `google.type.Expr{...}`.
+func Container(name string) EnvOption {
+ return func(e *Env) (*Env, error) {
+ cont, err := e.Container.Extend(containers.Name(name))
+ if err != nil {
+ return nil, err
+ }
+ e.Container = cont
+ return e, nil
+ }
+}
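+
+// A minimal usage sketch: with the container below, `Expr{...}` within a CEL
+// program resolves to `google.type.Expr{...}`:
+//
+// env, err := NewEnv(Container("google.type"))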
+
+// Abbrevs configures a set of simple names as abbreviations for fully-qualified names.
+//
+// An abbreviation (abbrev for short) is a simple name that expands to a fully-qualified name.
+// Abbreviations can be useful when working with variables, functions, and especially types from
+// multiple namespaces:
+//
+// // CEL object construction
+// qual.pkg.version.ObjTypeName{
+// field: alt.container.ver.FieldTypeName{value: ...}
+// }
+//
+// Only one of the qualified names above may be used as the CEL container, so at least one of these
+// references must be a long qualified name within an otherwise short CEL program. Using the
+// following abbreviations, the program becomes much simpler:
+//
+// // CEL Go option
+// Abbrevs("qual.pkg.version.ObjTypeName", "alt.container.ver.FieldTypeName")
+// // Simplified Object construction
+// ObjTypeName{field: FieldTypeName{value: ...}}
+//
+// There are a few rules for the qualified names and the simple abbreviations generated from them:
+// - Qualified names must be dot-delimited, e.g. `package.subpkg.name`.
+// - The last element in the qualified name is the abbreviation.
+// - Abbreviations must not collide with each other.
+// - The abbreviation must not collide with unqualified names in use.
+//
+// Abbreviations are distinct from container-based references in the following important ways:
+// - Abbreviations must expand to a fully-qualified name.
+// - Expanded abbreviations do not participate in namespace resolution.
+// - Abbreviation expansion is done instead of the container search for a matching identifier.
+// - Containers follow C++ namespace resolution rules with searches from the most qualified name
+//   to the least qualified name.
+// - Container references within the CEL program may be relative, and are resolved to fully
+//   qualified names at either type-check time or program plan time, whichever comes first.
+//
+// If there is ever a case where an identifier could be in both the container and as an
+// abbreviation, the abbreviation wins as this will ensure that the meaning of a program is
+// preserved between compilations even as the container evolves.
+func Abbrevs(qualifiedNames ...string) EnvOption {
+ return func(e *Env) (*Env, error) {
+ cont, err := e.Container.Extend(containers.Abbrevs(qualifiedNames...))
+ if err != nil {
+ return nil, err
+ }
+ e.Container = cont
+ return e, nil
+ }
+}
+
+// Types adds one or more type declarations to the environment, allowing for construction of
+// type-literals whose definitions are included in the common expression built-in set.
+//
+// The input types may either be instances of `proto.Message` or `ref.Type`. Any other type
+// provided to this option will result in an error.
+//
+// Well-known protobuf types within the `google.protobuf.*` package are included in the standard
+// environment by default.
+//
+// Note: This option must be specified after the CustomTypeProvider option when used together.
+func Types(addTypes ...any) EnvOption {
+ return func(e *Env) (*Env, error) {
+ var reg ref.TypeRegistry
+ var isReg bool
+ reg, isReg = e.provider.(*types.Registry)
+ if !isReg {
+ reg, isReg = e.provider.(ref.TypeRegistry)
+ }
+ if !isReg {
+ return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider)
+ }
+ for _, t := range addTypes {
+ switch v := t.(type) {
+ case proto.Message:
+ fdMap := pb.CollectFileDescriptorSet(v)
+ for _, fd := range fdMap {
+ err := reg.RegisterDescriptor(fd)
+ if err != nil {
+ return nil, err
+ }
+ }
+ case ref.Type:
+ err := reg.RegisterType(v)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("unsupported type: %T", t)
+ }
+ }
+ return e, nil
+ }
+}
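+
+// A minimal usage sketch; `myapppb.MyMessage` stands in for any generated
+// proto.Message type and is illustrative only:
+//
+// env, err := NewEnv(Types(&myapppb.MyMessage{}))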
+
+// TypeDescs adds type declarations from any protoreflect.FileDescriptor, protoregistry.Files,
+// google.protobuf.FileDescriptorProto or google.protobuf.FileDescriptorSet provided.
+//
+// Note that messages instantiated from these descriptors will be *dynamicpb.Message values
+// rather than the concrete message type.
+//
+// TypeDescs are hermetic to a single Env object, but may be copied to other Env values via
+// extension or by re-using the same EnvOption with another NewEnv() call.
+func TypeDescs(descs ...any) EnvOption {
+ return func(e *Env) (*Env, error) {
+ reg, isReg := e.provider.(ref.TypeRegistry)
+ if !isReg {
+ return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider)
+ }
+ // Scan the input descriptors for FileDescriptorProto messages and accumulate them into a
+ // synthetic FileDescriptorSet as the FileDescriptorProto messages may refer to each other
+ // and will not resolve properly unless they are part of the same set.
+ var fds *descpb.FileDescriptorSet
+ for _, d := range descs {
+ switch f := d.(type) {
+ case *descpb.FileDescriptorProto:
+ if fds == nil {
+ fds = &descpb.FileDescriptorSet{
+ File: []*descpb.FileDescriptorProto{},
+ }
+ }
+ fds.File = append(fds.File, f)
+ }
+ }
+ if fds != nil {
+ if err := registerFileSet(reg, fds); err != nil {
+ return nil, err
+ }
+ }
+ for _, d := range descs {
+ switch f := d.(type) {
+ case *protoregistry.Files:
+ if err := registerFiles(reg, f); err != nil {
+ return nil, err
+ }
+ case protoreflect.FileDescriptor:
+ if err := reg.RegisterDescriptor(f); err != nil {
+ return nil, err
+ }
+ case *descpb.FileDescriptorSet:
+ if err := registerFileSet(reg, f); err != nil {
+ return nil, err
+ }
+ case *descpb.FileDescriptorProto:
+ // skip, handled as a synthetic file descriptor set.
+ default:
+ return nil, fmt.Errorf("unsupported type descriptor: %T", d)
+ }
+ }
+ return e, nil
+ }
+}
+
+func registerFileSet(reg ref.TypeRegistry, fileSet *descpb.FileDescriptorSet) error {
+ files, err := protodesc.NewFiles(fileSet)
+ if err != nil {
+ return fmt.Errorf("protodesc.NewFiles(%v) failed: %v", fileSet, err)
+ }
+ return registerFiles(reg, files)
+}
+
+func registerFiles(reg ref.TypeRegistry, files *protoregistry.Files) error {
+ var err error
+ files.RangeFiles(func(fd protoreflect.FileDescriptor) bool {
+ err = reg.RegisterDescriptor(fd)
+ return err == nil
+ })
+ return err
+}
+
+// ProgramOption is a functional interface for configuring evaluation bindings and behaviors.
+type ProgramOption func(p *prog) (*prog, error)
+
+// CustomDecorator appends an InterpretableDecorator to the program.
+//
+// InterpretableDecorators can be used to inspect, alter, or replace the Program plan.
+func CustomDecorator(dec interpreter.InterpretableDecorator) ProgramOption {
+ return func(p *prog) (*prog, error) {
+ p.decorators = append(p.decorators, dec)
+ return p, nil
+ }
+}
+
+// Functions adds function overloads that extend or override the set of CEL built-ins.
+//
+// Deprecated: use Function() instead to declare the function, its overload signatures,
+// and the overload implementations.
+func Functions(funcs ...*functions.Overload) ProgramOption {
+ return func(p *prog) (*prog, error) {
+ if err := p.dispatcher.Add(funcs...); err != nil {
+ return nil, err
+ }
+ return p, nil
+ }
+}
+
+// Globals sets the global variable values for a given program. These values may be shadowed by
+// variables with the same name provided to the Eval() call. If Globals is used in a Library with
+// a Lib EnvOption, vars may shadow variables provided by previously added libraries.
+//
+// The vars value may either be an `interpreter.Activation` instance or a `map[string]any`.
+func Globals(vars any) ProgramOption {
+ return func(p *prog) (*prog, error) {
+ defaultVars, err := interpreter.NewActivation(vars)
+ if err != nil {
+ return nil, err
+ }
+ if p.defaultVars != nil {
+ defaultVars = interpreter.NewHierarchicalActivation(p.defaultVars, defaultVars)
+ }
+ p.defaultVars = defaultVars
+ return p, nil
+ }
+}
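+
+// A minimal usage sketch: `region` is bound at program construction and may be
+// shadowed by an Eval() input of the same name:
+//
+// prg, err := env.Program(ast, Globals(map[string]any{"region": "us-east1"}))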
+
+// OptimizeRegex provides a way to replace the InterpretableCall for regex functions. This can be used
+// to compile regex string constants at program creation time and report any errors and then use the
+// compiled regex for all regex function invocations.
+func OptimizeRegex(regexOptimizations ...*interpreter.RegexOptimization) ProgramOption {
+ return func(p *prog) (*prog, error) {
+ p.regexOptimizations = append(p.regexOptimizations, regexOptimizations...)
+ return p, nil
+ }
+}
+
+// EvalOption indicates an evaluation option that may affect the evaluation behavior or information
+// in the output result.
+type EvalOption int
+
+const (
+ // OptTrackState will cause the runtime to return an immutable EvalState value in the Result.
+ OptTrackState EvalOption = 1 << iota
+
+ // OptExhaustiveEval causes the runtime to disable short-circuits and track state.
+ OptExhaustiveEval EvalOption = 1<<iota | OptTrackState
+
+ // Enable interrupt checking if there's a non-zero check frequency
+ if p.interruptCheckFrequency > 0 {
+ decorators = append(decorators, interpreter.InterruptableEval())
+ }
+ // Enable constant folding first.
+ if p.evalOpts&OptOptimize == OptOptimize {
+ decorators = append(decorators, interpreter.Optimize())
+ p.regexOptimizations = append(p.regexOptimizations, interpreter.MatchesRegexOptimization)
+ }
+ // Enable regex compilation of constants immediately after folding constants.
+ if len(p.regexOptimizations) > 0 {
+ decorators = append(decorators, interpreter.CompileRegexConstants(p.regexOptimizations...))
+ }
+ // Enable compile-time checking of syntax/cardinality for string.format calls.
+ if p.evalOpts&OptCheckStringFormat == OptCheckStringFormat {
+ var isValidType func(id int64, validTypes ...ref.Type) (bool, error)
+ if ast.IsChecked() {
+ isValidType = func(id int64, validTypes ...ref.Type) (bool, error) {
+ t := ast.typeMap[id]
+ if t.Kind() == DynKind {
+ return true, nil
+ }
+ for _, vt := range validTypes {
+ k, err := typeValueToKind(vt)
+ if err != nil {
+ return false, err
+ }
+ if t.Kind() == k {
+ return true, nil
+ }
+ }
+ return false, nil
+ }
+ } else {
+ // if the AST isn't type-checked, short-circuit validation
+ isValidType = func(id int64, validTypes ...ref.Type) (bool, error) {
+ return true, nil
+ }
+ }
+ decorators = append(decorators, interpreter.InterpolateFormattedString(isValidType))
+ }
+
+ // Enable exhaustive eval, state tracking and cost tracking last since they require a factory.
+ if p.evalOpts&(OptExhaustiveEval|OptTrackState|OptTrackCost) != 0 {
+ factory := func(state interpreter.EvalState, costTracker *interpreter.CostTracker) (Program, error) {
+ costTracker.Estimator = p.callCostEstimator
+ costTracker.Limit = p.costLimit
+ for _, costOpt := range p.costOptions {
+ err := costOpt(costTracker)
+ if err != nil {
+ return nil, err
+ }
+ }
+ // Limit capacity to guarantee a reallocation when calling 'append(decs, ...)' below. This
+ // prevents the underlying memory from being shared between factory function calls causing
+ // undesired mutations.
+ decs := decorators[:len(decorators):len(decorators)]
+ var observers []interpreter.EvalObserver
+
+ if p.evalOpts&(OptExhaustiveEval|OptTrackState) != 0 {
+ // EvalStateObserver is required for OptExhaustiveEval.
+ observers = append(observers, interpreter.EvalStateObserver(state))
+ }
+ if p.evalOpts&OptTrackCost == OptTrackCost {
+ observers = append(observers, interpreter.CostObserver(costTracker))
+ }
+
+ // Enable exhaustive eval over a basic observer since it offers a superset of features.
+ if p.evalOpts&OptExhaustiveEval == OptExhaustiveEval {
+ decs = append(decs, interpreter.ExhaustiveEval(), interpreter.Observe(observers...))
+ } else if len(observers) > 0 {
+ decs = append(decs, interpreter.Observe(observers...))
+ }
+
+ return p.clone().initInterpretable(ast, decs)
+ }
+ return newProgGen(factory)
+ }
+ return p.initInterpretable(ast, decorators)
+}
+
+func (p *prog) initInterpretable(ast *Ast, decs []interpreter.InterpretableDecorator) (*prog, error) {
+ // Unchecked programs do not contain type and reference information and may be slower to execute.
+ if !ast.IsChecked() {
+ interpretable, err :=
+ p.interpreter.NewUncheckedInterpretable(ast.Expr(), decs...)
+ if err != nil {
+ return nil, err
+ }
+ p.interpretable = interpretable
+ return p, nil
+ }
+
+ // When the AST has been checked it contains metadata that can be used to speed up program execution.
+ checked := &celast.CheckedAST{
+ Expr: ast.Expr(),
+ SourceInfo: ast.SourceInfo(),
+ TypeMap: ast.typeMap,
+ ReferenceMap: ast.refMap,
+ }
+ interpretable, err := p.interpreter.NewInterpretable(checked, decs...)
+ if err != nil {
+ return nil, err
+ }
+ p.interpretable = interpretable
+ return p, nil
+}
+
+// Eval implements the Program interface method.
+func (p *prog) Eval(input any) (v ref.Val, det *EvalDetails, err error) {
+ // Configure error recovery for unexpected panics during evaluation. Note, the use of named
+ // return values makes it possible to modify the error response during the recovery
+ // function.
+ defer func() {
+ if r := recover(); r != nil {
+ switch t := r.(type) {
+ case interpreter.EvalCancelledError:
+ err = t
+ default:
+ err = fmt.Errorf("internal error: %v", r)
+ }
+ }
+ }()
+ // Build a hierarchical activation if there are default vars set.
+ var vars interpreter.Activation
+ switch v := input.(type) {
+ case interpreter.Activation:
+ vars = v
+ case map[string]any:
+ vars = activationPool.Setup(v)
+ defer activationPool.Put(vars)
+ default:
+ return nil, nil, fmt.Errorf("invalid input, wanted Activation or map[string]any, got: (%T)%v", input, input)
+ }
+ if p.defaultVars != nil {
+ vars = interpreter.NewHierarchicalActivation(p.defaultVars, vars)
+ }
+ v = p.interpretable.Eval(vars)
+ // The output of an internal Eval may have a value (`v`) that is a types.Err. This step
+ // translates the CEL value to a Go error response. This interface does not quite match the
+ // RPC signature which allows for multiple errors to be returned, but should be sufficient.
+ if types.IsError(v) {
+ err = v.(*types.Err)
+ }
+ return
+}
+
+// ContextEval implements the Program interface.
+func (p *prog) ContextEval(ctx context.Context, input any) (ref.Val, *EvalDetails, error) {
+ if ctx == nil {
+ return nil, nil, fmt.Errorf("context can not be nil")
+ }
+ // Configure the input, making sure to wrap Activation inputs in the special ctxActivation which
+ // exposes the #interrupted variable and manages rate-limited checks of the ctx.Done() state.
+ var vars interpreter.Activation
+ switch v := input.(type) {
+ case interpreter.Activation:
+ vars = ctxActivationPool.Setup(v, ctx.Done(), p.interruptCheckFrequency)
+ defer ctxActivationPool.Put(vars)
+ case map[string]any:
+ rawVars := activationPool.Setup(v)
+ defer activationPool.Put(rawVars)
+ vars = ctxActivationPool.Setup(rawVars, ctx.Done(), p.interruptCheckFrequency)
+ defer ctxActivationPool.Put(vars)
+ default:
+ return nil, nil, fmt.Errorf("invalid input, wanted Activation or map[string]any, got: (%T)%v", input, input)
+ }
+ return p.Eval(vars)
+}
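+
+// A minimal usage sketch: bounding an evaluation with a deadline. How often the
+// interpreter polls ctx.Done() is governed by the configured interrupt check
+// frequency:
+//
+// ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
+// defer cancel()
+// val, _, err := prg.ContextEval(ctx, map[string]any{"x": 1})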
+
+// progFactory is a helper alias for marking a program creation factory function.
+type progFactory func(interpreter.EvalState, *interpreter.CostTracker) (Program, error)
+
+// progGen holds a reference to a progFactory instance and implements the Program interface.
+type progGen struct {
+ factory progFactory
+}
+
+// newProgGen tests the factory object by calling it once and returns a factory-based Program if
+// the test is successful.
+func newProgGen(factory progFactory) (Program, error) {
+ // Test the factory to make sure that configuration errors are spotted at config time.
+ tracker, err := interpreter.NewCostTracker(nil)
+ if err != nil {
+ return nil, err
+ }
+ _, err = factory(interpreter.NewEvalState(), tracker)
+ if err != nil {
+ return nil, err
+ }
+ return &progGen{factory: factory}, nil
+}
+
+// Eval implements the Program interface method.
+func (gen *progGen) Eval(input any) (ref.Val, *EvalDetails, error) {
+ // The factory based Eval() differs from the standard evaluation model in that it generates a
+ // new EvalState instance for each call to ensure that unique evaluations yield unique stateful
+ // results.
+ state := interpreter.NewEvalState()
+ costTracker, err := interpreter.NewCostTracker(nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ det := &EvalDetails{state: state, costTracker: costTracker}
+
+ // Generate a new instance of the interpretable using the factory configured during the call to
+ // newProgram(). It is incredibly unlikely that the factory call will generate an error given
+ // the factory test performed within the Program() call.
+ p, err := gen.factory(state, costTracker)
+ if err != nil {
+ return nil, det, err
+ }
+
+ // Evaluate the input, returning the result and the 'state' within EvalDetails.
+ v, _, err := p.Eval(input)
+ if err != nil {
+ return v, det, err
+ }
+ return v, det, nil
+}
+
+// ContextEval implements the Program interface method.
+func (gen *progGen) ContextEval(ctx context.Context, input any) (ref.Val, *EvalDetails, error) {
+ if ctx == nil {
+ return nil, nil, fmt.Errorf("context can not be nil")
+ }
+ // The factory based Eval() differs from the standard evaluation model in that it generates a
+ // new EvalState instance for each call to ensure that unique evaluations yield unique stateful
+ // results.
+ state := interpreter.NewEvalState()
+ costTracker, err := interpreter.NewCostTracker(nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ det := &EvalDetails{state: state, costTracker: costTracker}
+
+ // Generate a new instance of the interpretable using the factory configured during the call to
+ // newProgram(). It is incredibly unlikely that the factory call will generate an error given
+ // the factory test performed within the Program() call.
+ p, err := gen.factory(state, costTracker)
+ if err != nil {
+ return nil, det, err
+ }
+
+ // Evaluate the input, returning the result and the 'state' within EvalDetails.
+ v, _, err := p.ContextEval(ctx, input)
+ if err != nil {
+ return v, det, err
+ }
+ return v, det, nil
+}
+
+type ctxEvalActivation struct {
+ parent interpreter.Activation
+ interrupt <-chan struct{}
+ interruptCheckCount uint
+ interruptCheckFrequency uint
+}
+
+// ResolveName implements the Activation interface method, but adds a special #interrupted variable
+// which is capable of testing whether a 'done' signal is provided from a context.Context channel.
+func (a *ctxEvalActivation) ResolveName(name string) (any, bool) {
+ if name == "#interrupted" {
+ a.interruptCheckCount++
+ if a.interruptCheckCount%a.interruptCheckFrequency == 0 {
+ select {
+ case <-a.interrupt:
+ return true, true
+ default:
+ return nil, false
+ }
+ }
+ return nil, false
+ }
+ return a.parent.ResolveName(name)
+}
+
+func (a *ctxEvalActivation) Parent() interpreter.Activation {
+ return a.parent
+}
+
+func newCtxEvalActivationPool() *ctxEvalActivationPool {
+ return &ctxEvalActivationPool{
+ Pool: sync.Pool{
+ New: func() any {
+ return &ctxEvalActivation{}
+ },
+ },
+ }
+}
+
+type ctxEvalActivationPool struct {
+ sync.Pool
+}
+
+// Setup initializes a pooled Activation with the ability to check for context.Context cancellation
+func (p *ctxEvalActivationPool) Setup(vars interpreter.Activation, done <-chan struct{}, interruptCheckRate uint) *ctxEvalActivation {
+ a := p.Pool.Get().(*ctxEvalActivation)
+ a.parent = vars
+ a.interrupt = done
+ a.interruptCheckCount = 0
+ a.interruptCheckFrequency = interruptCheckRate
+ return a
+}
+
+type evalActivation struct {
+ vars map[string]any
+ lazyVars map[string]any
+}
+
+// ResolveName looks up the value of the input variable name, if found.
+//
+// Lazy bindings may be supplied within the map-based input in either of the following forms:
+// - func() any
+// - func() ref.Val
+//
+// The lazy binding will only be invoked once per evaluation.
+//
+// Values which are not represented as ref.Val types on input may be adapted to a ref.Val using
+// the types.Adapter configured in the environment.
+func (a *evalActivation) ResolveName(name string) (any, bool) {
+ v, found := a.vars[name]
+ if !found {
+ return nil, false
+ }
+ switch obj := v.(type) {
+ case func() ref.Val:
+ if resolved, found := a.lazyVars[name]; found {
+ return resolved, true
+ }
+ lazy := obj()
+ a.lazyVars[name] = lazy
+ return lazy, true
+ case func() any:
+ if resolved, found := a.lazyVars[name]; found {
+ return resolved, true
+ }
+ lazy := obj()
+ a.lazyVars[name] = lazy
+ return lazy, true
+ default:
+ return obj, true
+ }
+}
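+
+// A minimal usage sketch of a lazy binding: the func is invoked at most once
+// per evaluation and its result memoized in lazyVars:
+//
+// vars := map[string]any{
+// "now": func() ref.Val { return types.DefaultTypeAdapter.NativeToValue(time.Now()) },
+// }
+// val, _, err := prg.Eval(vars)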
+
+// Parent implements the interpreter.Activation interface
+func (a *evalActivation) Parent() interpreter.Activation {
+ return nil
+}
+
+func newEvalActivationPool() *evalActivationPool {
+ return &evalActivationPool{
+ Pool: sync.Pool{
+ New: func() any {
+ return &evalActivation{lazyVars: make(map[string]any)}
+ },
+ },
+ }
+}
+
+type evalActivationPool struct {
+ sync.Pool
+}
+
+// Setup initializes a pooled Activation object with the map input.
+func (p *evalActivationPool) Setup(vars map[string]any) *evalActivation {
+ a := p.Pool.Get().(*evalActivation)
+ a.vars = vars
+ return a
+}
+
+func (p *evalActivationPool) Put(value any) {
+ a := value.(*evalActivation)
+ for k := range a.lazyVars {
+ delete(a.lazyVars, k)
+ }
+ p.Pool.Put(a)
+}
+
+var (
+ emptyEvalState = interpreter.NewEvalState()
+
+ // activationPool is an internally managed pool of Activation values that wrap map[string]any inputs
+ activationPool = newEvalActivationPool()
+
+ // ctxActivationPool is an internally managed pool of Activation values that expose a special #interrupted variable
+ ctxActivationPool = newCtxEvalActivationPool()
+)
diff --git a/vendor/github.com/google/cel-go/cel/validator.go b/vendor/github.com/google/cel-go/cel/validator.go
new file mode 100644
index 0000000..78b3113
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/validator.go
@@ -0,0 +1,388 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/overloads"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+const (
+ homogeneousValidatorName = "cel.lib.std.validate.types.homogeneous"
+
+ // HomogeneousAggregateLiteralExemptFunctions is the ValidatorConfig key used to configure
+ // the set of function names which are exempt from homogeneous type checks. The expected type
+ // is a string list of function names.
+ //
+ // As an example, the `<string>.format([args])` call expects the input arguments list to be
+ // comprised of a variety of types which correspond to the types expected by the format control
+ // clauses; however, all other uses of a mixed element type list would be unexpected.
+ HomogeneousAggregateLiteralExemptFunctions = homogeneousValidatorName + ".exempt"
+)
+
+// ASTValidators configures a set of ASTValidator instances into the target environment.
+//
+// Validators are applied in the order in which they are specified and are treated as singletons.
+// The same ASTValidator with a given name will not be applied more than once.
+func ASTValidators(validators ...ASTValidator) EnvOption {
+ return func(e *Env) (*Env, error) {
+ for _, v := range validators {
+ if !e.HasValidator(v.Name()) {
+ e.validators = append(e.validators, v)
+ }
+ }
+ return e, nil
+ }
+}
+
+// ASTValidator defines a singleton interface for validating a type-checked Ast against an environment.
+//
+// Note: the Issues argument is mutable in the sense that it is intended to collect errors which will be
+// reported to the caller.
+type ASTValidator interface {
+ // Name returns the name of the validator. Names must be unique.
+ Name() string
+
+ // Validate validates a given Ast within an Environment and collects a set of potential issues.
+ //
+ // The ValidatorConfig is generated from the set of ASTValidatorConfigurer instances prior to
+ // the invocation of the Validate call. The expectation is that the validator configuration
+ // is created in sequence and immutable once provided to the Validate call.
+ //
+ // See individual validators for more information on their configuration keys and configuration
+ // properties.
+ Validate(*Env, ValidatorConfig, *ast.CheckedAST, *Issues)
+}
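+
+// A minimal sketch of a custom validator which rejects division; the type name
+// and message are illustrative, and an import of common/operators is assumed:
+//
+// type noDivValidator struct{}
+//
+// func (noDivValidator) Name() string { return "example.validate.no_division" }
+//
+// func (noDivValidator) Validate(_ *Env, _ ValidatorConfig, a *ast.CheckedAST, iss *Issues) {
+// root := ast.NavigateCheckedAST(a)
+// for _, call := range ast.MatchDescendants(root, ast.FunctionMatcher(operators.Divide)) {
+// iss.ReportErrorAtID(call.ID(), "division is not permitted")
+// }
+// }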
+
+// ValidatorConfig provides an accessor method for querying validator configuration state.
+type ValidatorConfig interface {
+ GetOrDefault(name string, value any) any
+}
+
+// MutableValidatorConfig provides mutation methods for querying and updating validator configuration
+// settings.
+type MutableValidatorConfig interface {
+ ValidatorConfig
+ Set(name string, value any) error
+}
+
+// ASTValidatorConfigurer indicates that this object, currently expected to be an ASTValidator,
+// participates in validator configuration settings.
+//
+// This interface may be split from the expectation of being an ASTValidator instance in the future.
+type ASTValidatorConfigurer interface {
+ Configure(MutableValidatorConfig) error
+}
+
+// validatorConfig implements the ValidatorConfig and MutableValidatorConfig interfaces.
+type validatorConfig struct {
+ data map[string]any
+}
+
+// newValidatorConfig initializes the validator config with default values for core CEL validators.
+func newValidatorConfig() *validatorConfig {
+ return &validatorConfig{
+ data: map[string]any{
+ HomogeneousAggregateLiteralExemptFunctions: []string{},
+ },
+ }
+}
+
+// GetOrDefault returns the configured value for the name, if present, else the input default value.
+//
+// Note, the type-agreement between the input default and configured value is not checked on read.
+func (config *validatorConfig) GetOrDefault(name string, value any) any {
+ v, found := config.data[name]
+ if !found {
+ return value
+ }
+ return v
+}
+
+// Set configures a validator option with the given name and value.
+//
+// If the value had previously been set, the new value must have the same reflection type as the old one,
+// or the call will error.
+func (config *validatorConfig) Set(name string, value any) error {
+ v, found := config.data[name]
+ if found && reflect.TypeOf(v) != reflect.TypeOf(value) {
+ return fmt.Errorf("incompatible configuration type for %s, got %T, wanted %T", name, value, v)
+ }
+ config.data[name] = value
+ return nil
+}
+
+// ExtendedValidations collects a set of common AST validations which reduce the likelihood of runtime errors.
+//
+// - Validate duration and timestamp literals
+// - Ensure regex strings are valid
+// - Disable mixed type list and map literals
+func ExtendedValidations() EnvOption {
+ return ASTValidators(
+ ValidateDurationLiterals(),
+ ValidateTimestampLiterals(),
+ ValidateRegexLiterals(),
+ ValidateHomogeneousAggregateLiterals(),
+ )
+}
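+
+// A minimal usage sketch:
+//
+// env, err := NewEnv(ExtendedValidations())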
+
+// ValidateDurationLiterals ensures that duration literal arguments are valid immediately after type-check.
+func ValidateDurationLiterals() ASTValidator {
+ return newFormatValidator(overloads.TypeConvertDuration, 0, evalCall)
+}
+
+// ValidateTimestampLiterals ensures that timestamp literal arguments are valid immediately after type-check.
+func ValidateTimestampLiterals() ASTValidator {
+ return newFormatValidator(overloads.TypeConvertTimestamp, 0, evalCall)
+}
+
+// ValidateRegexLiterals ensures that regex patterns are validated after type-check.
+func ValidateRegexLiterals() ASTValidator {
+ return newFormatValidator(overloads.Matches, 0, compileRegex)
+}
+
+// ValidateHomogeneousAggregateLiterals checks that all list and map literal entries have the same types, i.e.
+// no mixed list element types or mixed map key or map value types.
+//
+// Note: the string format call relies on a mixed element type list for ease of use, so this check skips all
+// literals which occur within string format calls.
+func ValidateHomogeneousAggregateLiterals() ASTValidator {
+ return homogeneousAggregateLiteralValidator{}
+}
+
+// ValidateComprehensionNestingLimit ensures that comprehension nesting does not exceed the specified limit.
+//
+// This validator can be useful for preventing arbitrarily nested comprehensions which can take high polynomial
+// time to complete.
+//
+// Note, this limit does not apply to comprehensions with an empty iteration range, as these comprehensions have
+// no actual looping cost. The cel.bind() macro utilizes the comprehension structure to perform local variable
+// assignments and supplies an empty iteration range, so bind() calls do not count against the nesting limit either.
+func ValidateComprehensionNestingLimit(limit int) ASTValidator {
+ return nestingLimitValidator{limit: limit}
+}
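+
+// A minimal usage sketch: reject expressions which nest comprehensions more
+// than two levels deep:
+//
+// env, err := NewEnv(ASTValidators(ValidateComprehensionNestingLimit(2)))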
+
+type argChecker func(env *Env, call, arg ast.NavigableExpr) error
+
+func newFormatValidator(funcName string, argNum int, check argChecker) formatValidator {
+ return formatValidator{
+ funcName: funcName,
+ check: check,
+ argNum: argNum,
+ }
+}
+
+type formatValidator struct {
+ funcName string
+ argNum int
+ check argChecker
+}
+
+// Name returns the unique name of this function format validator.
+func (v formatValidator) Name() string {
+ return fmt.Sprintf("cel.lib.std.validate.functions.%s", v.funcName)
+}
+
+// Validate searches the AST for uses of a given function name with a constant argument and performs a check
+// on whether the argument is a valid literal value.
+func (v formatValidator) Validate(e *Env, _ ValidatorConfig, a *ast.CheckedAST, iss *Issues) {
+ root := ast.NavigateCheckedAST(a)
+ funcCalls := ast.MatchDescendants(root, ast.FunctionMatcher(v.funcName))
+ for _, call := range funcCalls {
+ callArgs := call.AsCall().Args()
+ if len(callArgs) <= v.argNum {
+ continue
+ }
+ litArg := callArgs[v.argNum]
+ if litArg.Kind() != ast.LiteralKind {
+ continue
+ }
+ if err := v.check(e, call, litArg); err != nil {
+ iss.ReportErrorAtID(litArg.ID(), "invalid %s argument", v.funcName)
+ }
+ }
+}
+
+func evalCall(env *Env, call, arg ast.NavigableExpr) error {
+ ast := ParsedExprToAst(&exprpb.ParsedExpr{Expr: call.ToExpr()})
+ prg, err := env.Program(ast)
+ if err != nil {
+ return err
+ }
+ _, _, err = prg.Eval(NoVars())
+ return err
+}
+
+func compileRegex(_ *Env, _, arg ast.NavigableExpr) error {
+ pattern := arg.AsLiteral().Value().(string)
+ _, err := regexp.Compile(pattern)
+ return err
+}
+
+type homogeneousAggregateLiteralValidator struct{}
+
+// Name returns the unique name of the homogeneous type validator.
+func (homogeneousAggregateLiteralValidator) Name() string {
+ return homogeneousValidatorName
+}
+
+// Configure implements the ASTValidatorConfigurer interface and currently sets the list of standard
+// and exempt functions from homogeneous aggregate literal checks.
+//
+// TODO: Move this call into the string.format() ASTValidator once ported.
+func (homogeneousAggregateLiteralValidator) Configure(c MutableValidatorConfig) error {
+ emptyList := []string{}
+ exemptFunctions := c.GetOrDefault(HomogeneousAggregateLiteralExemptFunctions, emptyList).([]string)
+ exemptFunctions = append(exemptFunctions, "format")
+ return c.Set(HomogeneousAggregateLiteralExemptFunctions, exemptFunctions)
+}
+
+// Validate validates that all lists and map literals have homogeneous types, i.e. don't contain dyn types.
+//
+// This validator makes an exception for list and map literals which occur at any level of nesting within
+// string format calls.
+func (v homogeneousAggregateLiteralValidator) Validate(_ *Env, c ValidatorConfig, a *ast.CheckedAST, iss *Issues) {
+ var exemptedFunctions []string
+ exemptedFunctions = c.GetOrDefault(HomogeneousAggregateLiteralExemptFunctions, exemptedFunctions).([]string)
+ root := ast.NavigateCheckedAST(a)
+ listExprs := ast.MatchDescendants(root, ast.KindMatcher(ast.ListKind))
+ for _, listExpr := range listExprs {
+ if inExemptFunction(listExpr, exemptedFunctions) {
+ continue
+ }
+ l := listExpr.AsList()
+ elements := l.Elements()
+ optIndices := l.OptionalIndices()
+ var elemType *Type
+ for i, e := range elements {
+ et := e.Type()
+ if isOptionalIndex(i, optIndices) {
+ et = et.Parameters()[0]
+ }
+ if elemType == nil {
+ elemType = et
+ continue
+ }
+ if !elemType.IsEquivalentType(et) {
+ v.typeMismatch(iss, e.ID(), elemType, et)
+ break
+ }
+ }
+ }
+ mapExprs := ast.MatchDescendants(root, ast.KindMatcher(ast.MapKind))
+ for _, mapExpr := range mapExprs {
+ if inExemptFunction(mapExpr, exemptedFunctions) {
+ continue
+ }
+ m := mapExpr.AsMap()
+ entries := m.Entries()
+ var keyType, valType *Type
+ for _, e := range entries {
+ key, val := e.Key(), e.Value()
+ kt, vt := key.Type(), val.Type()
+ if e.IsOptional() {
+ vt = vt.Parameters()[0]
+ }
+ if keyType == nil && valType == nil {
+ keyType, valType = kt, vt
+ continue
+ }
+ if !keyType.IsEquivalentType(kt) {
+ v.typeMismatch(iss, key.ID(), keyType, kt)
+ }
+ if !valType.IsEquivalentType(vt) {
+ v.typeMismatch(iss, val.ID(), valType, vt)
+ }
+ }
+ }
+}
+
+func inExemptFunction(e ast.NavigableExpr, exemptFunctions []string) bool {
+ if parent, found := e.Parent(); found {
+ if parent.Kind() == ast.CallKind {
+ fnName := parent.AsCall().FunctionName()
+ for _, exempt := range exemptFunctions {
+ if exempt == fnName {
+ return true
+ }
+ }
+ }
+ if parent.Kind() == ast.ListKind || parent.Kind() == ast.MapKind {
+ return inExemptFunction(parent, exemptFunctions)
+ }
+ }
+ return false
+}
+
+func isOptionalIndex(i int, optIndices []int32) bool {
+ for _, optInd := range optIndices {
+ if i == int(optInd) {
+ return true
+ }
+ }
+ return false
+}
+
+func (homogeneousAggregateLiteralValidator) typeMismatch(iss *Issues, id int64, expected, actual *Type) {
+ iss.ReportErrorAtID(id, "expected type '%s' but found '%s'", FormatCELType(expected), FormatCELType(actual))
+}
+
+type nestingLimitValidator struct {
+ limit int
+}
+
+func (v nestingLimitValidator) Name() string {
+ return "cel.lib.std.validate.comprehension_nesting_limit"
+}
+
+func (v nestingLimitValidator) Validate(e *Env, _ ValidatorConfig, a *ast.CheckedAST, iss *Issues) {
+ root := ast.NavigateCheckedAST(a)
+ comprehensions := ast.MatchDescendants(root, ast.KindMatcher(ast.ComprehensionKind))
+ if len(comprehensions) <= v.limit {
+ return
+ }
+ for _, comp := range comprehensions {
+ count := 0
+ e := comp
+ hasParent := true
+ for hasParent {
+ // When the expression is not a comprehension, continue to the next ancestor.
+ if e.Kind() != ast.ComprehensionKind {
+ e, hasParent = e.Parent()
+ continue
+ }
+ // When the comprehension has an empty range, continue to the next ancestor
+ // as this comprehension does not have any associated cost.
+ iterRange := e.AsComprehension().IterRange()
+ if iterRange.Kind() == ast.ListKind && iterRange.AsList().Size() == 0 {
+ e, hasParent = e.Parent()
+ continue
+ }
+ // Otherwise check the nesting limit.
+ count++
+ if count > v.limit {
+ iss.ReportErrorAtID(comp.ID(), "comprehension exceeds nesting limit")
+ break
+ }
+ e, hasParent = e.Parent()
+ }
+ }
+}
diff --git a/vendor/github.com/google/cel-go/checker/BUILD.bazel b/vendor/github.com/google/cel-go/checker/BUILD.bazel
new file mode 100644
index 0000000..0459d35
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/BUILD.bazel
@@ -0,0 +1,66 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "checker.go",
+ "cost.go",
+ "env.go",
+ "errors.go",
+ "format.go",
+ "mapping.go",
+ "options.go",
+ "printer.go",
+ "scopes.go",
+ "standard.go",
+ "types.go",
+ ],
+ importpath = "github.com/google/cel-go/checker",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//checker/decls:go_default_library",
+ "//common:go_default_library",
+ "//common/ast:go_default_library",
+ "//common/containers:go_default_library",
+ "//common/debug:go_default_library",
+ "//common/decls:go_default_library",
+ "//common/operators:go_default_library",
+ "//common/overloads:go_default_library",
+ "//common/stdlib:go_default_library",
+ "//common/types:go_default_library",
+ "//common/types/pb:go_default_library",
+ "//common/types/ref:go_default_library",
+ "//parser:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//types/known/emptypb:go_default_library",
+ "@org_golang_google_protobuf//types/known/structpb:go_default_library",
+ ],
+)
+
+go_test(
+ name = "go_default_test",
+ size = "small",
+ srcs = [
+ "checker_test.go",
+ "cost_test.go",
+ "env_test.go",
+ "format_test.go",
+ ],
+ embed = [
+ ":go_default_library",
+ ],
+ deps = [
+ "//common/types:go_default_library",
+ "//parser:go_default_library",
+ "//test:go_default_library",
+ "//test/proto2pb:go_default_library",
+ "//test/proto3pb:go_default_library",
+ "@com_github_antlr_antlr4_runtime_go_antlr_v4//:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/checker/checker.go b/vendor/github.com/google/cel-go/checker/checker.go
new file mode 100644
index 0000000..720e4fa
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/checker.go
@@ -0,0 +1,760 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package checker defines functions to type-check a parsed expression
+// against a set of identifier and function declarations.
+package checker
+
+import (
+ "fmt"
+
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/containers"
+ "github.com/google/cel-go/common/decls"
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/types"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+type checker struct {
+ env *Env
+ errors *typeErrors
+ mappings *mapping
+ freeTypeVarCounter int
+ sourceInfo *exprpb.SourceInfo
+ types map[int64]*types.Type
+ references map[int64]*ast.ReferenceInfo
+}
+
+// Check performs type checking, giving a typed AST.
+// The input is a ParsedExpr proto and an env which encapsulates
+// type binding of variables, declarations of built-in functions,
+// descriptions of protocol buffers, and a registry for errors.
+// Returns a CheckedAST, which might not be usable if
+// there are errors in the error registry.
+func Check(parsedExpr *exprpb.ParsedExpr, source common.Source, env *Env) (*ast.CheckedAST, *common.Errors) {
+ errs := common.NewErrors(source)
+ c := checker{
+ env: env,
+ errors: &typeErrors{errs: errs},
+ mappings: newMapping(),
+ freeTypeVarCounter: 0,
+ sourceInfo: parsedExpr.GetSourceInfo(),
+ types: make(map[int64]*types.Type),
+ references: make(map[int64]*ast.ReferenceInfo),
+ }
+ c.check(parsedExpr.GetExpr())
+
+ // Walk over the final type map substituting any type parameters either by their bound value or
+ // by DYN.
+ m := make(map[int64]*types.Type)
+ for id, t := range c.types {
+ m[id] = substitute(c.mappings, t, true)
+ }
+
+ return &ast.CheckedAST{
+ Expr: parsedExpr.GetExpr(),
+ SourceInfo: parsedExpr.GetSourceInfo(),
+ TypeMap: m,
+ ReferenceMap: c.references,
+ }, errs
+}
+
+func (c *checker) check(e *exprpb.Expr) {
+ if e == nil {
+ return
+ }
+ switch e.GetExprKind().(type) {
+ case *exprpb.Expr_ConstExpr:
+ literal := e.GetConstExpr()
+ switch literal.GetConstantKind().(type) {
+ case *exprpb.Constant_BoolValue:
+ c.checkBoolLiteral(e)
+ case *exprpb.Constant_BytesValue:
+ c.checkBytesLiteral(e)
+ case *exprpb.Constant_DoubleValue:
+ c.checkDoubleLiteral(e)
+ case *exprpb.Constant_Int64Value:
+ c.checkInt64Literal(e)
+ case *exprpb.Constant_NullValue:
+ c.checkNullLiteral(e)
+ case *exprpb.Constant_StringValue:
+ c.checkStringLiteral(e)
+ case *exprpb.Constant_Uint64Value:
+ c.checkUint64Literal(e)
+ }
+ case *exprpb.Expr_IdentExpr:
+ c.checkIdent(e)
+ case *exprpb.Expr_SelectExpr:
+ c.checkSelect(e)
+ case *exprpb.Expr_CallExpr:
+ c.checkCall(e)
+ case *exprpb.Expr_ListExpr:
+ c.checkCreateList(e)
+ case *exprpb.Expr_StructExpr:
+ c.checkCreateStruct(e)
+ case *exprpb.Expr_ComprehensionExpr:
+ c.checkComprehension(e)
+ default:
+ c.errors.unexpectedASTType(e.GetId(), c.location(e), e)
+ }
+}
+
+func (c *checker) checkInt64Literal(e *exprpb.Expr) {
+ c.setType(e, types.IntType)
+}
+
+func (c *checker) checkUint64Literal(e *exprpb.Expr) {
+ c.setType(e, types.UintType)
+}
+
+func (c *checker) checkStringLiteral(e *exprpb.Expr) {
+ c.setType(e, types.StringType)
+}
+
+func (c *checker) checkBytesLiteral(e *exprpb.Expr) {
+ c.setType(e, types.BytesType)
+}
+
+func (c *checker) checkDoubleLiteral(e *exprpb.Expr) {
+ c.setType(e, types.DoubleType)
+}
+
+func (c *checker) checkBoolLiteral(e *exprpb.Expr) {
+ c.setType(e, types.BoolType)
+}
+
+func (c *checker) checkNullLiteral(e *exprpb.Expr) {
+ c.setType(e, types.NullType)
+}
+
+func (c *checker) checkIdent(e *exprpb.Expr) {
+ identExpr := e.GetIdentExpr()
+ // Check to see if the identifier is declared.
+ if ident := c.env.LookupIdent(identExpr.GetName()); ident != nil {
+ c.setType(e, ident.Type())
+ c.setReference(e, ast.NewIdentReference(ident.Name(), ident.Value()))
+ // Overwrite the identifier with its fully qualified name.
+ identExpr.Name = ident.Name()
+ return
+ }
+
+ c.setType(e, types.ErrorType)
+ c.errors.undeclaredReference(e.GetId(), c.location(e), c.env.container.Name(), identExpr.GetName())
+}
+
+func (c *checker) checkSelect(e *exprpb.Expr) {
+ sel := e.GetSelectExpr()
+ // Before traversing down the tree, try to interpret as qualified name.
+ qname, found := containers.ToQualifiedName(e)
+ if found {
+ ident := c.env.LookupIdent(qname)
+ if ident != nil {
+ // We don't check for a TestOnly expression here since the `found` result is
+ // always going to be false for TestOnly expressions.
+
+ // Rewrite the node to be a variable reference to the resolved fully-qualified
+ // variable name.
+ c.setType(e, ident.Type())
+ c.setReference(e, ast.NewIdentReference(ident.Name(), ident.Value()))
+ identName := ident.Name()
+ e.ExprKind = &exprpb.Expr_IdentExpr{
+ IdentExpr: &exprpb.Expr_Ident{
+ Name: identName,
+ },
+ }
+ return
+ }
+ }
+
+ resultType := c.checkSelectField(e, sel.GetOperand(), sel.GetField(), false)
+ if sel.TestOnly {
+ resultType = types.BoolType
+ }
+ c.setType(e, substitute(c.mappings, resultType, false))
+}
+
+func (c *checker) checkOptSelect(e *exprpb.Expr) {
+ // Collect metadata related to the opt select call packaged by the parser.
+ call := e.GetCallExpr()
+ operand := call.GetArgs()[0]
+ field := call.GetArgs()[1]
+ fieldName, isString := maybeUnwrapString(field)
+ if !isString {
+ c.errors.notAnOptionalFieldSelection(field.GetId(), c.location(field), field)
+ return
+ }
+
+ // Perform type-checking using the field selection logic.
+ resultType := c.checkSelectField(e, operand, fieldName, true)
+ c.setType(e, substitute(c.mappings, resultType, false))
+ c.setReference(e, ast.NewFunctionReference("select_optional_field"))
+}
+
+func (c *checker) checkSelectField(e, operand *exprpb.Expr, field string, optional bool) *types.Type {
+ // Interpret as field selection, first traversing down the operand.
+ c.check(operand)
+ operandType := substitute(c.mappings, c.getType(operand), false)
+
+ // If the target type is 'optional', unwrap it for the sake of this check.
+ targetType, isOpt := maybeUnwrapOptional(operandType)
+
+ // Assume error type by default as most types do not support field selection.
+ resultType := types.ErrorType
+ switch targetType.Kind() {
+ case types.MapKind:
+ // Maps yield their value type as the selection result type.
+ resultType = targetType.Parameters()[1]
+ case types.StructKind:
+ // Objects yield their field type declaration as the selection result type, but only if
+ // the field is defined.
+ messageType := targetType
+ if fieldType, found := c.lookupFieldType(e.GetId(), messageType.TypeName(), field); found {
+ resultType = fieldType
+ }
+ case types.TypeParamKind:
+ // Set the operand type to DYN to prevent assignment to a potentially incorrect type
+ // at a later point in type-checking. The isAssignable call will update the type
+ // substitutions for the type param under the covers.
+ c.isAssignable(types.DynType, targetType)
+ // Also, set the result type to DYN.
+ resultType = types.DynType
+ default:
+ // Dynamic / error values are treated as DYN type. Errors are handled this way as well
+ // in order to allow forward progress on the check.
+ if !isDynOrError(targetType) {
+ c.errors.typeDoesNotSupportFieldSelection(e.GetId(), c.location(e), targetType)
+ }
+ resultType = types.DynType
+ }
+
+ // If the target type was optional coming in, then the result must be optional going out.
+ if isOpt || optional {
+ return types.NewOptionalType(resultType)
+ }
+ return resultType
+}
+
+func (c *checker) checkCall(e *exprpb.Expr) {
+ // Note: similar logic exists within the `interpreter/planner.go`. If making changes here
+ // please consider the impact on planner.go and consolidate implementations or mirror code
+ // as appropriate.
+ call := e.GetCallExpr()
+ fnName := call.GetFunction()
+ if fnName == operators.OptSelect {
+ c.checkOptSelect(e)
+ return
+ }
+
+ args := call.GetArgs()
+ // Traverse arguments.
+ for _, arg := range args {
+ c.check(arg)
+ }
+
+ target := call.GetTarget()
+ // Regular static call with simple name.
+ if target == nil {
+ // Check for the existence of the function.
+ fn := c.env.LookupFunction(fnName)
+ if fn == nil {
+ c.errors.undeclaredReference(e.GetId(), c.location(e), c.env.container.Name(), fnName)
+ c.setType(e, types.ErrorType)
+ return
+ }
+ // Overwrite the function name with its fully qualified resolved name.
+ call.Function = fn.Name()
+ // Check to see whether the overload resolves.
+ c.resolveOverloadOrError(e, fn, nil, args)
+ return
+ }
+
+ // If a receiver 'target' is present, it may either be a receiver function, or a namespaced
+ // function, but not both. Given a.b.c() either a.b.c is a function or c is a function with
+ // target a.b.
+ //
+ // Check whether the target is a namespaced function name.
+ qualifiedPrefix, maybeQualified := containers.ToQualifiedName(target)
+ if maybeQualified {
+ maybeQualifiedName := qualifiedPrefix + "." + fnName
+ fn := c.env.LookupFunction(maybeQualifiedName)
+ if fn != nil {
+ // The function name is namespaced and so preserving the target operand would
+ // be an inaccurate representation of the desired evaluation behavior.
+ // Overwrite with fully-qualified resolved function name sans receiver target.
+ call.Target = nil
+ call.Function = fn.Name()
+ c.resolveOverloadOrError(e, fn, nil, args)
+ return
+ }
+ }
+
+ // Regular instance call.
+ c.check(call.Target)
+ fn := c.env.LookupFunction(fnName)
+ // Function found, attempt overload resolution.
+ if fn != nil {
+ c.resolveOverloadOrError(e, fn, target, args)
+ return
+ }
+ // Function name not declared, record error.
+ c.setType(e, types.ErrorType)
+ c.errors.undeclaredReference(e.GetId(), c.location(e), c.env.container.Name(), fnName)
+}
+
+func (c *checker) resolveOverloadOrError(
+ e *exprpb.Expr, fn *decls.FunctionDecl, target *exprpb.Expr, args []*exprpb.Expr) {
+ // Attempt to resolve the overload.
+ resolution := c.resolveOverload(e, fn, target, args)
+ // No such overload, error noted in the resolveOverload call, type recorded here.
+ if resolution == nil {
+ c.setType(e, types.ErrorType)
+ return
+ }
+ // Overload found.
+ c.setType(e, resolution.Type)
+ c.setReference(e, resolution.Reference)
+}
+
+func (c *checker) resolveOverload(
+ call *exprpb.Expr, fn *decls.FunctionDecl, target *exprpb.Expr, args []*exprpb.Expr) *overloadResolution {
+
+ var argTypes []*types.Type
+ if target != nil {
+ argTypes = append(argTypes, c.getType(target))
+ }
+ for _, arg := range args {
+ argTypes = append(argTypes, c.getType(arg))
+ }
+
+ var resultType *types.Type
+ var checkedRef *ast.ReferenceInfo
+ for _, overload := range fn.OverloadDecls() {
+ // Determine whether the overload is currently considered.
+ if c.env.isOverloadDisabled(overload.ID()) {
+ continue
+ }
+
+ // Ensure the call style for the overload matches.
+ if (target == nil && overload.IsMemberFunction()) ||
+ (target != nil && !overload.IsMemberFunction()) {
+ // not a compatible call style.
+ continue
+ }
+
+ // Alternative type-checking behavior when the logical operators are compacted into
+ // variadic AST representations.
+ if fn.Name() == operators.LogicalAnd || fn.Name() == operators.LogicalOr {
+ checkedRef = ast.NewFunctionReference(overload.ID())
+ for i, argType := range argTypes {
+ if !c.isAssignable(argType, types.BoolType) {
+ c.errors.typeMismatch(
+ args[i].GetId(),
+ c.locationByID(args[i].GetId()),
+ types.BoolType,
+ argType)
+ resultType = types.ErrorType
+ }
+ }
+ if isError(resultType) {
+ return nil
+ }
+ return newResolution(checkedRef, types.BoolType)
+ }
+
+ overloadType := newFunctionType(overload.ResultType(), overload.ArgTypes()...)
+ typeParams := overload.TypeParams()
+ if len(typeParams) != 0 {
+ // Instantiate overload's type with fresh type variables.
+ substitutions := newMapping()
+ for _, typePar := range typeParams {
+ substitutions.add(types.NewTypeParamType(typePar), c.newTypeVar())
+ }
+ overloadType = substitute(substitutions, overloadType, false)
+ }
+
+ candidateArgTypes := overloadType.Parameters()[1:]
+ if c.isAssignableList(argTypes, candidateArgTypes) {
+ if checkedRef == nil {
+ checkedRef = ast.NewFunctionReference(overload.ID())
+ } else {
+ checkedRef.AddOverload(overload.ID())
+ }
+
+ // The first matching overload determines the result type; subsequent matches
+ // with a differing result type widen the result to dyn.
+ fnResultType := substitute(c.mappings, overloadType.Parameters()[0], false)
+ if resultType == nil {
+ resultType = fnResultType
+ } else if !isDyn(resultType) && !fnResultType.IsExactType(resultType) {
+ resultType = types.DynType
+ }
+ }
+ }
+
+ if resultType == nil {
+ for i, argType := range argTypes {
+ argTypes[i] = substitute(c.mappings, argType, true)
+ }
+ c.errors.noMatchingOverload(call.GetId(), c.location(call), fn.Name(), argTypes, target != nil)
+ return nil
+ }
+
+ return newResolution(checkedRef, resultType)
+}
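+
+// For illustration: resolving a call such as size("abc") considers each
+// enabled 'size' overload, instantiates any type parameters with fresh type
+// variables, and accepts every overload whose parameter types are assignable
+// from the argument types. The first match fixes the result type; later
+// matches with a different result type widen it to dyn.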
+
+func (c *checker) checkCreateList(e *exprpb.Expr) {
+ create := e.GetListExpr()
+ var elemsType *types.Type
+ optionalIndices := create.GetOptionalIndices()
+ optionals := make(map[int32]bool, len(optionalIndices))
+ for _, optInd := range optionalIndices {
+ optionals[optInd] = true
+ }
+ for i, e := range create.GetElements() {
+ c.check(e)
+ elemType := c.getType(e)
+ if optionals[int32(i)] {
+ var isOptional bool
+ elemType, isOptional = maybeUnwrapOptional(elemType)
+ if !isOptional && !isDyn(elemType) {
+ c.errors.typeMismatch(e.GetId(), c.location(e), types.NewOptionalType(elemType), elemType)
+ }
+ }
+ elemsType = c.joinTypes(e, elemsType, elemType)
+ }
+ if elemsType == nil {
+ // If the list is empty, assign a free type variable to the element type.
+ elemsType = c.newTypeVar()
+ }
+ c.setType(e, types.NewListType(elemsType))
+}
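+
+// For illustration: checking the literal [1, 2] infers list(int), while
+// checking [1, 'a'] reports a type mismatch unless dyn aggregate literal
+// element types are enabled, in which case the element type widens to dyn.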
+
+func (c *checker) checkCreateStruct(e *exprpb.Expr) {
+ str := e.GetStructExpr()
+ if str.GetMessageName() != "" {
+ c.checkCreateMessage(e)
+ } else {
+ c.checkCreateMap(e)
+ }
+}
+
+func (c *checker) checkCreateMap(e *exprpb.Expr) {
+ mapVal := e.GetStructExpr()
+ var mapKeyType *types.Type
+ var mapValueType *types.Type
+ for _, ent := range mapVal.GetEntries() {
+ key := ent.GetMapKey()
+ c.check(key)
+ mapKeyType = c.joinTypes(key, mapKeyType, c.getType(key))
+
+ val := ent.GetValue()
+ c.check(val)
+ valType := c.getType(val)
+ if ent.GetOptionalEntry() {
+ var isOptional bool
+ valType, isOptional = maybeUnwrapOptional(valType)
+ if !isOptional && !isDyn(valType) {
+ c.errors.typeMismatch(val.GetId(), c.location(val), types.NewOptionalType(valType), valType)
+ }
+ }
+ mapValueType = c.joinTypes(val, mapValueType, valType)
+ }
+ if mapKeyType == nil {
+ // If the map is empty, assign free type variables to the key and value types.
+ mapKeyType = c.newTypeVar()
+ mapValueType = c.newTypeVar()
+ }
+ c.setType(e, types.NewMapType(mapKeyType, mapValueType))
+}
+
+func (c *checker) checkCreateMessage(e *exprpb.Expr) {
+ msgVal := e.GetStructExpr()
+ // Determine the type of the message.
+ resultType := types.ErrorType
+ ident := c.env.LookupIdent(msgVal.GetMessageName())
+ if ident == nil {
+ c.errors.undeclaredReference(
+ e.GetId(), c.location(e), c.env.container.Name(), msgVal.GetMessageName())
+ c.setType(e, types.ErrorType)
+ return
+ }
+ // Ensure the type name is fully qualified in the AST.
+ typeName := ident.Name()
+ msgVal.MessageName = typeName
+ c.setReference(e, ast.NewIdentReference(ident.Name(), nil))
+ identKind := ident.Type().Kind()
+ if identKind != types.ErrorKind {
+ if identKind != types.TypeKind {
+ c.errors.notAType(e.GetId(), c.location(e), ident.Type().DeclaredTypeName())
+ } else {
+ resultType = ident.Type().Parameters()[0]
+ // Backwards-compatibility check between well-known types and message types.
+ // In this context, the type is being instantiated by its protobuf name, which
+ // is not ideal or recommended, but some users expect this to work.
+ if isWellKnownType(resultType) {
+ typeName = getWellKnownTypeName(resultType)
+ } else if resultType.Kind() == types.StructKind {
+ typeName = resultType.DeclaredTypeName()
+ } else {
+ c.errors.notAMessageType(e.GetId(), c.location(e), resultType.DeclaredTypeName())
+ resultType = types.ErrorType
+ }
+ }
+ }
+ c.setType(e, resultType)
+
+ // Check the field initializers.
+ for _, ent := range msgVal.GetEntries() {
+ field := ent.GetFieldKey()
+ value := ent.GetValue()
+ c.check(value)
+
+ fieldType := types.ErrorType
+ ft, found := c.lookupFieldType(ent.GetId(), typeName, field)
+ if found {
+ fieldType = ft
+ }
+
+ valType := c.getType(value)
+ if ent.GetOptionalEntry() {
+ var isOptional bool
+ valType, isOptional = maybeUnwrapOptional(valType)
+ if !isOptional && !isDyn(valType) {
+ c.errors.typeMismatch(value.GetId(), c.location(value), types.NewOptionalType(valType), valType)
+ }
+ }
+ if !c.isAssignable(fieldType, valType) {
+ c.errors.fieldTypeMismatch(ent.GetId(), c.locationByID(ent.GetId()), field, fieldType, valType)
+ }
+ }
+}
+
+func (c *checker) checkComprehension(e *exprpb.Expr) {
+ comp := e.GetComprehensionExpr()
+ c.check(comp.GetIterRange())
+ c.check(comp.GetAccuInit())
+ accuType := c.getType(comp.GetAccuInit())
+ rangeType := substitute(c.mappings, c.getType(comp.GetIterRange()), false)
+ var varType *types.Type
+
+ switch rangeType.Kind() {
+ case types.ListKind:
+ varType = rangeType.Parameters()[0]
+ case types.MapKind:
+ // Ranges over the keys.
+ varType = rangeType.Parameters()[0]
+ case types.DynKind, types.ErrorKind, types.TypeParamKind:
+ // Set the range type to DYN to prevent assignment to a potentially incorrect type
+ // at a later point in type-checking. The isAssignable call will update the type
+ // substitutions for the type param under the covers.
+ c.isAssignable(types.DynType, rangeType)
+ // Set the range iteration variable to type DYN as well.
+ varType = types.DynType
+ default:
+ c.errors.notAComprehensionRange(comp.GetIterRange().GetId(), c.location(comp.GetIterRange()), rangeType)
+ varType = types.ErrorType
+ }
+
+ // Create a scope for the comprehension since it has a local accumulation variable.
+ // This scope will contain the accumulation variable used to compute the result.
+ c.env = c.env.enterScope()
+ c.env.AddIdents(decls.NewVariable(comp.GetAccuVar(), accuType))
+ // Create a block scope for the loop.
+ c.env = c.env.enterScope()
+ c.env.AddIdents(decls.NewVariable(comp.GetIterVar(), varType))
+ // Check the variable references in the condition and step.
+ c.check(comp.GetLoopCondition())
+ c.assertType(comp.GetLoopCondition(), types.BoolType)
+ c.check(comp.GetLoopStep())
+ c.assertType(comp.GetLoopStep(), accuType)
+ // Exit the loop's block scope before checking the result.
+ c.env = c.env.exitScope()
+ c.check(comp.GetResult())
+ // Exit the comprehension scope.
+ c.env = c.env.exitScope()
+ c.setType(e, substitute(c.mappings, c.getType(comp.GetResult()), false))
+}
+
+// Checks compatibility of joined types, and returns the most general common type.
+func (c *checker) joinTypes(e *exprpb.Expr, previous, current *types.Type) *types.Type {
+ if previous == nil {
+ return current
+ }
+ if c.isAssignable(previous, current) {
+ return mostGeneral(previous, current)
+ }
+ if c.dynAggregateLiteralElementTypesEnabled() {
+ return types.DynType
+ }
+ c.errors.typeMismatch(e.GetId(), c.location(e), previous, current)
+ return types.ErrorType
+}
+
+func (c *checker) dynAggregateLiteralElementTypesEnabled() bool {
+ return c.env.aggLitElemType == dynElementType
+}
+
+func (c *checker) newTypeVar() *types.Type {
+ id := c.freeTypeVarCounter
+ c.freeTypeVarCounter++
+ return types.NewTypeParamType(fmt.Sprintf("_var%d", id))
+}
+
+func (c *checker) isAssignable(t1, t2 *types.Type) bool {
+ subs := isAssignable(c.mappings, t1, t2)
+ if subs != nil {
+ c.mappings = subs
+ return true
+ }
+
+ return false
+}
+
+func (c *checker) isAssignableList(l1, l2 []*types.Type) bool {
+ subs := isAssignableList(c.mappings, l1, l2)
+ if subs != nil {
+ c.mappings = subs
+ return true
+ }
+
+ return false
+}
+
+func maybeUnwrapString(e *exprpb.Expr) (string, bool) {
+ switch e.GetExprKind().(type) {
+ case *exprpb.Expr_ConstExpr:
+ literal := e.GetConstExpr()
+ switch literal.GetConstantKind().(type) {
+ case *exprpb.Constant_StringValue:
+ return literal.GetStringValue(), true
+ }
+ }
+ return "", false
+}
+
+func (c *checker) setType(e *exprpb.Expr, t *types.Type) {
+ if old, found := c.types[e.GetId()]; found && !old.IsExactType(t) {
+ c.errors.incompatibleType(e.GetId(), c.location(e), e, old, t)
+ return
+ }
+ c.types[e.GetId()] = t
+}
+
+func (c *checker) getType(e *exprpb.Expr) *types.Type {
+ return c.types[e.GetId()]
+}
+
+func (c *checker) setReference(e *exprpb.Expr, r *ast.ReferenceInfo) {
+ if old, found := c.references[e.GetId()]; found && !old.Equals(r) {
+ c.errors.referenceRedefinition(e.GetId(), c.location(e), e, old, r)
+ return
+ }
+ c.references[e.GetId()] = r
+}
+
+func (c *checker) assertType(e *exprpb.Expr, t *types.Type) {
+ if !c.isAssignable(t, c.getType(e)) {
+ c.errors.typeMismatch(e.GetId(), c.location(e), t, c.getType(e))
+ }
+}
+
+type overloadResolution struct {
+ Type *types.Type
+ Reference *ast.ReferenceInfo
+}
+
+func newResolution(r *ast.ReferenceInfo, t *types.Type) *overloadResolution {
+ return &overloadResolution{
+ Reference: r,
+ Type: t,
+ }
+}
+
+func (c *checker) location(e *exprpb.Expr) common.Location {
+ return c.locationByID(e.GetId())
+}
+
+func (c *checker) locationByID(id int64) common.Location {
+ positions := c.sourceInfo.GetPositions()
+ var line = 1
+ if offset, found := positions[id]; found {
+ col := int(offset)
+ for _, lineOffset := range c.sourceInfo.GetLineOffsets() {
+ if lineOffset < offset {
+ line++
+ col = int(offset - lineOffset)
+ } else {
+ break
+ }
+ }
+ return common.NewLocation(line, col)
+ }
+ return common.NoLocation
+}
+
+func (c *checker) lookupFieldType(exprID int64, structType, fieldName string) (*types.Type, bool) {
+ if _, found := c.env.provider.FindStructType(structType); !found {
+ // This should not happen; report an error regardless.
+ c.errors.unexpectedFailedResolution(exprID, c.locationByID(exprID), structType)
+ return nil, false
+ }
+
+ if ft, found := c.env.provider.FindStructFieldType(structType, fieldName); found {
+ return ft.Type, found
+ }
+
+ c.errors.undefinedField(exprID, c.locationByID(exprID), fieldName)
+ return nil, false
+}
+
+func isWellKnownType(t *types.Type) bool {
+ switch t.Kind() {
+ case types.AnyKind, types.TimestampKind, types.DurationKind, types.DynKind, types.NullTypeKind:
+ return true
+ case types.BoolKind, types.BytesKind, types.DoubleKind, types.IntKind, types.StringKind, types.UintKind:
+ return t.IsAssignableType(types.NullType)
+ case types.ListKind:
+ return t.Parameters()[0] == types.DynType
+ case types.MapKind:
+ return t.Parameters()[0] == types.StringType && t.Parameters()[1] == types.DynType
+ }
+ return false
+}
+
+func getWellKnownTypeName(t *types.Type) string {
+ if name, found := wellKnownTypes[t.Kind()]; found {
+ return name
+ }
+ return ""
+}
+
+var (
+ wellKnownTypes = map[types.Kind]string{
+ types.AnyKind: "google.protobuf.Any",
+ types.BoolKind: "google.protobuf.BoolValue",
+ types.BytesKind: "google.protobuf.BytesValue",
+ types.DoubleKind: "google.protobuf.DoubleValue",
+ types.DurationKind: "google.protobuf.Duration",
+ types.DynKind: "google.protobuf.Value",
+ types.IntKind: "google.protobuf.Int64Value",
+ types.ListKind: "google.protobuf.ListValue",
+ types.NullTypeKind: "google.protobuf.NullValue",
+ types.MapKind: "google.protobuf.Struct",
+ types.StringKind: "google.protobuf.StringValue",
+ types.TimestampKind: "google.protobuf.Timestamp",
+ types.UintKind: "google.protobuf.UInt64Value",
+ }
+)
diff --git a/vendor/github.com/google/cel-go/checker/cost.go b/vendor/github.com/google/cel-go/checker/cost.go
new file mode 100644
index 0000000..fd3f735
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/cost.go
@@ -0,0 +1,719 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+import (
+ "math"
+
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/parser"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// WARNING: Any changes to cost calculations in this file require a corresponding change in interpreter/runtimecost.go
+
+// CostEstimator estimates the sizes of variable length input data and the costs of functions.
+type CostEstimator interface {
+ // EstimateSize returns a SizeEstimate for the given AstNode, or nil if
+ // the estimator has no estimate to provide. The size is equivalent to the result of the CEL `size()` function:
+ // length of strings and bytes, number of map entries or number of list items.
+ // EstimateSize is only called for AstNodes where CEL does not already know the size;
+ // it is not called for values defined inline in CEL where the size is already obvious.
+ EstimateSize(element AstNode) *SizeEstimate
+ // EstimateCallCost returns the estimated cost of an invocation, or nil if
+ // the estimator has no estimate to provide.
+ EstimateCallCost(function, overloadID string, target *AstNode, args []AstNode) *CallEstimate
+}
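+
+// The following is an illustrative sketch (not part of the upstream API) of a
+// minimal CostEstimator that bounds the size of a hypothetical variable
+// 'request.payload' and defers all call-cost estimation to the defaults:
+//
+//	type boundedEstimator struct{}
+//
+//	func (boundedEstimator) EstimateSize(element AstNode) *SizeEstimate {
+//		path := element.Path()
+//		if len(path) == 2 && path[0] == "request" && path[1] == "payload" {
+//			return &SizeEstimate{Min: 0, Max: 1024}
+//		}
+//		return nil // no estimate; the default size computations apply
+//	}
+//
+//	func (boundedEstimator) EstimateCallCost(function, overloadID string, target *AstNode, args []AstNode) *CallEstimate {
+//		return nil // defer to the built-in cost computations
+//	}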
+
+// CallEstimate includes a CostEstimate for the call, and an optional estimate of the result object size.
+// The ResultSize should only be provided if the call results in a map, list, string or bytes.
+type CallEstimate struct {
+ CostEstimate
+ ResultSize *SizeEstimate
+}
+
+// AstNode represents an AST node for the purpose of cost estimations.
+type AstNode interface {
+ // Path returns a field path through the provided type declarations to the type of the AstNode, or nil if the AstNode does not
+ // represent a type directly reachable from the provided type declarations.
+ // The first path element is a variable. All subsequent path elements are one of: field name, '@items', '@keys', '@values'.
+ Path() []string
+ // Type returns the deduced type of the AstNode.
+ Type() *types.Type
+ // Expr returns the expression of the AstNode.
+ Expr() *exprpb.Expr
+ // ComputedSize returns a size estimate of the AstNode derived from information available in the CEL expression.
+ // For constants and inline list and map declarations, the exact size is returned. For concatenated list, strings
+ // and bytes, the size is derived from the size estimates of the operands. nil is returned if there is no
+ // computed size available.
+ ComputedSize() *SizeEstimate
+}
+
+type astNode struct {
+ path []string
+ t *types.Type
+ expr *exprpb.Expr
+ derivedSize *SizeEstimate
+}
+
+func (e astNode) Path() []string {
+ return e.path
+}
+
+func (e astNode) Type() *types.Type {
+ return e.t
+}
+
+func (e astNode) Expr() *exprpb.Expr {
+ return e.expr
+}
+
+func (e astNode) ComputedSize() *SizeEstimate {
+ if e.derivedSize != nil {
+ return e.derivedSize
+ }
+ var v uint64
+ switch ek := e.expr.GetExprKind().(type) {
+ case *exprpb.Expr_ConstExpr:
+ switch ck := ek.ConstExpr.GetConstantKind().(type) {
+ case *exprpb.Constant_StringValue:
+ // converting to runes here is an O(n) operation, but
+ // this is consistent with how size is computed at runtime,
+ // and how the language definition defines string size
+ v = uint64(len([]rune(ck.StringValue)))
+ case *exprpb.Constant_BytesValue:
+ v = uint64(len(ck.BytesValue))
+ case *exprpb.Constant_BoolValue, *exprpb.Constant_DoubleValue, *exprpb.Constant_DurationValue,
+ *exprpb.Constant_Int64Value, *exprpb.Constant_TimestampValue, *exprpb.Constant_Uint64Value,
+ *exprpb.Constant_NullValue:
+ v = uint64(1)
+ default:
+ return nil
+ }
+ case *exprpb.Expr_ListExpr:
+ v = uint64(len(ek.ListExpr.GetElements()))
+ case *exprpb.Expr_StructExpr:
+ if ek.StructExpr.GetMessageName() == "" {
+ v = uint64(len(ek.StructExpr.GetEntries()))
+ }
+ default:
+ return nil
+ }
+
+ return &SizeEstimate{Min: v, Max: v}
+}
+
+// SizeEstimate represents an estimated size of a variable length string, bytes, map or list.
+type SizeEstimate struct {
+ Min, Max uint64
+}
+
+// Add adds to another SizeEstimate and returns the sum.
+// If add would result in a uint64 overflow, the result is math.MaxUint64.
+func (se SizeEstimate) Add(sizeEstimate SizeEstimate) SizeEstimate {
+ return SizeEstimate{
+ addUint64NoOverflow(se.Min, sizeEstimate.Min),
+ addUint64NoOverflow(se.Max, sizeEstimate.Max),
+ }
+}
+
+// Multiply multiplies by another SizeEstimate and returns the product.
+// If multiply would result in a uint64 overflow, the result is math.MaxUint64.
+func (se SizeEstimate) Multiply(sizeEstimate SizeEstimate) SizeEstimate {
+ return SizeEstimate{
+ multiplyUint64NoOverflow(se.Min, sizeEstimate.Min),
+ multiplyUint64NoOverflow(se.Max, sizeEstimate.Max),
+ }
+}
+
+// MultiplyByCostFactor multiplies a SizeEstimate by a cost factor and returns the resulting
+// CostEstimate, rounded up to the nearest integer.
+func (se SizeEstimate) MultiplyByCostFactor(costPerUnit float64) CostEstimate {
+ return CostEstimate{
+ multiplyByCostFactor(se.Min, costPerUnit),
+ multiplyByCostFactor(se.Max, costPerUnit),
+ }
+}
+
+// MultiplyByCost multiplies by the cost and returns the product.
+// If multiply would result in a uint64 overflow, the result is math.MaxUint64.
+func (se SizeEstimate) MultiplyByCost(cost CostEstimate) CostEstimate {
+ return CostEstimate{
+ multiplyUint64NoOverflow(se.Min, cost.Min),
+ multiplyUint64NoOverflow(se.Max, cost.Max),
+ }
+}
+
+// Union returns a SizeEstimate that encompasses both input SizeEstimates.
+func (se SizeEstimate) Union(size SizeEstimate) SizeEstimate {
+ result := se
+ if size.Min < result.Min {
+ result.Min = size.Min
+ }
+ if size.Max > result.Max {
+ result.Max = size.Max
+ }
+ return result
+}
+
+// CostEstimate represents an estimated cost range and provides add and multiply operations
+// that do not overflow.
+type CostEstimate struct {
+ Min, Max uint64
+}
+
+// Add adds the costs and returns the sum.
+// If add would result in a uint64 overflow for the min or max, the value is set to math.MaxUint64.
+func (ce CostEstimate) Add(cost CostEstimate) CostEstimate {
+ return CostEstimate{
+ addUint64NoOverflow(ce.Min, cost.Min),
+ addUint64NoOverflow(ce.Max, cost.Max),
+ }
+}
+
+// Multiply multiplies by the cost and returns the product.
+// If multiply would result in a uint64 overflow, the result is math.MaxUint64.
+func (ce CostEstimate) Multiply(cost CostEstimate) CostEstimate {
+ return CostEstimate{
+ multiplyUint64NoOverflow(ce.Min, cost.Min),
+ multiplyUint64NoOverflow(ce.Max, cost.Max),
+ }
+}
+
+// MultiplyByCostFactor multiplies a CostEstimate by a cost factor and returns the resulting
+// CostEstimate, rounded up to the nearest integer.
+func (ce CostEstimate) MultiplyByCostFactor(costPerUnit float64) CostEstimate {
+ return CostEstimate{
+ multiplyByCostFactor(ce.Min, costPerUnit),
+ multiplyByCostFactor(ce.Max, costPerUnit),
+ }
+}
+
+// Union returns a CostEstimate that encompasses both input CostEstimates.
+func (ce CostEstimate) Union(size CostEstimate) CostEstimate {
+ result := ce
+ if size.Min < result.Min {
+ result.Min = size.Min
+ }
+ if size.Max > result.Max {
+ result.Max = size.Max
+ }
+ return result
+}
+
+// addUint64NoOverflow adds non-negative ints. If the result exceeds math.MaxUint64, math.MaxUint64
+// is returned.
+func addUint64NoOverflow(x, y uint64) uint64 {
+ if y > 0 && x > math.MaxUint64-y {
+ return math.MaxUint64
+ }
+ return x + y
+}
+
+// multiplyUint64NoOverflow multiplies non-negative ints. If the result exceeds math.MaxUint64, math.MaxUint64
+// is returned.
+func multiplyUint64NoOverflow(x, y uint64) uint64 {
+ if y != 0 && x > math.MaxUint64/y {
+ return math.MaxUint64
+ }
+ return x * y
+}
+
+// multiplyByCostFactor multiplies an integer by a float cost factor and returns the result, rounded up to the nearest integer.
+func multiplyByCostFactor(x uint64, y float64) uint64 {
+ xFloat := float64(x)
+ if xFloat > 0 && y > 0 && xFloat > math.MaxUint64/y {
+ return math.MaxUint64
+ }
+ ceil := math.Ceil(xFloat * y)
+ if ceil >= doubleTwoTo64 {
+ return math.MaxUint64
+ }
+ return uint64(ceil)
+}
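+
+// Illustrative note: the helpers above saturate at math.MaxUint64 rather than
+// wrapping, so worst-case estimates never silently overflow. For example
+// (hypothetical values):
+//
+//	addUint64NoOverflow(math.MaxUint64-1, 5) // math.MaxUint64
+//	multiplyUint64NoOverflow(1<<32, 1<<33)   // math.MaxUint64
+//	multiplyByCostFactor(10, 0.15)           // 2, since ceil(10*0.15) = ceil(1.5)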
+
+var (
+ selectAndIdentCost = CostEstimate{Min: common.SelectAndIdentCost, Max: common.SelectAndIdentCost}
+ constCost = CostEstimate{Min: common.ConstCost, Max: common.ConstCost}
+
+ createListBaseCost = CostEstimate{Min: common.ListCreateBaseCost, Max: common.ListCreateBaseCost}
+ createMapBaseCost = CostEstimate{Min: common.MapCreateBaseCost, Max: common.MapCreateBaseCost}
+ createMessageBaseCost = CostEstimate{Min: common.StructCreateBaseCost, Max: common.StructCreateBaseCost}
+)
+
+type coster struct {
+ // exprPath maps from Expr Id to field path.
+ exprPath map[int64][]string
+ // iterRanges tracks the iterRange of each iterVar.
+ iterRanges iterRangeScopes
+ // computedSizes tracks the computed sizes of call results.
+ computedSizes map[int64]SizeEstimate
+ checkedAST *ast.CheckedAST
+ estimator CostEstimator
+ overloadEstimators map[string]FunctionEstimator
+ // presenceTestCost will be either zero or one depending on whether has() macros count against cost computations.
+ presenceTestCost CostEstimate
+}
+
+// Use a stack of iterVar -> iterRange Expr Ids to handle shadowed variable names.
+type iterRangeScopes map[string][]int64
+
+func (vs iterRangeScopes) push(varName string, expr *exprpb.Expr) {
+ vs[varName] = append(vs[varName], expr.GetId())
+}
+
+func (vs iterRangeScopes) pop(varName string) {
+ varStack := vs[varName]
+ vs[varName] = varStack[:len(varStack)-1]
+}
+
+func (vs iterRangeScopes) peek(varName string) (int64, bool) {
+ varStack := vs[varName]
+ if len(varStack) > 0 {
+ return varStack[len(varStack)-1], true
+ }
+ return 0, false
+}
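+
+// For illustration, when nested comprehensions both declare an iteration
+// variable named 'x', the scopes behave as a stack (expression values
+// hypothetical):
+//
+//	vs.push("x", outerRange) // peek("x") yields outerRange.GetId()
+//	vs.push("x", innerRange) // peek("x") yields innerRange.GetId(), shadowing outer
+//	vs.pop("x")              // peek("x") yields outerRange.GetId() again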
+
+// CostOption configures flags which affect cost computations.
+type CostOption func(*coster) error
+
+// PresenceTestHasCost determines whether presence testing has a cost of one or zero.
+//
+// By default, presence tests have a cost of one.
+func PresenceTestHasCost(hasCost bool) CostOption {
+ return func(c *coster) error {
+ if hasCost {
+ c.presenceTestCost = selectAndIdentCost
+ return nil
+ }
+ c.presenceTestCost = CostEstimate{Min: 0, Max: 0}
+ return nil
+ }
+}
+
+// FunctionEstimator provides a CallEstimate given the target and arguments for a specific function, overload pair.
+type FunctionEstimator func(estimator CostEstimator, target *AstNode, args []AstNode) *CallEstimate
+
+// OverloadCostEstimate binds a FunctionEstimator to a specific function overload ID.
+//
+// When an OverloadCostEstimate is provided, it overrides the cost calculation of the CostEstimator provided to
+// the Cost() call.
+func OverloadCostEstimate(overloadID string, functionCoster FunctionEstimator) CostOption {
+ return func(c *coster) error {
+ c.overloadEstimators[overloadID] = functionCoster
+ return nil
+ }
+}
+
+// Cost estimates the cost of the parsed and type checked CEL expression.
+func Cost(checker *ast.CheckedAST, estimator CostEstimator, opts ...CostOption) (CostEstimate, error) {
+ c := &coster{
+ checkedAST: checker,
+ estimator: estimator,
+ overloadEstimators: map[string]FunctionEstimator{},
+ exprPath: map[int64][]string{},
+ iterRanges: map[string][]int64{},
+ computedSizes: map[int64]SizeEstimate{},
+ presenceTestCost: CostEstimate{Min: 1, Max: 1},
+ }
+ for _, opt := range opts {
+ err := opt(c)
+ if err != nil {
+ return CostEstimate{}, err
+ }
+ }
+ return c.cost(checker.Expr), nil
+}
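+
+// An illustrative sketch of invoking Cost with options; 'checkedAST',
+// 'estimator', and 'myCosterFn' are assumed to be supplied by the caller:
+//
+//	est, err := Cost(checkedAST, estimator,
+//		PresenceTestHasCost(false),
+//		OverloadCostEstimate("my_custom_overload", myCosterFn))
+//	if err != nil {
+//		return err
+//	}
+//	upperBound := est.Max // upper bound of the estimated cost range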
+
+func (c *coster) cost(e *exprpb.Expr) CostEstimate {
+ if e == nil {
+ return CostEstimate{}
+ }
+ var cost CostEstimate
+ switch e.GetExprKind().(type) {
+ case *exprpb.Expr_ConstExpr:
+ cost = constCost
+ case *exprpb.Expr_IdentExpr:
+ cost = c.costIdent(e)
+ case *exprpb.Expr_SelectExpr:
+ cost = c.costSelect(e)
+ case *exprpb.Expr_CallExpr:
+ cost = c.costCall(e)
+ case *exprpb.Expr_ListExpr:
+ cost = c.costCreateList(e)
+ case *exprpb.Expr_StructExpr:
+ cost = c.costCreateStruct(e)
+ case *exprpb.Expr_ComprehensionExpr:
+ cost = c.costComprehension(e)
+ default:
+ return CostEstimate{}
+ }
+ return cost
+}
+
+func (c *coster) costIdent(e *exprpb.Expr) CostEstimate {
+ identExpr := e.GetIdentExpr()
+
+ // build and track the field path
+ if iterRange, ok := c.iterRanges.peek(identExpr.GetName()); ok {
+ switch c.checkedAST.TypeMap[iterRange].Kind() {
+ case types.ListKind:
+ c.addPath(e, append(c.exprPath[iterRange], "@items"))
+ case types.MapKind:
+ c.addPath(e, append(c.exprPath[iterRange], "@keys"))
+ }
+ } else {
+ c.addPath(e, []string{identExpr.GetName()})
+ }
+
+ return selectAndIdentCost
+}
+
+func (c *coster) costSelect(e *exprpb.Expr) CostEstimate {
+ sel := e.GetSelectExpr()
+ var sum CostEstimate
+ if sel.GetTestOnly() {
+ // Recurse, but do not add any additional cost for the qualifier.
+ // This is equivalent to how evalTestOnly increments the runtime cost counter
+ // without charging for the qualifier, except here it is the reverse: the
+ // ident contributes the cost.
+ sum = sum.Add(c.presenceTestCost)
+ sum = sum.Add(c.cost(sel.GetOperand()))
+ return sum
+ }
+ sum = sum.Add(c.cost(sel.GetOperand()))
+ targetType := c.getType(sel.GetOperand())
+ switch targetType.Kind() {
+ case types.MapKind, types.StructKind, types.TypeParamKind:
+ sum = sum.Add(selectAndIdentCost)
+ }
+
+ // build and track the field path
+ c.addPath(e, append(c.getPath(sel.GetOperand()), sel.GetField()))
+
+ return sum
+}
+
+func (c *coster) costCall(e *exprpb.Expr) CostEstimate {
+ call := e.GetCallExpr()
+ target := call.GetTarget()
+ args := call.GetArgs()
+
+ var sum CostEstimate
+
+ argTypes := make([]AstNode, len(args))
+ argCosts := make([]CostEstimate, len(args))
+ for i, arg := range args {
+ argCosts[i] = c.cost(arg)
+ argTypes[i] = c.newAstNode(arg)
+ }
+
+ ref := c.checkedAST.ReferenceMap[e.GetId()]
+ if ref == nil || len(ref.OverloadIDs) == 0 {
+ return CostEstimate{}
+ }
+ var targetType AstNode
+ if target != nil {
+ // target aliases call.GetTarget(), so a single nil check suffices.
+ sum = sum.Add(c.cost(call.GetTarget()))
+ targetType = c.newAstNode(call.GetTarget())
+ }
+ // Pick a cost estimate range that covers all the overload cost estimation ranges
+ fnCost := CostEstimate{Min: uint64(math.MaxUint64), Max: 0}
+ var resultSize *SizeEstimate
+ for _, overload := range ref.OverloadIDs {
+ overloadCost := c.functionCost(call.GetFunction(), overload, &targetType, argTypes, argCosts)
+ fnCost = fnCost.Union(overloadCost.CostEstimate)
+ if overloadCost.ResultSize != nil {
+ if resultSize == nil {
+ resultSize = overloadCost.ResultSize
+ } else {
+ size := resultSize.Union(*overloadCost.ResultSize)
+ resultSize = &size
+ }
+ }
+ // build and track the field path for index operations
+ switch overload {
+ case overloads.IndexList:
+ if len(args) > 0 {
+ c.addPath(e, append(c.getPath(args[0]), "@items"))
+ }
+ case overloads.IndexMap:
+ if len(args) > 0 {
+ c.addPath(e, append(c.getPath(args[0]), "@values"))
+ }
+ }
+ }
+ if resultSize != nil {
+ c.computedSizes[e.GetId()] = *resultSize
+ }
+ return sum.Add(fnCost)
+}
+
+func (c *coster) costCreateList(e *exprpb.Expr) CostEstimate {
+ create := e.GetListExpr()
+ var sum CostEstimate
+ for _, e := range create.GetElements() {
+ sum = sum.Add(c.cost(e))
+ }
+ return sum.Add(createListBaseCost)
+}
+
+func (c *coster) costCreateStruct(e *exprpb.Expr) CostEstimate {
+ str := e.GetStructExpr()
+ if str.MessageName != "" {
+ return c.costCreateMessage(e)
+ }
+ return c.costCreateMap(e)
+}
+
+func (c *coster) costCreateMap(e *exprpb.Expr) CostEstimate {
+ mapVal := e.GetStructExpr()
+ var sum CostEstimate
+ for _, ent := range mapVal.GetEntries() {
+ key := ent.GetMapKey()
+ sum = sum.Add(c.cost(key))
+
+ sum = sum.Add(c.cost(ent.GetValue()))
+ }
+ return sum.Add(createMapBaseCost)
+}
+
+func (c *coster) costCreateMessage(e *exprpb.Expr) CostEstimate {
+ msgVal := e.GetStructExpr()
+ var sum CostEstimate
+ for _, ent := range msgVal.GetEntries() {
+ sum = sum.Add(c.cost(ent.GetValue()))
+ }
+ return sum.Add(createMessageBaseCost)
+}
+
+func (c *coster) costComprehension(e *exprpb.Expr) CostEstimate {
+ comp := e.GetComprehensionExpr()
+ var sum CostEstimate
+ sum = sum.Add(c.cost(comp.GetIterRange()))
+ sum = sum.Add(c.cost(comp.GetAccuInit()))
+
+ // Track the iterRange of each IterVar for field path construction
+ c.iterRanges.push(comp.GetIterVar(), comp.GetIterRange())
+ loopCost := c.cost(comp.GetLoopCondition())
+ stepCost := c.cost(comp.GetLoopStep())
+ c.iterRanges.pop(comp.GetIterVar())
+ sum = sum.Add(c.cost(comp.Result))
+ rangeCnt := c.sizeEstimate(c.newAstNode(comp.GetIterRange()))
+
+ c.computedSizes[e.GetId()] = rangeCnt
+
+ rangeCost := rangeCnt.MultiplyByCost(stepCost.Add(loopCost))
+ sum = sum.Add(rangeCost)
+
+ return sum
+}
+
+func (c *coster) sizeEstimate(t AstNode) SizeEstimate {
+ if l := t.ComputedSize(); l != nil {
+ return *l
+ }
+ if l := c.estimator.EstimateSize(t); l != nil {
+ return *l
+ }
+ // Return an estimate of 1 for scalar types of fixed size; strings, bytes,
+ // and more complex objects can be of variable length, so they fall through
+ // to the unbounded estimate below.
+ if isScalar(t.Type()) {
+ // TODO: since the logic for size estimation is split between
+ // ComputedSize and isScalar, changing one will likely require changing
+ // the other, so they should be merged in the future if possible
+ return SizeEstimate{Min: 1, Max: 1}
+ }
+ return SizeEstimate{Min: 0, Max: math.MaxUint64}
+}
+
+func (c *coster) functionCost(function, overloadID string, target *AstNode, args []AstNode, argCosts []CostEstimate) CallEstimate {
+ argCostSum := func() CostEstimate {
+ var sum CostEstimate
+ for _, a := range argCosts {
+ sum = sum.Add(a)
+ }
+ return sum
+ }
+ if len(c.overloadEstimators) != 0 {
+ if estimator, found := c.overloadEstimators[overloadID]; found {
+ if est := estimator(c.estimator, target, args); est != nil {
+ callEst := *est
+ return CallEstimate{CostEstimate: callEst.Add(argCostSum()), ResultSize: est.ResultSize}
+ }
+ }
+ }
+ if est := c.estimator.EstimateCallCost(function, overloadID, target, args); est != nil {
+ callEst := *est
+ return CallEstimate{CostEstimate: callEst.Add(argCostSum()), ResultSize: est.ResultSize}
+ }
+ switch overloadID {
+ // O(n) functions
+ case overloads.ExtFormatString:
+ if target != nil {
+ // ResultSize not calculated because we can't bound the max size.
+ return CallEstimate{CostEstimate: c.sizeEstimate(*target).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())}
+ }
+ case overloads.StringToBytes:
+ if len(args) == 1 {
+ sz := c.sizeEstimate(args[0])
+ // ResultSize max is when each char converts to 4 bytes.
+ return CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &SizeEstimate{Min: sz.Min, Max: sz.Max * 4}}
+ }
+ case overloads.BytesToString:
+ if len(args) == 1 {
+ sz := c.sizeEstimate(args[0])
+ // ResultSize min is when 4 bytes convert to 1 char.
+ return CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &SizeEstimate{Min: sz.Min / 4, Max: sz.Max}}
+ }
+ case overloads.ExtQuoteString:
+ if len(args) == 1 {
+ sz := c.sizeEstimate(args[0])
+ // ResultSize max is when each char is escaped. 2 quote chars always added.
+ return CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &SizeEstimate{Min: sz.Min + 2, Max: sz.Max*2 + 2}}
+ }
+ case overloads.StartsWithString, overloads.EndsWithString:
+ if len(args) == 1 {
+ return CallEstimate{CostEstimate: c.sizeEstimate(args[0]).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())}
+ }
+ case overloads.InList:
+ // If a list is composed entirely of constant values this is O(1), but we don't account for that here.
+ // We just assume all list containment checks are O(n).
+ if len(args) == 2 {
+ return CallEstimate{CostEstimate: c.sizeEstimate(args[1]).MultiplyByCostFactor(1).Add(argCostSum())}
+ }
+ // O(nm) functions
+ case overloads.MatchesString:
+ // https://swtch.com/~rsc/regexp/regexp1.html applies to RE2 implementation supported by CEL
+ if target != nil && len(args) == 1 {
+ // Add one to the string length for purposes of cost calculation to prevent the product of string and regex sizes
+ // from being 0 in the case where the string is empty but the regex is still expensive.
+ strCost := c.sizeEstimate(*target).Add(SizeEstimate{Min: 1, Max: 1}).MultiplyByCostFactor(common.StringTraversalCostFactor)
+ // We don't know how many expressions are in the regex, just the string length (a huge
+ // improvement here would be to somehow get a count of the number of expressions in the regex or
+ // how many states are in the regex state machine and use that to measure regex cost).
+ // For now, we're making a guess that each expression in a regex is typically at least 4 chars
+ // in length.
+ regexCost := c.sizeEstimate(args[0]).MultiplyByCostFactor(common.RegexStringLengthCostFactor)
+ return CallEstimate{CostEstimate: strCost.Multiply(regexCost).Add(argCostSum())}
+ }
+ case overloads.ContainsString:
+ if target != nil && len(args) == 1 {
+ strCost := c.sizeEstimate(*target).MultiplyByCostFactor(common.StringTraversalCostFactor)
+ substrCost := c.sizeEstimate(args[0]).MultiplyByCostFactor(common.StringTraversalCostFactor)
+ return CallEstimate{CostEstimate: strCost.Multiply(substrCost).Add(argCostSum())}
+ }
+ case overloads.LogicalOr, overloads.LogicalAnd:
+ lhs := argCosts[0]
+ rhs := argCosts[1]
+ // min cost is min of LHS for short circuited && or ||
+ argCost := CostEstimate{Min: lhs.Min, Max: lhs.Add(rhs).Max}
+ return CallEstimate{CostEstimate: argCost}
+ case overloads.Conditional:
+ size := c.sizeEstimate(args[1]).Union(c.sizeEstimate(args[2]))
+ conditionalCost := argCosts[0]
+ ifTrueCost := argCosts[1]
+ ifFalseCost := argCosts[2]
+ argCost := conditionalCost.Add(ifTrueCost.Union(ifFalseCost))
+ return CallEstimate{CostEstimate: argCost, ResultSize: &size}
+ case overloads.AddString, overloads.AddBytes, overloads.AddList:
+ if len(args) == 2 {
+ lhsSize := c.sizeEstimate(args[0])
+ rhsSize := c.sizeEstimate(args[1])
+ resultSize := lhsSize.Add(rhsSize)
+ switch overloadID {
+ case overloads.AddList:
+ // list concatenation is O(1), but we handle it here to track size
+ return CallEstimate{CostEstimate: CostEstimate{Min: 1, Max: 1}.Add(argCostSum()), ResultSize: &resultSize}
+ default:
+ return CallEstimate{CostEstimate: resultSize.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &resultSize}
+ }
+ }
+ case overloads.LessString, overloads.GreaterString, overloads.LessEqualsString, overloads.GreaterEqualsString,
+ overloads.LessBytes, overloads.GreaterBytes, overloads.LessEqualsBytes, overloads.GreaterEqualsBytes,
+ overloads.Equals, overloads.NotEquals:
+ lhsCost := c.sizeEstimate(args[0])
+ rhsCost := c.sizeEstimate(args[1])
+ min := uint64(0)
+ smallestMax := lhsCost.Max
+ if rhsCost.Max < smallestMax {
+ smallestMax = rhsCost.Max
+ }
+ if smallestMax > 0 {
+ min = 1
+ }
+ // equality of 2 scalar values results in a cost of 1
+ return CallEstimate{CostEstimate: CostEstimate{Min: min, Max: smallestMax}.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())}
+ }
+ // O(1) functions
+ // See CostTracker.costCall for more details about O(1) cost calculations
+
+ // Benchmarks suggest that most of the other operations take +/- 50% of a base cost unit,
+ // which on an Intel Xeon 2.20GHz CPU is 50ns.
+ return CallEstimate{CostEstimate: CostEstimate{Min: 1, Max: 1}.Add(argCostSum())}
+}
+
+func (c *coster) getType(e *exprpb.Expr) *types.Type {
+ return c.checkedAST.TypeMap[e.GetId()]
+}
+
+func (c *coster) getPath(e *exprpb.Expr) []string {
+ return c.exprPath[e.GetId()]
+}
+
+func (c *coster) addPath(e *exprpb.Expr, path []string) {
+ c.exprPath[e.GetId()] = path
+}
+
+func (c *coster) newAstNode(e *exprpb.Expr) *astNode {
+ path := c.getPath(e)
+ if len(path) > 0 && path[0] == parser.AccumulatorName {
+ // only provide paths to root vars; omit accumulator vars
+ path = nil
+ }
+ var derivedSize *SizeEstimate
+ if size, ok := c.computedSizes[e.GetId()]; ok {
+ derivedSize = &size
+ }
+ return &astNode{
+ path: path,
+ t: c.getType(e),
+ expr: e,
+ derivedSize: derivedSize}
+}
+
+// isScalar returns true if the given type is known to be of a constant size at
+// compile time. isScalar will return false for strings (they are variable-width)
+// in addition to protobuf.Any and protobuf.Value (their size is not knowable at compile time).
+func isScalar(t *types.Type) bool {
+ switch t.Kind() {
+ case types.BoolKind, types.DoubleKind, types.DurationKind, types.IntKind, types.TimestampKind, types.UintKind:
+ return true
+ }
+ return false
+}
+
+var (
+ doubleTwoTo64 = math.Ldexp(1.0, 64)
+)
diff --git a/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel b/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel
new file mode 100644
index 0000000..a6b0be2
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel
@@ -0,0 +1,19 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "decls.go",
+ ],
+ importpath = "github.com/google/cel-go/checker/decls",
+ deps = [
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//types/known/emptypb:go_default_library",
+ "@org_golang_google_protobuf//types/known/structpb:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/checker/decls/decls.go b/vendor/github.com/google/cel-go/checker/decls/decls.go
new file mode 100644
index 0000000..0d91bef
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/decls/decls.go
@@ -0,0 +1,237 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package decls provides helpers for creating variable and function declarations.
+package decls
+
+import (
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+var (
+ // Error type used to communicate issues during type-checking.
+ Error = &exprpb.Type{
+ TypeKind: &exprpb.Type_Error{
+ Error: &emptypb.Empty{}}}
+
+ // Dyn is a top-type used to represent any value.
+ Dyn = &exprpb.Type{
+ TypeKind: &exprpb.Type_Dyn{
+ Dyn: &emptypb.Empty{}}}
+)
+
+// Commonly used types.
+var (
+ Bool = NewPrimitiveType(exprpb.Type_BOOL)
+ Bytes = NewPrimitiveType(exprpb.Type_BYTES)
+ Double = NewPrimitiveType(exprpb.Type_DOUBLE)
+ Int = NewPrimitiveType(exprpb.Type_INT64)
+ Null = &exprpb.Type{
+ TypeKind: &exprpb.Type_Null{
+ Null: structpb.NullValue_NULL_VALUE}}
+ String = NewPrimitiveType(exprpb.Type_STRING)
+ Uint = NewPrimitiveType(exprpb.Type_UINT64)
+)
+
+// Well-known types.
+// TODO: Replace with an abstract type registry.
+var (
+ Any = NewWellKnownType(exprpb.Type_ANY)
+ Duration = NewWellKnownType(exprpb.Type_DURATION)
+ Timestamp = NewWellKnownType(exprpb.Type_TIMESTAMP)
+)
+
+// NewAbstractType creates an abstract type declaration which references a proto
+// message name and may also include type parameters.
+func NewAbstractType(name string, paramTypes ...*exprpb.Type) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_AbstractType_{
+ AbstractType: &exprpb.Type_AbstractType{
+ Name: name,
+ ParameterTypes: paramTypes}}}
+}
+
+// NewOptionalType constructs an abstract type indicating that the parameterized type
+// may be contained within the object.
+func NewOptionalType(paramType *exprpb.Type) *exprpb.Type {
+ return NewAbstractType("optional", paramType)
+}
+
+// NewFunctionType creates a function invocation contract, typically only used
+// by type-checking steps after overload resolution.
+func NewFunctionType(resultType *exprpb.Type,
+ argTypes ...*exprpb.Type) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_Function{
+ Function: &exprpb.Type_FunctionType{
+ ResultType: resultType,
+ ArgTypes: argTypes}}}
+}
+
+// NewFunction creates a named function declaration with one or more overloads.
+func NewFunction(name string,
+ overloads ...*exprpb.Decl_FunctionDecl_Overload) *exprpb.Decl {
+ return &exprpb.Decl{
+ Name: name,
+ DeclKind: &exprpb.Decl_Function{
+ Function: &exprpb.Decl_FunctionDecl{
+ Overloads: overloads}}}
+}
+
+// NewIdent creates a named identifier declaration with an optional literal
+// value.
+//
+// Literal values are typically only associated with enum identifiers.
+//
+// Deprecated: Use NewVar or NewConst instead.
+func NewIdent(name string, t *exprpb.Type, v *exprpb.Constant) *exprpb.Decl {
+ return &exprpb.Decl{
+ Name: name,
+ DeclKind: &exprpb.Decl_Ident{
+ Ident: &exprpb.Decl_IdentDecl{
+ Type: t,
+ Value: v}}}
+}
+
+// NewConst creates a constant identifier with a CEL constant literal value.
+func NewConst(name string, t *exprpb.Type, v *exprpb.Constant) *exprpb.Decl {
+ return NewIdent(name, t, v)
+}
+
+// NewVar creates a variable identifier.
+func NewVar(name string, t *exprpb.Type) *exprpb.Decl {
+ return NewIdent(name, t, nil)
+}
+
+// NewInstanceOverload creates an instance function overload contract.
+// The first element of argTypes is the instance.
+func NewInstanceOverload(id string, argTypes []*exprpb.Type,
+ resultType *exprpb.Type) *exprpb.Decl_FunctionDecl_Overload {
+ return &exprpb.Decl_FunctionDecl_Overload{
+ OverloadId: id,
+ ResultType: resultType,
+ Params: argTypes,
+ IsInstanceFunction: true}
+}
+
+// NewListType generates a new list with elements of a certain type.
+func NewListType(elem *exprpb.Type) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_ListType_{
+ ListType: &exprpb.Type_ListType{
+ ElemType: elem}}}
+}
+
+// NewMapType generates a new map with typed keys and values.
+func NewMapType(key *exprpb.Type, value *exprpb.Type) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_MapType_{
+ MapType: &exprpb.Type_MapType{
+ KeyType: key,
+ ValueType: value}}}
+}
+
+// NewObjectType creates an object type for a qualified type name.
+func NewObjectType(typeName string) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_MessageType{
+ MessageType: typeName}}
+}
+
+// NewOverload creates a function overload declaration which contains a unique
+// overload id as well as the expected argument and result types. Overloads
+// must be aggregated within a Function declaration.
+func NewOverload(id string, argTypes []*exprpb.Type,
+ resultType *exprpb.Type) *exprpb.Decl_FunctionDecl_Overload {
+ return &exprpb.Decl_FunctionDecl_Overload{
+ OverloadId: id,
+ ResultType: resultType,
+ Params: argTypes,
+ IsInstanceFunction: false}
+}
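+
+// As an illustrative sketch, a hypothetical global function 'matches_ci' over
+// two strings could be declared by pairing NewFunction with NewOverload:
+//
+//	matchesCI := NewFunction("matches_ci",
+//		NewOverload("matches_ci_string_string",
+//			[]*exprpb.Type{String, String},
+//			Bool))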
+
+// NewParameterizedInstanceOverload creates a parametric function instance overload type.
+func NewParameterizedInstanceOverload(id string,
+ argTypes []*exprpb.Type,
+ resultType *exprpb.Type,
+ typeParams []string) *exprpb.Decl_FunctionDecl_Overload {
+ return &exprpb.Decl_FunctionDecl_Overload{
+ OverloadId: id,
+ ResultType: resultType,
+ Params: argTypes,
+ TypeParams: typeParams,
+ IsInstanceFunction: true}
+}
+
+// NewParameterizedOverload creates a parametric function overload type.
+func NewParameterizedOverload(id string,
+ argTypes []*exprpb.Type,
+ resultType *exprpb.Type,
+ typeParams []string) *exprpb.Decl_FunctionDecl_Overload {
+ return &exprpb.Decl_FunctionDecl_Overload{
+ OverloadId: id,
+ ResultType: resultType,
+ Params: argTypes,
+ TypeParams: typeParams,
+ IsInstanceFunction: false}
+}
+
+// NewPrimitiveType creates a type for a primitive value. See the var declarations
+// for Int, Uint, etc.
+func NewPrimitiveType(primitive exprpb.Type_PrimitiveType) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_Primitive{
+ Primitive: primitive}}
+}
+
+// NewTypeType creates a new type designating a type.
+func NewTypeType(nested *exprpb.Type) *exprpb.Type {
+ if nested == nil {
+ // must set the nested field for a valid oneof option
+ nested = &exprpb.Type{}
+ }
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_Type{
+ Type: nested}}
+}
+
+// NewTypeParamType creates a type corresponding to a named, contextual parameter.
+func NewTypeParamType(name string) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_TypeParam{
+ TypeParam: name}}
+}
+
+// NewWellKnownType creates a type corresponding to a protobuf well-known type
+// value.
+func NewWellKnownType(wellKnown exprpb.Type_WellKnownType) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_WellKnown{
+ WellKnown: wellKnown}}
+}
+
+// NewWrapperType creates a wrapped primitive type instance. Wrapped types
+// are roughly equivalent to a nullable, or optionally valued type.
+func NewWrapperType(wrapped *exprpb.Type) *exprpb.Type {
+ primitive := wrapped.GetPrimitive()
+ if primitive == exprpb.Type_PRIMITIVE_TYPE_UNSPECIFIED {
+ // TODO: return an error
+ panic("Wrapped type must be a primitive")
+ }
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_Wrapper{
+ Wrapper: primitive}}
+}
diff --git a/vendor/github.com/google/cel-go/checker/env.go b/vendor/github.com/google/cel-go/checker/env.go
new file mode 100644
index 0000000..70682b1
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/env.go
@@ -0,0 +1,276 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/google/cel-go/common/containers"
+ "github.com/google/cel-go/common/decls"
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/parser"
+)
+
+type aggregateLiteralElementType int
+
+const (
+ dynElementType aggregateLiteralElementType = iota
+ homogenousElementType aggregateLiteralElementType = 1 << iota
+)
+
+var (
+ crossTypeNumericComparisonOverloads = map[string]struct{}{
+ // double <-> int | uint
+ overloads.LessDoubleInt64: {},
+ overloads.LessDoubleUint64: {},
+ overloads.LessEqualsDoubleInt64: {},
+ overloads.LessEqualsDoubleUint64: {},
+ overloads.GreaterDoubleInt64: {},
+ overloads.GreaterDoubleUint64: {},
+ overloads.GreaterEqualsDoubleInt64: {},
+ overloads.GreaterEqualsDoubleUint64: {},
+ // int <-> double | uint
+ overloads.LessInt64Double: {},
+ overloads.LessInt64Uint64: {},
+ overloads.LessEqualsInt64Double: {},
+ overloads.LessEqualsInt64Uint64: {},
+ overloads.GreaterInt64Double: {},
+ overloads.GreaterInt64Uint64: {},
+ overloads.GreaterEqualsInt64Double: {},
+ overloads.GreaterEqualsInt64Uint64: {},
+ // uint <-> double | int
+ overloads.LessUint64Double: {},
+ overloads.LessUint64Int64: {},
+ overloads.LessEqualsUint64Double: {},
+ overloads.LessEqualsUint64Int64: {},
+ overloads.GreaterUint64Double: {},
+ overloads.GreaterUint64Int64: {},
+ overloads.GreaterEqualsUint64Double: {},
+ overloads.GreaterEqualsUint64Int64: {},
+ }
+)
+
+// Env is the environment for type checking.
+//
+// The Env is comprised of a container, type provider, declarations, and other related objects
+// which can be used to assist with type-checking.
+type Env struct {
+ container *containers.Container
+ provider types.Provider
+ declarations *Scopes
+ aggLitElemType aggregateLiteralElementType
+ filteredOverloadIDs map[string]struct{}
+}
+
+// NewEnv returns a new *Env with the given parameters.
+func NewEnv(container *containers.Container, provider types.Provider, opts ...Option) (*Env, error) {
+ declarations := newScopes()
+ declarations.Push()
+
+ envOptions := &options{}
+ for _, opt := range opts {
+ if err := opt(envOptions); err != nil {
+ return nil, err
+ }
+ }
+ aggLitElemType := dynElementType
+ if envOptions.homogeneousAggregateLiterals {
+ aggLitElemType = homogenousElementType
+ }
+ filteredOverloadIDs := crossTypeNumericComparisonOverloads
+ if envOptions.crossTypeNumericComparisons {
+ filteredOverloadIDs = make(map[string]struct{})
+ }
+ if envOptions.validatedDeclarations != nil {
+ declarations = envOptions.validatedDeclarations.Copy()
+ }
+ return &Env{
+ container: container,
+ provider: provider,
+ declarations: declarations,
+ aggLitElemType: aggLitElemType,
+ filteredOverloadIDs: filteredOverloadIDs,
+ }, nil
+}
+
+// AddIdents configures the checker with a list of variable declarations.
+//
+// If there are overlapping declarations, the method will error.
+func (e *Env) AddIdents(declarations ...*decls.VariableDecl) error {
+ errMsgs := make([]errorMsg, 0)
+ for _, d := range declarations {
+ errMsgs = append(errMsgs, e.addIdent(d))
+ }
+ return formatError(errMsgs)
+}
+
+// AddFunctions configures the checker with a list of function declarations.
+//
+// If there are overlapping declarations, the method will error.
+func (e *Env) AddFunctions(declarations ...*decls.FunctionDecl) error {
+ errMsgs := make([]errorMsg, 0)
+ for _, d := range declarations {
+ errMsgs = append(errMsgs, e.setFunction(d)...)
+ }
+ return formatError(errMsgs)
+}
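+
+// As an illustrative sketch (variable names hypothetical), a caller typically
+// populates an Env with identifiers and functions before type-checking:
+//
+//	env, err := NewEnv(container, provider)
+//	if err != nil {
+//		return err
+//	}
+//	if err := env.AddIdents(decls.NewVariable("request", types.DynType)); err != nil {
+//		return err
+//	}
+//	if err := env.AddFunctions(myFnDecl); err != nil {
+//		return err
+//	}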
+
+// LookupIdent returns a variable declaration for the given name, resolved as an identifier in the Env.
+// Returns nil if no such identifier is found in the Env.
+func (e *Env) LookupIdent(name string) *decls.VariableDecl {
+ for _, candidate := range e.container.ResolveCandidateNames(name) {
+ if ident := e.declarations.FindIdent(candidate); ident != nil {
+ return ident
+ }
+
+ // Next try to import the name as a reference to a message type. If found,
+ // the declaration is added to the outermost (global) scope of the
+ // environment, so it can be resolved faster next time.
+ if t, found := e.provider.FindStructType(candidate); found {
+ decl := decls.NewVariable(candidate, t)
+ e.declarations.AddIdent(decl)
+ return decl
+ }
+
+ // Next try to import this as an enum value by splitting the name into a type prefix
+ // and the enum value name.
+ if enumValue := e.provider.EnumValue(candidate); enumValue.Type() != types.ErrType {
+ decl := decls.NewConstant(candidate, types.IntType, enumValue)
+ e.declarations.AddIdent(decl)
+ return decl
+ }
+ }
+ return nil
+}
+
+// LookupFunction returns a function declaration for the given name, resolved as a function in the Env.
+// Returns nil if no such function is found in the Env.
+func (e *Env) LookupFunction(name string) *decls.FunctionDecl {
+ for _, candidate := range e.container.ResolveCandidateNames(name) {
+ if fn := e.declarations.FindFunction(candidate); fn != nil {
+ return fn
+ }
+ }
+ return nil
+}
+
+// setFunction adds the function decl to the Env.
+// Adds a function decl if one doesn't already exist, then adds all overloads from the decl.
+// If an overload overlaps with an existing overload or macro, the errors are returned instead.
+func (e *Env) setFunction(fn *decls.FunctionDecl) []errorMsg {
+ errMsgs := make([]errorMsg, 0)
+ current := e.declarations.FindFunction(fn.Name())
+ if current != nil {
+ var err error
+ current, err = current.Merge(fn)
+ if err != nil {
+ return append(errMsgs, errorMsg(err.Error()))
+ }
+ } else {
+ current = fn
+ }
+ for _, overload := range current.OverloadDecls() {
+ for _, macro := range parser.AllMacros {
+ if macro.Function() == current.Name() &&
+ macro.IsReceiverStyle() == overload.IsMemberFunction() &&
+ macro.ArgCount() == len(overload.ArgTypes()) {
+ errMsgs = append(errMsgs, overlappingMacroError(current.Name(), macro.ArgCount()))
+ }
+ }
+ if len(errMsgs) > 0 {
+ return errMsgs
+ }
+ }
+ e.declarations.SetFunction(current)
+ return errMsgs
+}
+
+// addIdent adds the Decl to the declarations in the Env.
+// Returns a non-empty errorMsg if the identifier is already declared in the scope.
+func (e *Env) addIdent(decl *decls.VariableDecl) errorMsg {
+ current := e.declarations.FindIdentInScope(decl.Name())
+ if current != nil {
+ if current.DeclarationIsEquivalent(decl) {
+ return ""
+ }
+ return overlappingIdentifierError(decl.Name())
+ }
+ e.declarations.AddIdent(decl)
+ return ""
+}
+
+// isOverloadDisabled returns whether the overloadID is disabled in the current environment.
+func (e *Env) isOverloadDisabled(overloadID string) bool {
+ _, found := e.filteredOverloadIDs[overloadID]
+ return found
+}
+
+// validatedDeclarations returns a reference to the validated variable and function declaration scope stack.
+// The returned Scopes must be copied before use.
+func (e *Env) validatedDeclarations() *Scopes {
+ return e.declarations
+}
+
+// enterScope creates a new Env instance with a new innermost declaration scope.
+func (e *Env) enterScope() *Env {
+ childDecls := e.declarations.Push()
+ return &Env{
+ declarations: childDecls,
+ container: e.container,
+ provider: e.provider,
+ aggLitElemType: e.aggLitElemType,
+ // Preserve the filtered overload IDs so overload disabling survives scope changes.
+ filteredOverloadIDs: e.filteredOverloadIDs,
+ }
+}
+
+// exitScope creates a new Env instance with the nearest outer declaration scope.
+func (e *Env) exitScope() *Env {
+ parentDecls := e.declarations.Pop()
+ return &Env{
+ declarations: parentDecls,
+ container: e.container,
+ provider: e.provider,
+ aggLitElemType: e.aggLitElemType,
+ // Preserve the filtered overload IDs so overload disabling survives scope changes.
+ filteredOverloadIDs: e.filteredOverloadIDs,
+ }
+}
+
+// errorMsg is a type alias meant to represent error-based return values which
+// may be accumulated into an error at a later point in execution.
+type errorMsg string
+
+func overlappingIdentifierError(name string) errorMsg {
+ return errorMsg(fmt.Sprintf("overlapping identifier for name '%s'", name))
+}
+
+func overlappingMacroError(name string, argCount int) errorMsg {
+ return errorMsg(fmt.Sprintf(
+ "overlapping macro for name '%s' with %d args", name, argCount))
+}
+
+func formatError(errMsgs []errorMsg) error {
+ errStrs := make([]string, 0, len(errMsgs))
+ for _, msg := range errMsgs {
+ if msg != "" {
+ errStrs = append(errStrs, string(msg))
+ }
+ }
+ if len(errStrs) > 0 {
+ return fmt.Errorf("%s", strings.Join(errStrs, "\n"))
+ }
+ return nil
+}
diff --git a/vendor/github.com/google/cel-go/checker/errors.go b/vendor/github.com/google/cel-go/checker/errors.go
new file mode 100644
index 0000000..c2b9649
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/errors.go
@@ -0,0 +1,92 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+import (
+ "reflect"
+
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/types"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// typeErrors is a specialization of Errors.
+type typeErrors struct {
+ errs *common.Errors
+}
+
+func (e *typeErrors) fieldTypeMismatch(id int64, l common.Location, name string, field, value *types.Type) {
+ e.errs.ReportErrorAtID(id, l, "expected type of field '%s' is '%s' but provided type is '%s'",
+ name, FormatCELType(field), FormatCELType(value))
+}
+
+func (e *typeErrors) incompatibleType(id int64, l common.Location, ex *exprpb.Expr, prev, next *types.Type) {
+ e.errs.ReportErrorAtID(id, l,
+ "incompatible type already exists for expression: %v(%d) old:%v, new:%v", ex, ex.GetId(), prev, next)
+}
+
+func (e *typeErrors) noMatchingOverload(id int64, l common.Location, name string, args []*types.Type, isInstance bool) {
+ signature := formatFunctionDeclType(nil, args, isInstance)
+ e.errs.ReportErrorAtID(id, l, "found no matching overload for '%s' applied to '%s'", name, signature)
+}
+
+func (e *typeErrors) notAComprehensionRange(id int64, l common.Location, t *types.Type) {
+ e.errs.ReportErrorAtID(id, l, "expression of type '%s' cannot be range of a comprehension (must be list, map, or dynamic)",
+ FormatCELType(t))
+}
+
+func (e *typeErrors) notAnOptionalFieldSelection(id int64, l common.Location, field *exprpb.Expr) {
+ e.errs.ReportErrorAtID(id, l, "unsupported optional field selection: %v", field)
+}
+
+func (e *typeErrors) notAType(id int64, l common.Location, typeName string) {
+ e.errs.ReportErrorAtID(id, l, "'%s' is not a type", typeName)
+}
+
+func (e *typeErrors) notAMessageType(id int64, l common.Location, typeName string) {
+ e.errs.ReportErrorAtID(id, l, "'%s' is not a message type", typeName)
+}
+
+func (e *typeErrors) referenceRedefinition(id int64, l common.Location, ex *exprpb.Expr, prev, next *ast.ReferenceInfo) {
+ e.errs.ReportErrorAtID(id, l,
+ "reference already exists for expression: %v(%d) old:%v, new:%v", ex, ex.GetId(), prev, next)
+}
+
+func (e *typeErrors) typeDoesNotSupportFieldSelection(id int64, l common.Location, t *types.Type) {
+ e.errs.ReportErrorAtID(id, l, "type '%s' does not support field selection", FormatCELType(t))
+}
+
+func (e *typeErrors) typeMismatch(id int64, l common.Location, expected, actual *types.Type) {
+ e.errs.ReportErrorAtID(id, l, "expected type '%s' but found '%s'",
+ FormatCELType(expected), FormatCELType(actual))
+}
+
+func (e *typeErrors) undefinedField(id int64, l common.Location, field string) {
+ e.errs.ReportErrorAtID(id, l, "undefined field '%s'", field)
+}
+
+func (e *typeErrors) undeclaredReference(id int64, l common.Location, container string, name string) {
+ e.errs.ReportErrorAtID(id, l, "undeclared reference to '%s' (in container '%s')", name, container)
+}
+
+func (e *typeErrors) unexpectedFailedResolution(id int64, l common.Location, typeName string) {
+ e.errs.ReportErrorAtID(id, l, "unexpected failed resolution of '%s'", typeName)
+}
+
+func (e *typeErrors) unexpectedASTType(id int64, l common.Location, ex *exprpb.Expr) {
+ e.errs.ReportErrorAtID(id, l, "unrecognized ast type: %v", reflect.TypeOf(ex))
+}
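
Each typeErrors method above formats one diagnostic onto a shared common.Errors collector, keyed by expression ID and source location. A short sketch of how this is wired up, assuming it runs inside the checker package (common.NewErrors, common.NewTextSource, and common.NoLocation are existing cel-go APIs; the exact prefix of the rendered display string may vary):

```go
package checker

import (
	"fmt"

	"github.com/google/cel-go/common"
	"github.com/google/cel-go/common/types"
)

func exampleTypeErrors() {
	src := common.NewTextSource("a + b")
	errs := &typeErrors{errs: common.NewErrors(src)}
	// Report a mismatch against expression ID 1 with no source location.
	errs.typeMismatch(1, common.NoLocation, types.IntType, types.StringType)
	// Renders something like: expected type 'int' but found 'string'
	fmt.Println(errs.errs.ToDisplayString())
}
```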
diff --git a/vendor/github.com/google/cel-go/checker/format.go b/vendor/github.com/google/cel-go/checker/format.go
new file mode 100644
index 0000000..9584290
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/format.go
@@ -0,0 +1,216 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+import (
+ "fmt"
+ "strings"
+
+ chkdecls "github.com/google/cel-go/checker/decls"
+ "github.com/google/cel-go/common/types"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+const (
+ kindUnknown = iota + 1
+ kindError
+ kindFunction
+ kindDyn
+ kindPrimitive
+ kindWellKnown
+ kindWrapper
+ kindNull
+ kindAbstract
+ kindType
+ kindList
+ kindMap
+ kindObject
+ kindTypeParam
+)
+
+// FormatCheckedType converts a type message into a string representation.
+func FormatCheckedType(t *exprpb.Type) string {
+ switch kindOf(t) {
+ case kindDyn:
+ return "dyn"
+ case kindFunction:
+ return formatFunctionExprType(t.GetFunction().GetResultType(),
+ t.GetFunction().GetArgTypes(),
+ false)
+ case kindList:
+ return fmt.Sprintf("list(%s)", FormatCheckedType(t.GetListType().GetElemType()))
+ case kindObject:
+ return t.GetMessageType()
+ case kindMap:
+ return fmt.Sprintf("map(%s, %s)",
+ FormatCheckedType(t.GetMapType().GetKeyType()),
+ FormatCheckedType(t.GetMapType().GetValueType()))
+ case kindNull:
+ return "null"
+ case kindPrimitive:
+ switch t.GetPrimitive() {
+ case exprpb.Type_UINT64:
+ return "uint"
+ case exprpb.Type_INT64:
+ return "int"
+ }
+ return strings.Trim(strings.ToLower(t.GetPrimitive().String()), " ")
+ case kindType:
+ if t.GetType() == nil || t.GetType().GetTypeKind() == nil {
+ return "type"
+ }
+ return fmt.Sprintf("type(%s)", FormatCheckedType(t.GetType()))
+ case kindWellKnown:
+ switch t.GetWellKnown() {
+ case exprpb.Type_ANY:
+ return "any"
+ case exprpb.Type_DURATION:
+ return "duration"
+ case exprpb.Type_TIMESTAMP:
+ return "timestamp"
+ }
+ case kindWrapper:
+ return fmt.Sprintf("wrapper(%s)",
+ FormatCheckedType(chkdecls.NewPrimitiveType(t.GetWrapper())))
+ case kindError:
+ return "!error!"
+ case kindTypeParam:
+ return t.GetTypeParam()
+ case kindAbstract:
+ at := t.GetAbstractType()
+ params := at.GetParameterTypes()
+ paramStrs := make([]string, len(params))
+ for i, p := range params {
+ paramStrs[i] = FormatCheckedType(p)
+ }
+ return fmt.Sprintf("%s(%s)", at.GetName(), strings.Join(paramStrs, ", "))
+ }
+ return t.String()
+}
+
+type formatter func(any) string
+
+// FormatCELType formats a types.Type value to a string representation.
+//
+// The type formatting is identical to FormatCheckedType.
+func FormatCELType(t any) string {
+ dt := t.(*types.Type)
+ switch dt.Kind() {
+ case types.AnyKind:
+ return "any"
+ case types.DurationKind:
+ return "duration"
+ case types.ErrorKind:
+ return "!error!"
+ case types.NullTypeKind:
+ return "null"
+ case types.TimestampKind:
+ return "timestamp"
+ case types.TypeParamKind:
+ return dt.TypeName()
+ case types.OpaqueKind:
+ if dt.TypeName() == "function" {
+ // There is no explicit function type in the new types representation, so information like
+ // whether the function is a member function is absent.
+ return formatFunctionDeclType(dt.Parameters()[0], dt.Parameters()[1:], false)
+ }
+ case types.UnspecifiedKind:
+ return ""
+ }
+ if len(dt.Parameters()) == 0 {
+ return dt.DeclaredTypeName()
+ }
+ paramTypeNames := make([]string, 0, len(dt.Parameters()))
+ for _, p := range dt.Parameters() {
+ paramTypeNames = append(paramTypeNames, FormatCELType(p))
+ }
+ return fmt.Sprintf("%s(%s)", dt.TypeName(), strings.Join(paramTypeNames, ", "))
+}
+
+func formatExprType(t any) string {
+ if t == nil {
+ return ""
+ }
+ return FormatCheckedType(t.(*exprpb.Type))
+}
+
+func formatFunctionExprType(resultType *exprpb.Type, argTypes []*exprpb.Type, isInstance bool) string {
+ return formatFunctionInternal[*exprpb.Type](resultType, argTypes, isInstance, formatExprType)
+}
+
+func formatFunctionDeclType(resultType *types.Type, argTypes []*types.Type, isInstance bool) string {
+ return formatFunctionInternal[*types.Type](resultType, argTypes, isInstance, FormatCELType)
+}
+
+func formatFunctionInternal[T any](resultType T, argTypes []T, isInstance bool, format formatter) string {
+ result := ""
+ if isInstance {
+ target := argTypes[0]
+ argTypes = argTypes[1:]
+ result += format(target)
+ result += "."
+ }
+ result += "("
+ for i, arg := range argTypes {
+ if i > 0 {
+ result += ", "
+ }
+ result += format(arg)
+ }
+ result += ")"
+ rt := format(resultType)
+ if rt != "" {
+ result += " -> "
+ result += rt
+ }
+ return result
+}
+
+// kindOf returns the kind of the type as defined in the checked.proto.
+func kindOf(t *exprpb.Type) int {
+ if t == nil || t.TypeKind == nil {
+ return kindUnknown
+ }
+ switch t.GetTypeKind().(type) {
+ case *exprpb.Type_Error:
+ return kindError
+ case *exprpb.Type_Function:
+ return kindFunction
+ case *exprpb.Type_Dyn:
+ return kindDyn
+ case *exprpb.Type_Primitive:
+ return kindPrimitive
+ case *exprpb.Type_WellKnown:
+ return kindWellKnown
+ case *exprpb.Type_Wrapper:
+ return kindWrapper
+ case *exprpb.Type_Null:
+ return kindNull
+ case *exprpb.Type_Type:
+ return kindType
+ case *exprpb.Type_ListType_:
+ return kindList
+ case *exprpb.Type_MapType_:
+ return kindMap
+ case *exprpb.Type_MessageType:
+ return kindObject
+ case *exprpb.Type_TypeParam:
+ return kindTypeParam
+ case *exprpb.Type_AbstractType_:
+ return kindAbstract
+ }
+ return kindUnknown
+}
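
FormatCheckedType and FormatCELType render the proto-based and CEL-native type representations into the same compact notation. A small sketch of the expected outputs, derived from the cases handled above (chkdecls.String, chkdecls.Int, and chkdecls.Dyn are the predeclared proto type values from checker/decls):

```go
package checker

import (
	"fmt"

	chkdecls "github.com/google/cel-go/checker/decls"
	"github.com/google/cel-go/common/types"
)

func exampleFormatting() {
	// Proto-based types go through FormatCheckedType.
	fmt.Println(FormatCheckedType(chkdecls.NewMapType(chkdecls.String, chkdecls.Int))) // map(string, int)
	fmt.Println(FormatCheckedType(chkdecls.NewListType(chkdecls.Dyn)))                 // list(dyn)

	// CEL-native types go through FormatCELType and use the same notation.
	fmt.Println(FormatCELType(types.NewListType(types.IntType))) // list(int)
}
```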
diff --git a/vendor/github.com/google/cel-go/checker/mapping.go b/vendor/github.com/google/cel-go/checker/mapping.go
new file mode 100644
index 0000000..8163a90
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/mapping.go
@@ -0,0 +1,49 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+import (
+ "github.com/google/cel-go/common/types"
+)
+
+type mapping struct {
+ mapping map[string]*types.Type
+}
+
+func newMapping() *mapping {
+ return &mapping{
+ mapping: make(map[string]*types.Type),
+ }
+}
+
+func (m *mapping) add(from, to *types.Type) {
+ m.mapping[FormatCELType(from)] = to
+}
+
+func (m *mapping) find(from *types.Type) (*types.Type, bool) {
+ if r, found := m.mapping[FormatCELType(from)]; found {
+ return r, found
+ }
+ return nil, false
+}
+
+func (m *mapping) copy() *mapping {
+ c := newMapping()
+
+ for k, v := range m.mapping {
+ c.mapping[k] = v
+ }
+ return c
+}
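
The mapping type keys substitutions by the formatted type name rather than by pointer identity, so any structurally identical instance of a type parameter resolves to the same entry. A brief in-package sketch, since mapping is unexported:

```go
package checker

import "github.com/google/cel-go/common/types"

func exampleMapping() {
	m := newMapping()
	m.add(types.NewTypeParamType("T"), types.IntType)

	// A fresh but structurally identical instance of T finds the entry,
	// because keys are FormatCELType strings, not pointers.
	if sub, found := m.find(types.NewTypeParamType("T")); found {
		_ = sub // types.IntType
	}

	// copy() supports speculative unification: a failed attempt on the
	// copy leaves the original substitution set untouched.
	speculative := m.copy()
	speculative.add(types.NewTypeParamType("U"), types.StringType)
}
```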
diff --git a/vendor/github.com/google/cel-go/checker/options.go b/vendor/github.com/google/cel-go/checker/options.go
new file mode 100644
index 0000000..0560c38
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/options.go
@@ -0,0 +1,42 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+type options struct {
+ crossTypeNumericComparisons bool
+ homogeneousAggregateLiterals bool
+ validatedDeclarations *Scopes
+}
+
+// Option is a functional option for configuring the type-checker
+type Option func(*options) error
+
+// CrossTypeNumericComparisons toggles type-checker support for numeric comparisons across types.
+// See https://github.com/google/cel-spec/wiki/proposal-210 for more details.
+func CrossTypeNumericComparisons(enabled bool) Option {
+ return func(opts *options) error {
+ opts.crossTypeNumericComparisons = enabled
+ return nil
+ }
+}
+
+// ValidatedDeclarations provides a reference to validated declarations which will be copied
+// into new checker instances.
+func ValidatedDeclarations(env *Env) Option {
+ return func(opts *options) error {
+ opts.validatedDeclarations = env.validatedDeclarations()
+ return nil
+ }
+}
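
These are standard functional options: each Option mutates a private options struct and may return an error. A minimal sketch of how they are applied when constructing a checker environment (the loop mirrors what a variadic constructor such as NewEnv would do internally; the function name is illustrative):

```go
package checker

func exampleApplyOptions(optFns ...Option) (*options, error) {
	opts := &options{}
	for _, opt := range optFns {
		if err := opt(opts); err != nil {
			return nil, err
		}
	}
	return opts, nil
}

// Usage: exampleApplyOptions(CrossTypeNumericComparisons(true))
// yields an options value with crossTypeNumericComparisons set.
```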
diff --git a/vendor/github.com/google/cel-go/checker/printer.go b/vendor/github.com/google/cel-go/checker/printer.go
new file mode 100644
index 0000000..15cba06
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/printer.go
@@ -0,0 +1,74 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+import (
+ "sort"
+
+ "github.com/google/cel-go/common/debug"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+type semanticAdorner struct {
+ checks *exprpb.CheckedExpr
+}
+
+var _ debug.Adorner = &semanticAdorner{}
+
+func (a *semanticAdorner) GetMetadata(elem any) string {
+ result := ""
+ e, isExpr := elem.(*exprpb.Expr)
+ if !isExpr {
+ return result
+ }
+ t := a.checks.TypeMap[e.GetId()]
+ if t != nil {
+ result += "~"
+ result += FormatCheckedType(t)
+ }
+
+ switch e.GetExprKind().(type) {
+ case *exprpb.Expr_IdentExpr,
+ *exprpb.Expr_CallExpr,
+ *exprpb.Expr_StructExpr,
+ *exprpb.Expr_SelectExpr:
+ if ref, found := a.checks.ReferenceMap[e.GetId()]; found {
+ if len(ref.GetOverloadId()) == 0 {
+ result += "^" + ref.Name
+ } else {
+ sort.Strings(ref.GetOverloadId())
+ for i, overload := range ref.GetOverloadId() {
+ if i == 0 {
+ result += "^"
+ } else {
+ result += "|"
+ }
+ result += overload
+ }
+ }
+ }
+ }
+
+ return result
+}
+
+// Print returns a string representation of the Expr message,
+// annotated with types from the CheckedExpr. The Expr must
+// be a sub-expression embedded in the CheckedExpr.
+func Print(e *exprpb.Expr, checks *exprpb.CheckedExpr) string {
+ a := &semanticAdorner{checks: checks}
+ return debug.ToAdornedDebugString(e, a)
+}
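
Print adorns each expression node with its checked type (~type) and any resolved reference (^name or ^overload_id), which is primarily useful when debugging checker output. A sketch, assuming a CheckedExpr produced by a prior successful check:

```go
package checker

import (
	"fmt"

	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)

// examplePrint assumes `checked` came from an earlier successful check.
func examplePrint(checked *exprpb.CheckedExpr) {
	// An identifier `a` of type int with a resolved reference may render
	// along the lines of `a~int^a`.
	fmt.Println(Print(checked.GetExpr(), checked))
}
```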
diff --git a/vendor/github.com/google/cel-go/checker/scopes.go b/vendor/github.com/google/cel-go/checker/scopes.go
new file mode 100644
index 0000000..8bb73dd
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/scopes.go
@@ -0,0 +1,147 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+import (
+ "github.com/google/cel-go/common/decls"
+)
+
+// Scopes represents nested Decl sets where the Scopes value contains a Groups containing all
+// identifiers in scope and an optional parent representing outer scopes.
+// Each Groups value is a mapping of names to Decls in the ident and function namespaces.
+// Lookups are performed such that bindings in inner scopes shadow those in outer scopes.
+type Scopes struct {
+ parent *Scopes
+ scopes *Group
+}
+
+// newScopes creates a new, empty Scopes.
+// Some operations can't be safely performed until a Group is added with Push.
+func newScopes() *Scopes {
+ return &Scopes{
+ scopes: newGroup(),
+ }
+}
+
+// Copy creates a copy of the current Scopes values, including a copy of its parent if non-nil.
+func (s *Scopes) Copy() *Scopes {
+ cpy := newScopes()
+ if s == nil {
+ return cpy
+ }
+ if s.parent != nil {
+ cpy.parent = s.parent.Copy()
+ }
+ cpy.scopes = s.scopes.copy()
+ return cpy
+}
+
+// Push creates a new Scopes value which references the current Scope as its parent.
+func (s *Scopes) Push() *Scopes {
+ return &Scopes{
+ parent: s,
+ scopes: newGroup(),
+ }
+}
+
+// Pop returns the parent Scopes value for the current scope, or the current scope if the parent
+// is nil.
+func (s *Scopes) Pop() *Scopes {
+ if s.parent != nil {
+ return s.parent
+ }
+ // TODO: Consider whether this should be an error / panic.
+ return s
+}
+
+// AddIdent adds the ident Decl in the current scope.
+// Note: If the name collides with an existing identifier in the scope, the Decl is overwritten.
+func (s *Scopes) AddIdent(decl *decls.VariableDecl) {
+ s.scopes.idents[decl.Name()] = decl
+}
+
+// FindIdent finds the first ident Decl with a matching name in Scopes, or nil if one cannot be
+// found.
+// Note: The search is performed from innermost to outermost.
+func (s *Scopes) FindIdent(name string) *decls.VariableDecl {
+ if ident, found := s.scopes.idents[name]; found {
+ return ident
+ }
+ if s.parent != nil {
+ return s.parent.FindIdent(name)
+ }
+ return nil
+}
+
+// FindIdentInScope finds the first ident Decl with a matching name in the current Scopes value, or
+// nil if one does not exist.
+// Note: The search is only performed on the current scope and does not search outer scopes.
+func (s *Scopes) FindIdentInScope(name string) *decls.VariableDecl {
+ if ident, found := s.scopes.idents[name]; found {
+ return ident
+ }
+ return nil
+}
+
+// SetFunction adds the function Decl to the current scope.
+// Note: Any previous entry for a function in the current scope with the same name is overwritten.
+func (s *Scopes) SetFunction(fn *decls.FunctionDecl) {
+ s.scopes.functions[fn.Name()] = fn
+}
+
+// FindFunction finds the first function Decl with a matching name in Scopes.
+// The search is performed from innermost to outermost.
+// Returns nil if no such function in Scopes.
+func (s *Scopes) FindFunction(name string) *decls.FunctionDecl {
+ if fn, found := s.scopes.functions[name]; found {
+ return fn
+ }
+ if s.parent != nil {
+ return s.parent.FindFunction(name)
+ }
+ return nil
+}
+
+// Group is a set of Decls that is pushed on or popped off a Scopes as a unit.
+// Contains separate namespaces for identifier and function Decls.
+// (Should be named "Scope" perhaps?)
+type Group struct {
+ idents map[string]*decls.VariableDecl
+ functions map[string]*decls.FunctionDecl
+}
+
+// copy creates a new Group instance with a shallow copy of the variables and functions.
+// If callers need to mutate the exprpb.Decl definitions for a Function, they should copy-on-write.
+func (g *Group) copy() *Group {
+ cpy := &Group{
+ idents: make(map[string]*decls.VariableDecl, len(g.idents)),
+ functions: make(map[string]*decls.FunctionDecl, len(g.functions)),
+ }
+ for n, id := range g.idents {
+ cpy.idents[n] = id
+ }
+ for n, fn := range g.functions {
+ cpy.functions[n] = fn
+ }
+ return cpy
+}
+
+// newGroup creates a new Group with empty maps for identifiers and functions.
+func newGroup() *Group {
+ return &Group{
+ idents: make(map[string]*decls.VariableDecl),
+ functions: make(map[string]*decls.FunctionDecl),
+ }
+}
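
The shadowing behavior documented above is easiest to see side by side: an inner Group hides, but does not replace, the outer binding. A short in-package sketch (decls.NewVariable is the cel-go common/decls constructor):

```go
package checker

import (
	"github.com/google/cel-go/common/decls"
	"github.com/google/cel-go/common/types"
)

func exampleScopes() {
	outer := newScopes()
	outer.AddIdent(decls.NewVariable("x", types.IntType))

	// Push adds an inner Group; its bindings shadow the outer scope.
	inner := outer.Push()
	inner.AddIdent(decls.NewVariable("x", types.StringType))

	_ = inner.FindIdent("x").Type()       // string: innermost binding wins
	_ = inner.Pop().FindIdent("x").Type() // int: outer binding is intact
	_ = inner.FindIdentInScope("x")       // non-nil: only checks this scope
}
```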
diff --git a/vendor/github.com/google/cel-go/checker/standard.go b/vendor/github.com/google/cel-go/checker/standard.go
new file mode 100644
index 0000000..11b35b8
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/standard.go
@@ -0,0 +1,35 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+import (
+ "github.com/google/cel-go/common/stdlib"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// StandardFunctions returns the Decls for all functions in the evaluator.
+//
+// Deprecated: prefer stdlib.FunctionExprDecls()
+func StandardFunctions() []*exprpb.Decl {
+ return stdlib.FunctionExprDecls()
+}
+
+// StandardTypes returns the set of type identifiers for standard library types.
+//
+// Deprecated: prefer stdlib.TypeExprDecls()
+func StandardTypes() []*exprpb.Decl {
+ return stdlib.TypeExprDecls()
+}
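
Both wrappers simply delegate, so migrating off the deprecated names is mechanical. A sketch of the equivalent call using the stdlib entry points the deprecation notices point to:

```go
package checker

import (
	"github.com/google/cel-go/common/stdlib"

	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)

// exampleStdlibDecls returns the same declarations as
// append(StandardFunctions(), StandardTypes()...) without the deprecated API.
func exampleStdlibDecls() []*exprpb.Decl {
	return append(stdlib.FunctionExprDecls(), stdlib.TypeExprDecls()...)
}
```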
diff --git a/vendor/github.com/google/cel-go/checker/types.go b/vendor/github.com/google/cel-go/checker/types.go
new file mode 100644
index 0000000..e2373d1
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/types.go
@@ -0,0 +1,309 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+import (
+ "github.com/google/cel-go/common/types"
+)
+
+// isDyn returns true if the input t is either type DYN or a well-known ANY message.
+func isDyn(t *types.Type) bool {
+ // Note: object type values that are well-known and map to a DYN value in practice
+ // are sanitized prior to being added to the environment.
+ switch t.Kind() {
+ case types.DynKind, types.AnyKind:
+ return true
+ default:
+ return false
+ }
+}
+
+// isDynOrError returns true if the input is either an Error, DYN, or well-known ANY message.
+func isDynOrError(t *types.Type) bool {
+ return isError(t) || isDyn(t)
+}
+
+func isError(t *types.Type) bool {
+ return t.Kind() == types.ErrorKind
+}
+
+func isOptional(t *types.Type) bool {
+ if t.Kind() == types.OpaqueKind {
+ return t.TypeName() == "optional"
+ }
+ return false
+}
+
+func maybeUnwrapOptional(t *types.Type) (*types.Type, bool) {
+ if isOptional(t) {
+ return t.Parameters()[0], true
+ }
+ return t, false
+}
+
+// isEqualOrLessSpecific checks whether one type is equal or less specific than the other one.
+// A type is less specific if it matches the other type using the DYN type.
+func isEqualOrLessSpecific(t1, t2 *types.Type) bool {
+ kind1, kind2 := t1.Kind(), t2.Kind()
+ // The first type is less specific.
+ if isDyn(t1) || kind1 == types.TypeParamKind {
+ return true
+ }
+ // The first type is not less specific.
+ if isDyn(t2) || kind2 == types.TypeParamKind {
+ return false
+ }
+ // Types must be of the same kind to be equal.
+ if kind1 != kind2 {
+ return false
+ }
+
+ // With limited exceptions for ANY and JSON values, the types must agree and be equivalent in
+ // order to return true.
+ switch kind1 {
+ case types.OpaqueKind:
+ if t1.TypeName() != t2.TypeName() ||
+ len(t1.Parameters()) != len(t2.Parameters()) {
+ return false
+ }
+ for i, p1 := range t1.Parameters() {
+ if !isEqualOrLessSpecific(p1, t2.Parameters()[i]) {
+ return false
+ }
+ }
+ return true
+ case types.ListKind:
+ return isEqualOrLessSpecific(t1.Parameters()[0], t2.Parameters()[0])
+ case types.MapKind:
+ return isEqualOrLessSpecific(t1.Parameters()[0], t2.Parameters()[0]) &&
+ isEqualOrLessSpecific(t1.Parameters()[1], t2.Parameters()[1])
+ case types.TypeKind:
+ return true
+ default:
+ return t1.IsExactType(t2)
+ }
+}
+
+// internalIsAssignable returns true if t1 is assignable to t2.
+func internalIsAssignable(m *mapping, t1, t2 *types.Type) bool {
+ // Process type parameters.
+ kind1, kind2 := t1.Kind(), t2.Kind()
+ if kind2 == types.TypeParamKind {
+ // If t2 is a valid type substitution for t1, return true.
+ valid, t2HasSub := isValidTypeSubstitution(m, t1, t2)
+ if valid {
+ return true
+ }
+ // If t2 is not a valid type substitution for t1 and already has a known substitution,
+ // return false since it is not possible for t1 to be a substitution for t2.
+ if !valid && t2HasSub {
+ return false
+ }
+ // Otherwise, fall through to check whether t1 is a possible substitution for t2.
+ }
+ if kind1 == types.TypeParamKind {
+ // Return whether t1 is a valid substitution for t2. If not, do no additional checks as the
+ // possible type substitutions have been searched in both directions.
+ valid, _ := isValidTypeSubstitution(m, t2, t1)
+ return valid
+ }
+
+ // Next check for wildcard types.
+ if isDynOrError(t1) || isDynOrError(t2) {
+ return true
+ }
+ // Preserve the nullness checks of the legacy type-checker.
+ if kind1 == types.NullTypeKind {
+ return internalIsAssignableNull(t2)
+ }
+ if kind2 == types.NullTypeKind {
+ return internalIsAssignableNull(t1)
+ }
+
+ // Test for when the types do not need to agree, but are more specific than dyn.
+ switch kind1 {
+ case types.BoolKind, types.BytesKind, types.DoubleKind, types.IntKind, types.StringKind, types.UintKind,
+ types.AnyKind, types.DurationKind, types.TimestampKind,
+ types.StructKind:
+ return t1.IsAssignableType(t2)
+ case types.TypeKind:
+ return kind2 == types.TypeKind
+ case types.OpaqueKind, types.ListKind, types.MapKind:
+ return t1.Kind() == t2.Kind() && t1.TypeName() == t2.TypeName() &&
+ internalIsAssignableList(m, t1.Parameters(), t2.Parameters())
+ default:
+ return false
+ }
+}
+
+// isValidTypeSubstitution returns whether t2 (or its type substitution) is a valid type
+// substitution for t1, and whether t2 has a type substitution in mapping m.
+//
+// The type t2 is a valid substitution for t1 if any of the following statements is true
+// - t2 has a type substitution (t2sub) equal to t1
+// - t2 has a type substitution (t2sub) assignable to t1
+// - t2 does not occur within t1.
+func isValidTypeSubstitution(m *mapping, t1, t2 *types.Type) (valid, hasSub bool) {
+ // Early return if t1 and t2 are the same instance.
+ kind1, kind2 := t1.Kind(), t2.Kind()
+ if kind1 == kind2 && t1.IsExactType(t2) {
+ return true, true
+ }
+ if t2Sub, found := m.find(t2); found {
+ // Early return if t1 and t2Sub are the same instance, as otherwise the mapping
+ // might mark a type as being a substitution for itself.
+ if kind1 == t2Sub.Kind() && t1.IsExactType(t2Sub) {
+ return true, true
+ }
+ // If the types are compatible, pick the more general type and return true
+ if internalIsAssignable(m, t1, t2Sub) {
+ t2New := mostGeneral(t1, t2Sub)
+ // only update the type reference map if the target type does not occur within it.
+ if notReferencedIn(m, t2, t2New) {
+ m.add(t2, t2New)
+ }
+ // acknowledge the type agreement, and that the substitution is already tracked.
+ return true, true
+ }
+ return false, true
+ }
+ if notReferencedIn(m, t2, t1) {
+ m.add(t2, t1)
+ return true, false
+ }
+ return false, false
+}
+
+// internalIsAssignableList returns true if the element types at each index in the list are
+// assignable from l1[i] to l2[i]. The list lengths must also agree for the lists to be
+// assignable.
+func internalIsAssignableList(m *mapping, l1, l2 []*types.Type) bool {
+ if len(l1) != len(l2) {
+ return false
+ }
+ for i, t1 := range l1 {
+ if !internalIsAssignable(m, t1, l2[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// internalIsAssignableNull returns true if the type is nullable.
+func internalIsAssignableNull(t *types.Type) bool {
+ return isLegacyNullable(t) || t.IsAssignableType(types.NullType)
+}
+
+// isLegacyNullable preserves the null-ness compatibility of the original type-checker implementation.
+func isLegacyNullable(t *types.Type) bool {
+ switch t.Kind() {
+ case types.OpaqueKind, types.StructKind, types.AnyKind, types.DurationKind, types.TimestampKind:
+ return true
+ }
+ return false
+}
+
+// isAssignable returns an updated type substitution mapping if t1 is assignable to t2.
+func isAssignable(m *mapping, t1, t2 *types.Type) *mapping {
+ mCopy := m.copy()
+ if internalIsAssignable(mCopy, t1, t2) {
+ return mCopy
+ }
+ return nil
+}
+
+// isAssignableList returns an updated type substitution mapping if l1 is assignable to l2.
+func isAssignableList(m *mapping, l1, l2 []*types.Type) *mapping {
+ mCopy := m.copy()
+ if internalIsAssignableList(mCopy, l1, l2) {
+ return mCopy
+ }
+ return nil
+}
+
+// mostGeneral returns the more general of two types which are known to unify.
+func mostGeneral(t1, t2 *types.Type) *types.Type {
+ if isEqualOrLessSpecific(t1, t2) {
+ return t1
+ }
+ return t2
+}
+
+// notReferencedIn checks whether the type doesn't appear directly or transitively within the other
+// type. This is a standard requirement for type unification, commonly referred to as the "occurs
+// check".
+func notReferencedIn(m *mapping, t, withinType *types.Type) bool {
+ if t.IsExactType(withinType) {
+ return false
+ }
+ withinKind := withinType.Kind()
+ switch withinKind {
+ case types.TypeParamKind:
+ wtSub, found := m.find(withinType)
+ if !found {
+ return true
+ }
+ return notReferencedIn(m, t, wtSub)
+ case types.OpaqueKind, types.ListKind, types.MapKind:
+ for _, pt := range withinType.Parameters() {
+ if !notReferencedIn(m, t, pt) {
+ return false
+ }
+ }
+ return true
+ default:
+ return true
+ }
+}
+
+// substitute replaces all direct and indirect occurrences of bound type parameters. Unbound type
+// parameters are replaced by DYN if typeParamToDyn is true.
+func substitute(m *mapping, t *types.Type, typeParamToDyn bool) *types.Type {
+ if tSub, found := m.find(t); found {
+ return substitute(m, tSub, typeParamToDyn)
+ }
+ kind := t.Kind()
+ if typeParamToDyn && kind == types.TypeParamKind {
+ return types.DynType
+ }
+ switch kind {
+ case types.OpaqueKind:
+ return types.NewOpaqueType(t.TypeName(), substituteParams(m, t.Parameters(), typeParamToDyn)...)
+ case types.ListKind:
+ return types.NewListType(substitute(m, t.Parameters()[0], typeParamToDyn))
+ case types.MapKind:
+ return types.NewMapType(substitute(m, t.Parameters()[0], typeParamToDyn),
+ substitute(m, t.Parameters()[1], typeParamToDyn))
+ case types.TypeKind:
+ if len(t.Parameters()) > 0 {
+ return types.NewTypeTypeWithParam(substitute(m, t.Parameters()[0], typeParamToDyn))
+ }
+ return t
+ default:
+ return t
+ }
+}
+
+func substituteParams(m *mapping, typeParams []*types.Type, typeParamToDyn bool) []*types.Type {
+ subParams := make([]*types.Type, len(typeParams))
+ for i, tp := range typeParams {
+ subParams[i] = substitute(m, tp, typeParamToDyn)
+ }
+ return subParams
+}
+
+func newFunctionType(resultType *types.Type, argTypes ...*types.Type) *types.Type {
+ return types.NewOpaqueType("function", append([]*types.Type{resultType}, argTypes...)...)
+}
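
The interplay of internalIsAssignable, the occurs check, and substitute is the core of the checker's unification. A compact in-package sketch: unifying list(int) against list(T) records T -> int in the mapping, after which substitute rewrites list(T) into list(int):

```go
package checker

import "github.com/google/cel-go/common/types"

func exampleUnification() *types.Type {
	m := newMapping()
	listOfT := types.NewListType(types.NewTypeParamType("T"))
	listOfInt := types.NewListType(types.IntType)

	// Assignability walks the element parameters and binds T -> int in m.
	if !internalIsAssignable(m, listOfInt, listOfT) {
		return nil
	}
	// substitute resolves the bound parameter: list(T) becomes list(int).
	return substitute(m, listOfT, false)
}
```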
diff --git a/vendor/github.com/google/cel-go/common/BUILD.bazel b/vendor/github.com/google/cel-go/common/BUILD.bazel
new file mode 100644
index 0000000..d6165b1
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/BUILD.bazel
@@ -0,0 +1,35 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "cost.go",
+ "error.go",
+ "errors.go",
+ "location.go",
+ "source.go",
+ ],
+ importpath = "github.com/google/cel-go/common",
+ deps = [
+ "//common/runes:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_x_text//width:go_default_library",
+ ],
+)
+
+go_test(
+ name = "go_default_test",
+ size = "small",
+ srcs = [
+ "errors_test.go",
+ "source_test.go",
+ ],
+ embed = [
+ ":go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/common/ast/BUILD.bazel b/vendor/github.com/google/cel-go/common/ast/BUILD.bazel
new file mode 100644
index 0000000..7269cdf
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/ast/BUILD.bazel
@@ -0,0 +1,52 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ default_visibility = [
+ "//cel:__subpackages__",
+ "//checker:__subpackages__",
+ "//common:__subpackages__",
+ "//interpreter:__subpackages__",
+ ],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "ast.go",
+ "expr.go",
+ ],
+ importpath = "github.com/google/cel-go/common/ast",
+ deps = [
+ "//common/types:go_default_library",
+ "//common/types/ref:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//types/known/structpb:go_default_library",
+ ],
+)
+
+go_test(
+ name = "go_default_test",
+ srcs = [
+ "ast_test.go",
+ "expr_test.go",
+ ],
+ embed = [
+ ":go_default_library",
+ ],
+ deps = [
+ "//checker:go_default_library",
+ "//checker/decls:go_default_library",
+ "//common:go_default_library",
+ "//common/containers:go_default_library",
+ "//common/decls:go_default_library",
+ "//common/overloads:go_default_library",
+ "//common/stdlib:go_default_library",
+ "//common/types:go_default_library",
+ "//common/types/ref:go_default_library",
+ "//parser:go_default_library",
+ "//test/proto3pb:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ ],
+)
\ No newline at end of file
diff --git a/vendor/github.com/google/cel-go/common/ast/ast.go b/vendor/github.com/google/cel-go/common/ast/ast.go
new file mode 100644
index 0000000..b3c1507
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/ast/ast.go
@@ -0,0 +1,226 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package ast declares data structures useful for parsed and checked abstract syntax trees
+package ast
+
+import (
+ "fmt"
+
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+
+ structpb "google.golang.org/protobuf/types/known/structpb"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// CheckedAST contains a protobuf expression and source info along with CEL-native type and reference information.
+type CheckedAST struct {
+ Expr *exprpb.Expr
+ SourceInfo *exprpb.SourceInfo
+ TypeMap map[int64]*types.Type
+ ReferenceMap map[int64]*ReferenceInfo
+}
+
+// CheckedASTToCheckedExpr converts a CheckedAST to a CheckedExpr protobuf.
+func CheckedASTToCheckedExpr(ast *CheckedAST) (*exprpb.CheckedExpr, error) {
+ refMap := make(map[int64]*exprpb.Reference, len(ast.ReferenceMap))
+ for id, ref := range ast.ReferenceMap {
+ r, err := ReferenceInfoToReferenceExpr(ref)
+ if err != nil {
+ return nil, err
+ }
+ refMap[id] = r
+ }
+ typeMap := make(map[int64]*exprpb.Type, len(ast.TypeMap))
+ for id, typ := range ast.TypeMap {
+ t, err := types.TypeToExprType(typ)
+ if err != nil {
+ return nil, err
+ }
+ typeMap[id] = t
+ }
+ return &exprpb.CheckedExpr{
+ Expr: ast.Expr,
+ SourceInfo: ast.SourceInfo,
+ ReferenceMap: refMap,
+ TypeMap: typeMap,
+ }, nil
+}
+
+// CheckedExprToCheckedAST converts a CheckedExpr protobuf to a CheckedAST instance.
+func CheckedExprToCheckedAST(checked *exprpb.CheckedExpr) (*CheckedAST, error) {
+ refMap := make(map[int64]*ReferenceInfo, len(checked.GetReferenceMap()))
+ for id, ref := range checked.GetReferenceMap() {
+ r, err := ReferenceExprToReferenceInfo(ref)
+ if err != nil {
+ return nil, err
+ }
+ refMap[id] = r
+ }
+ typeMap := make(map[int64]*types.Type, len(checked.GetTypeMap()))
+ for id, typ := range checked.GetTypeMap() {
+ t, err := types.ExprTypeToType(typ)
+ if err != nil {
+ return nil, err
+ }
+ typeMap[id] = t
+ }
+ return &CheckedAST{
+ Expr: checked.GetExpr(),
+ SourceInfo: checked.GetSourceInfo(),
+ ReferenceMap: refMap,
+ TypeMap: typeMap,
+ }, nil
+}
+
+// ReferenceInfo contains a CEL-native representation of an identifier reference which may refer to
+// either a qualified identifier name, a set of overload ids, or a constant value from an enum.
+type ReferenceInfo struct {
+ Name string
+ OverloadIDs []string
+ Value ref.Val
+}
+
+// NewIdentReference creates a ReferenceInfo instance for an identifier with an optional constant value.
+func NewIdentReference(name string, value ref.Val) *ReferenceInfo {
+ return &ReferenceInfo{Name: name, Value: value}
+}
+
+// NewFunctionReference creates a ReferenceInfo instance for a set of function overloads.
+func NewFunctionReference(overloads ...string) *ReferenceInfo {
+ info := &ReferenceInfo{}
+ for _, id := range overloads {
+ info.AddOverload(id)
+ }
+ return info
+}
+
+// AddOverload appends a function overload ID to the ReferenceInfo.
+func (r *ReferenceInfo) AddOverload(overloadID string) {
+ for _, id := range r.OverloadIDs {
+ if id == overloadID {
+ return
+ }
+ }
+ r.OverloadIDs = append(r.OverloadIDs, overloadID)
+}
+
+// Equals returns whether two references are identical to each other.
+func (r *ReferenceInfo) Equals(other *ReferenceInfo) bool {
+ if r.Name != other.Name {
+ return false
+ }
+ if len(r.OverloadIDs) != len(other.OverloadIDs) {
+ return false
+ }
+ if len(r.OverloadIDs) != 0 {
+ overloadMap := make(map[string]struct{}, len(r.OverloadIDs))
+ for _, id := range r.OverloadIDs {
+ overloadMap[id] = struct{}{}
+ }
+ for _, id := range other.OverloadIDs {
+ _, found := overloadMap[id]
+ if !found {
+ return false
+ }
+ }
+ }
+ if r.Value == nil && other.Value == nil {
+ return true
+ }
+ if r.Value == nil && other.Value != nil ||
+ r.Value != nil && other.Value == nil ||
+ r.Value.Equal(other.Value) != types.True {
+ return false
+ }
+ return true
+}
+
+// ReferenceInfoToReferenceExpr converts a ReferenceInfo instance to a protobuf Reference suitable for serialization.
+func ReferenceInfoToReferenceExpr(info *ReferenceInfo) (*exprpb.Reference, error) {
+ c, err := ValToConstant(info.Value)
+ if err != nil {
+ return nil, err
+ }
+ return &exprpb.Reference{
+ Name: info.Name,
+ OverloadId: info.OverloadIDs,
+ Value: c,
+ }, nil
+}
+
+// ReferenceExprToReferenceInfo converts a protobuf Reference into a CEL-native ReferenceInfo instance.
+func ReferenceExprToReferenceInfo(ref *exprpb.Reference) (*ReferenceInfo, error) {
+ v, err := ConstantToVal(ref.GetValue())
+ if err != nil {
+ return nil, err
+ }
+ return &ReferenceInfo{
+ Name: ref.GetName(),
+ OverloadIDs: ref.GetOverloadId(),
+ Value: v,
+ }, nil
+}
+
+// ValToConstant converts a CEL-native ref.Val to a protobuf Constant.
+//
+// Only simple scalar types are supported by this method.
+func ValToConstant(v ref.Val) (*exprpb.Constant, error) {
+ if v == nil {
+ return nil, nil
+ }
+ switch v.Type() {
+ case types.BoolType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: v.Value().(bool)}}, nil
+ case types.BytesType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: v.Value().([]byte)}}, nil
+ case types.DoubleType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: v.Value().(float64)}}, nil
+ case types.IntType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: v.Value().(int64)}}, nil
+ case types.NullType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_NullValue{NullValue: structpb.NullValue_NULL_VALUE}}, nil
+ case types.StringType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: v.Value().(string)}}, nil
+ case types.UintType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: v.Value().(uint64)}}, nil
+ }
+ return nil, fmt.Errorf("unsupported constant kind: %v", v.Type())
+}
+
+// ConstantToVal converts a protobuf Constant to a CEL-native ref.Val.
+func ConstantToVal(c *exprpb.Constant) (ref.Val, error) {
+ if c == nil {
+ return nil, nil
+ }
+ switch c.GetConstantKind().(type) {
+ case *exprpb.Constant_BoolValue:
+ return types.Bool(c.GetBoolValue()), nil
+ case *exprpb.Constant_BytesValue:
+ return types.Bytes(c.GetBytesValue()), nil
+ case *exprpb.Constant_DoubleValue:
+ return types.Double(c.GetDoubleValue()), nil
+ case *exprpb.Constant_Int64Value:
+ return types.Int(c.GetInt64Value()), nil
+ case *exprpb.Constant_NullValue:
+ return types.NullValue, nil
+ case *exprpb.Constant_StringValue:
+ return types.String(c.GetStringValue()), nil
+ case *exprpb.Constant_Uint64Value:
+ return types.Uint(c.GetUint64Value()), nil
+ }
+ return nil, fmt.Errorf("unsupported constant kind: %v", c.GetConstantKind())
+}
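
ValToConstant and ConstantToVal are intentionally symmetric for the scalar kinds enumerated above, so simple values survive a round trip through the protobuf Constant form. A small sketch:

```go
package ast

import "github.com/google/cel-go/common/types"

func exampleConstantRoundTrip() bool {
	c, err := ValToConstant(types.String("hello"))
	if err != nil {
		return false
	}
	v, err := ConstantToVal(c)
	if err != nil {
		return false
	}
	// Mirrors the Equals logic above: compare via ref.Val equality.
	return v.Equal(types.String("hello")) == types.True
}
```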
diff --git a/vendor/github.com/google/cel-go/common/ast/expr.go b/vendor/github.com/google/cel-go/common/ast/expr.go
new file mode 100644
index 0000000..b63884a
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/ast/expr.go
@@ -0,0 +1,709 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ast
+
+import (
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// ExprKind represents the expression node kind.
+type ExprKind int
+
+const (
+ // UnspecifiedKind represents an unset expression with no specified properties.
+ UnspecifiedKind ExprKind = iota
+
+ // LiteralKind represents a primitive scalar literal.
+ LiteralKind
+
+ // IdentKind represents a simple variable, constant, or type identifier.
+ IdentKind
+
+ // SelectKind represents a field selection expression.
+ SelectKind
+
+ // CallKind represents a function call.
+ CallKind
+
+ // ListKind represents a list literal expression.
+ ListKind
+
+ // MapKind represents a map literal expression.
+ MapKind
+
+ // StructKind represents a struct literal expression.
+ StructKind
+
+ // ComprehensionKind represents a comprehension expression generated by a macro.
+ ComprehensionKind
+)
+
+// NavigateCheckedAST converts a CheckedAST to a NavigableExpr
+func NavigateCheckedAST(ast *CheckedAST) NavigableExpr {
+ return newNavigableExpr(nil, ast.Expr, ast.TypeMap)
+}
+
+// ExprMatcher takes a NavigableExpr in and indicates whether the value is a match.
+//
+// This function type should be used with the `MatchDescendants` and `MatchSubset` calls.
+type ExprMatcher func(NavigableExpr) bool
+
+// ConstantValueMatcher returns an ExprMatcher which will return true if the input NavigableExpr
+// consists entirely of constant values, such as a simple literal or a list or map literal of constants.
+func ConstantValueMatcher() ExprMatcher {
+ return matchIsConstantValue
+}
+
+// KindMatcher returns an ExprMatcher which will return true if the input NavigableExpr.Kind() matches
+// the specified `kind`.
+func KindMatcher(kind ExprKind) ExprMatcher {
+ return func(e NavigableExpr) bool {
+ return e.Kind() == kind
+ }
+}
+
+// FunctionMatcher returns an ExprMatcher which will match NavigableExpr nodes of CallKind type whose
+// function name is equal to `funcName`.
+func FunctionMatcher(funcName string) ExprMatcher {
+ return func(e NavigableExpr) bool {
+ if e.Kind() != CallKind {
+ return false
+ }
+ return e.AsCall().FunctionName() == funcName
+ }
+}
+
+// AllMatcher returns true for all descendants of a NavigableExpr, effectively flattening them into a list.
+//
+// Such a result would work well with subsequent MatchSubset calls.
+func AllMatcher() ExprMatcher {
+ return func(NavigableExpr) bool {
+ return true
+ }
+}
+
+// MatchDescendants takes a NavigableExpr and ExprMatcher and produces a list of NavigableExpr values of the
+// descendants which match.
+func MatchDescendants(expr NavigableExpr, matcher ExprMatcher) []NavigableExpr {
+ return matchListInternal([]NavigableExpr{expr}, matcher, true)
+}
+
+// MatchSubset applies an ExprMatcher to a list of NavigableExpr values, producing the
+// subset of the input values which match. Unlike MatchDescendants, child expressions
+// are not visited.
+func MatchSubset(exprs []NavigableExpr, matcher ExprMatcher) []NavigableExpr {
+ visit := make([]NavigableExpr, len(exprs))
+ copy(visit, exprs)
+ return matchListInternal(visit, matcher, false)
+}
+
+func matchListInternal(visit []NavigableExpr, matcher ExprMatcher, visitDescendants bool) []NavigableExpr {
+ var matched []NavigableExpr
+ for len(visit) != 0 {
+ e := visit[0]
+ if matcher(e) {
+ matched = append(matched, e)
+ }
+ if visitDescendants {
+ visit = append(visit[1:], e.Children()...)
+ } else {
+ visit = visit[1:]
+ }
+ }
+ return matched
+}
+
+func matchIsConstantValue(e NavigableExpr) bool {
+ if e.Kind() == LiteralKind {
+ return true
+ }
+ if e.Kind() == StructKind || e.Kind() == MapKind || e.Kind() == ListKind {
+ for _, child := range e.Children() {
+ if !matchIsConstantValue(child) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+// NavigableExpr represents the base navigable expression value.
+//
+// Depending on the `Kind()` value, the NavigableExpr may be converted to a concrete expression type
+// as indicated by the `As` methods.
+//
+// NavigableExpr values and their concrete expression types should be nil-safe. Conversion of an expr
+// to the wrong kind should produce a nil value.
+type NavigableExpr interface {
+ // ID of the expression as it appears in the AST
+ ID() int64
+
+ // Kind of the expression node. See ExprKind for the valid enum values.
+ Kind() ExprKind
+
+ // Type of the expression node.
+ Type() *types.Type
+
+ // Parent returns the parent expression node, if one exists.
+ Parent() (NavigableExpr, bool)
+
+ // Children returns a list of child expression nodes.
+ Children() []NavigableExpr
+
+ // ToExpr adapts this NavigableExpr to a protobuf representation.
+ ToExpr() *exprpb.Expr
+
+ // AsCall adapts the expr into a NavigableCallExpr
+ //
+ // The Kind() must be equal to a CallKind for the conversion to be well-defined.
+ AsCall() NavigableCallExpr
+
+ // AsComprehension adapts the expr into a NavigableComprehensionExpr.
+ //
+ // The Kind() must be equal to a ComprehensionKind for the conversion to be well-defined.
+ AsComprehension() NavigableComprehensionExpr
+
+ // AsIdent adapts the expr into an identifier string.
+ //
+ // The Kind() must be equal to an IdentKind for the conversion to be well-defined.
+ AsIdent() string
+
+ // AsLiteral adapts the expr into a constant ref.Val.
+ //
+ // The Kind() must be equal to a LiteralKind for the conversion to be well-defined.
+ AsLiteral() ref.Val
+
+ // AsList adapts the expr into a NavigableListExpr.
+ //
+ // The Kind() must be equal to a ListKind for the conversion to be well-defined.
+ AsList() NavigableListExpr
+
+ // AsMap adapts the expr into a NavigableMapExpr.
+ //
+ // The Kind() must be equal to a MapKind for the conversion to be well-defined.
+ AsMap() NavigableMapExpr
+
+ // AsSelect adapts the expr into a NavigableSelectExpr.
+ //
+ // The Kind() must be equal to a SelectKind for the conversion to be well-defined.
+ AsSelect() NavigableSelectExpr
+
+ // AsStruct adapts the expr into a NavigableStructExpr.
+ //
+ // The Kind() must be equal to a StructKind for the conversion to be well-defined.
+ AsStruct() NavigableStructExpr
+
+ // marker interface method
+ isNavigable()
+}
+
+// NavigableCallExpr defines an interface for inspecting a function call and its arguments.
+type NavigableCallExpr interface {
+ // FunctionName returns the name of the function.
+ FunctionName() string
+
+ // Target returns the target of the expression if one is present.
+ Target() NavigableExpr
+
+ // Args returns the list of call arguments, excluding the target.
+ Args() []NavigableExpr
+
+ // ReturnType returns the result type of the call.
+ ReturnType() *types.Type
+
+ // marker interface method
+ isNavigable()
+}
+
+// NavigableListExpr defines an interface for inspecting a list literal expression.
+type NavigableListExpr interface {
+ // Elements returns the list elements as navigable expressions.
+ Elements() []NavigableExpr
+
+ // OptionalIndices returns the list of optional indices in the list literal.
+ OptionalIndices() []int32
+
+ // Size returns the number of elements in the list.
+ Size() int
+
+ // marker interface method
+ isNavigable()
+}
+
+// NavigableSelectExpr defines an interface for inspecting a select expression.
+type NavigableSelectExpr interface {
+ // Operand returns the selection operand expression.
+ Operand() NavigableExpr
+
+ // FieldName returns the field name being selected from the operand.
+ FieldName() string
+
+ // IsTestOnly indicates whether the select expression is a presence test generated by a macro.
+ IsTestOnly() bool
+
+ // marker interface method
+ isNavigable()
+}
+
+// NavigableMapExpr defines an interface for inspecting a map expression.
+type NavigableMapExpr interface {
+ // Entries returns the map key value pairs as NavigableEntry values.
+ Entries() []NavigableEntry
+
+ // Size returns the number of entries in the map.
+ Size() int
+
+ // marker interface method
+ isNavigable()
+}
+
+// NavigableEntry defines an interface for inspecting a map entry.
+type NavigableEntry interface {
+ // Key returns the map entry key expression.
+ Key() NavigableExpr
+
+ // Value returns the map entry value expression.
+ Value() NavigableExpr
+
+ // IsOptional returns whether the entry is optional.
+ IsOptional() bool
+
+ // marker interface method
+ isNavigable()
+}
+
+// NavigableStructExpr defines an interface for inspecting a struct and its field initializers.
+type NavigableStructExpr interface {
+ // TypeName returns the struct type name.
+ TypeName() string
+
+ // Fields returns the set of field initializers in the struct expression as NavigableField values.
+ Fields() []NavigableField
+
+ // marker interface method
+ isNavigable()
+}
+
+// NavigableField defines an interface for inspecting a struct field initialization.
+type NavigableField interface {
+ // FieldName returns the name of the field.
+ FieldName() string
+
+ // Value returns the field initialization expression.
+ Value() NavigableExpr
+
+ // IsOptional returns whether the field is optional.
+ IsOptional() bool
+
+ // marker interface method
+ isNavigable()
+}
+
+// NavigableComprehensionExpr defines an interface for inspecting a comprehension expression.
+type NavigableComprehensionExpr interface {
+ // IterRange returns the iteration range expression.
+ IterRange() NavigableExpr
+
+ // IterVar returns the iteration variable name.
+ IterVar() string
+
+ // AccuVar returns the accumulation variable name.
+ AccuVar() string
+
+ // AccuInit returns the accumulation variable initialization expression.
+ AccuInit() NavigableExpr
+
+ // LoopCondition returns the loop condition expression.
+ LoopCondition() NavigableExpr
+
+ // LoopStep returns the loop step expression.
+ LoopStep() NavigableExpr
+
+ // Result returns the comprehension result expression.
+ Result() NavigableExpr
+
+ // marker interface method
+ isNavigable()
+}
+
+func newNavigableExpr(parent NavigableExpr, expr *exprpb.Expr, typeMap map[int64]*types.Type) NavigableExpr {
+ kind, factory := kindOf(expr)
+ nav := &navigableExprImpl{
+ parent: parent,
+ kind: kind,
+ expr: expr,
+ typeMap: typeMap,
+ createChildren: factory,
+ }
+ return nav
+}
+
+type navigableExprImpl struct {
+ parent NavigableExpr
+ kind ExprKind
+ expr *exprpb.Expr
+ typeMap map[int64]*types.Type
+ createChildren childFactory
+}
+
+func (nav *navigableExprImpl) ID() int64 {
+ return nav.ToExpr().GetId()
+}
+
+func (nav *navigableExprImpl) Kind() ExprKind {
+ return nav.kind
+}
+
+func (nav *navigableExprImpl) Type() *types.Type {
+ if t, found := nav.typeMap[nav.ID()]; found {
+ return t
+ }
+ return types.DynType
+}
+
+func (nav *navigableExprImpl) Parent() (NavigableExpr, bool) {
+ if nav.parent != nil {
+ return nav.parent, true
+ }
+ return nil, false
+}
+
+func (nav *navigableExprImpl) Children() []NavigableExpr {
+ return nav.createChildren(nav)
+}
+
+func (nav *navigableExprImpl) ToExpr() *exprpb.Expr {
+ return nav.expr
+}
+
+func (nav *navigableExprImpl) AsCall() NavigableCallExpr {
+ return navigableCallImpl{navigableExprImpl: nav}
+}
+
+func (nav *navigableExprImpl) AsComprehension() NavigableComprehensionExpr {
+ return navigableComprehensionImpl{navigableExprImpl: nav}
+}
+
+func (nav *navigableExprImpl) AsIdent() string {
+ return nav.ToExpr().GetIdentExpr().GetName()
+}
+
+func (nav *navigableExprImpl) AsLiteral() ref.Val {
+ if nav.Kind() != LiteralKind {
+ return nil
+ }
+ val, err := ConstantToVal(nav.ToExpr().GetConstExpr())
+ if err != nil {
+ panic(err)
+ }
+ return val
+}
+
+func (nav *navigableExprImpl) AsList() NavigableListExpr {
+ return navigableListImpl{navigableExprImpl: nav}
+}
+
+func (nav *navigableExprImpl) AsMap() NavigableMapExpr {
+ return navigableMapImpl{navigableExprImpl: nav}
+}
+
+func (nav *navigableExprImpl) AsSelect() NavigableSelectExpr {
+ return navigableSelectImpl{navigableExprImpl: nav}
+}
+
+func (nav *navigableExprImpl) AsStruct() NavigableStructExpr {
+ return navigableStructImpl{navigableExprImpl: nav}
+}
+
+func (nav *navigableExprImpl) createChild(e *exprpb.Expr) NavigableExpr {
+ return newNavigableExpr(nav, e, nav.typeMap)
+}
+
+func (nav *navigableExprImpl) isNavigable() {}
+
+type navigableCallImpl struct {
+ *navigableExprImpl
+}
+
+func (call navigableCallImpl) FunctionName() string {
+ return call.ToExpr().GetCallExpr().GetFunction()
+}
+
+func (call navigableCallImpl) Target() NavigableExpr {
+ t := call.ToExpr().GetCallExpr().GetTarget()
+ if t != nil {
+ return call.createChild(t)
+ }
+ return nil
+}
+
+func (call navigableCallImpl) Args() []NavigableExpr {
+ args := call.ToExpr().GetCallExpr().GetArgs()
+ navArgs := make([]NavigableExpr, len(args))
+ for i, a := range args {
+ navArgs[i] = call.createChild(a)
+ }
+ return navArgs
+}
+
+func (call navigableCallImpl) ReturnType() *types.Type {
+ return call.Type()
+}
+
+type navigableComprehensionImpl struct {
+ *navigableExprImpl
+}
+
+func (comp navigableComprehensionImpl) IterRange() NavigableExpr {
+ return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetIterRange())
+}
+
+func (comp navigableComprehensionImpl) IterVar() string {
+ return comp.ToExpr().GetComprehensionExpr().GetIterVar()
+}
+
+func (comp navigableComprehensionImpl) AccuVar() string {
+ return comp.ToExpr().GetComprehensionExpr().GetAccuVar()
+}
+
+func (comp navigableComprehensionImpl) AccuInit() NavigableExpr {
+ return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetAccuInit())
+}
+
+func (comp navigableComprehensionImpl) LoopCondition() NavigableExpr {
+ return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetLoopCondition())
+}
+
+func (comp navigableComprehensionImpl) LoopStep() NavigableExpr {
+ return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetLoopStep())
+}
+
+func (comp navigableComprehensionImpl) Result() NavigableExpr {
+ return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetResult())
+}
+
+type navigableListImpl struct {
+ *navigableExprImpl
+}
+
+func (l navigableListImpl) Elements() []NavigableExpr {
+ return l.Children()
+}
+
+func (l navigableListImpl) OptionalIndices() []int32 {
+ return l.ToExpr().GetListExpr().GetOptionalIndices()
+}
+
+func (l navigableListImpl) Size() int {
+ return len(l.ToExpr().GetListExpr().GetElements())
+}
+
+type navigableMapImpl struct {
+ *navigableExprImpl
+}
+
+func (m navigableMapImpl) Entries() []NavigableEntry {
+ mapExpr := m.ToExpr().GetStructExpr()
+ entries := make([]NavigableEntry, len(mapExpr.GetEntries()))
+ for i, e := range mapExpr.GetEntries() {
+ entries[i] = navigableEntryImpl{
+ key: m.createChild(e.GetMapKey()),
+ val: m.createChild(e.GetValue()),
+ isOpt: e.GetOptionalEntry(),
+ }
+ }
+ return entries
+}
+
+func (m navigableMapImpl) Size() int {
+ return len(m.ToExpr().GetStructExpr().GetEntries())
+}
+
+type navigableEntryImpl struct {
+ key NavigableExpr
+ val NavigableExpr
+ isOpt bool
+}
+
+func (e navigableEntryImpl) Key() NavigableExpr {
+ return e.key
+}
+
+func (e navigableEntryImpl) Value() NavigableExpr {
+ return e.val
+}
+
+func (e navigableEntryImpl) IsOptional() bool {
+ return e.isOpt
+}
+
+func (e navigableEntryImpl) isNavigable() {}
+
+type navigableSelectImpl struct {
+ *navigableExprImpl
+}
+
+func (sel navigableSelectImpl) FieldName() string {
+ return sel.ToExpr().GetSelectExpr().GetField()
+}
+
+func (sel navigableSelectImpl) IsTestOnly() bool {
+ return sel.ToExpr().GetSelectExpr().GetTestOnly()
+}
+
+func (sel navigableSelectImpl) Operand() NavigableExpr {
+ return sel.createChild(sel.ToExpr().GetSelectExpr().GetOperand())
+}
+
+type navigableStructImpl struct {
+ *navigableExprImpl
+}
+
+func (s navigableStructImpl) TypeName() string {
+ return s.ToExpr().GetStructExpr().GetMessageName()
+}
+
+func (s navigableStructImpl) Fields() []NavigableField {
+ fieldInits := s.ToExpr().GetStructExpr().GetEntries()
+ fields := make([]NavigableField, len(fieldInits))
+ for i, f := range fieldInits {
+ fields[i] = navigableFieldImpl{
+ name: f.GetFieldKey(),
+ val: s.createChild(f.GetValue()),
+ isOpt: f.GetOptionalEntry(),
+ }
+ }
+ return fields
+}
+
+type navigableFieldImpl struct {
+ name string
+ val NavigableExpr
+ isOpt bool
+}
+
+func (f navigableFieldImpl) FieldName() string {
+ return f.name
+}
+
+func (f navigableFieldImpl) Value() NavigableExpr {
+ return f.val
+}
+
+func (f navigableFieldImpl) IsOptional() bool {
+ return f.isOpt
+}
+
+func (f navigableFieldImpl) isNavigable() {}
+
+func kindOf(expr *exprpb.Expr) (ExprKind, childFactory) {
+ switch expr.GetExprKind().(type) {
+ case *exprpb.Expr_ConstExpr:
+ return LiteralKind, noopFactory
+ case *exprpb.Expr_IdentExpr:
+ return IdentKind, noopFactory
+ case *exprpb.Expr_SelectExpr:
+ return SelectKind, selectFactory
+ case *exprpb.Expr_CallExpr:
+ return CallKind, callArgFactory
+ case *exprpb.Expr_ListExpr:
+ return ListKind, listElemFactory
+ case *exprpb.Expr_StructExpr:
+ if expr.GetStructExpr().GetMessageName() != "" {
+ return StructKind, structEntryFactory
+ }
+ return MapKind, mapEntryFactory
+ case *exprpb.Expr_ComprehensionExpr:
+ return ComprehensionKind, comprehensionFactory
+ default:
+ return UnspecifiedKind, noopFactory
+ }
+}
+
+type childFactory func(*navigableExprImpl) []NavigableExpr
+
+func noopFactory(*navigableExprImpl) []NavigableExpr {
+ return nil
+}
+
+func selectFactory(nav *navigableExprImpl) []NavigableExpr {
+ return []NavigableExpr{
+ nav.createChild(nav.ToExpr().GetSelectExpr().GetOperand()),
+ }
+}
+
+func callArgFactory(nav *navigableExprImpl) []NavigableExpr {
+ call := nav.ToExpr().GetCallExpr()
+ argCount := len(call.GetArgs())
+ if call.GetTarget() != nil {
+ argCount++
+ }
+ navExprs := make([]NavigableExpr, argCount)
+ i := 0
+ if call.GetTarget() != nil {
+ navExprs[i] = nav.createChild(call.GetTarget())
+ i++
+ }
+ for _, arg := range call.GetArgs() {
+ navExprs[i] = nav.createChild(arg)
+ i++
+ }
+ return navExprs
+}
+
+func listElemFactory(nav *navigableExprImpl) []NavigableExpr {
+ l := nav.ToExpr().GetListExpr()
+ navExprs := make([]NavigableExpr, len(l.GetElements()))
+ for i, e := range l.GetElements() {
+ navExprs[i] = nav.createChild(e)
+ }
+ return navExprs
+}
+
+func structEntryFactory(nav *navigableExprImpl) []NavigableExpr {
+ s := nav.ToExpr().GetStructExpr()
+ entries := make([]NavigableExpr, len(s.GetEntries()))
+ for i, e := range s.GetEntries() {
+ entries[i] = nav.createChild(e.GetValue())
+ }
+ return entries
+}
+
+func mapEntryFactory(nav *navigableExprImpl) []NavigableExpr {
+ s := nav.ToExpr().GetStructExpr()
+ entries := make([]NavigableExpr, len(s.GetEntries())*2)
+ j := 0
+ for _, e := range s.GetEntries() {
+ entries[j] = nav.createChild(e.GetMapKey())
+ entries[j+1] = nav.createChild(e.GetValue())
+ j += 2
+ }
+ return entries
+}
+
+func comprehensionFactory(nav *navigableExprImpl) []NavigableExpr {
+ compre := nav.ToExpr().GetComprehensionExpr()
+ return []NavigableExpr{
+ nav.createChild(compre.GetIterRange()),
+ nav.createChild(compre.GetAccuInit()),
+ nav.createChild(compre.GetLoopCondition()),
+ nav.createChild(compre.GetLoopStep()),
+ nav.createChild(compre.GetResult()),
+ }
+}
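+
+// Illustrative note (not part of the upstream file): for a comprehension such
+// as [1, 2].exists(i, i > 1), the children produced by comprehensionFactory
+// are visited in the order iter-range, accu-init, loop-condition, loop-step,
+// and finally result.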
diff --git a/vendor/github.com/google/cel-go/common/containers/BUILD.bazel b/vendor/github.com/google/cel-go/common/containers/BUILD.bazel
new file mode 100644
index 0000000..3f3f078
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/containers/BUILD.bazel
@@ -0,0 +1,31 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "container.go",
+ ],
+ importpath = "github.com/google/cel-go/common/containers",
+ deps = [
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ ],
+)
+
+go_test(
+ name = "go_default_test",
+ size = "small",
+ srcs = [
+ "container_test.go",
+ ],
+ embed = [
+ ":go_default_library",
+ ],
+ deps = [
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/common/containers/container.go b/vendor/github.com/google/cel-go/common/containers/container.go
new file mode 100644
index 0000000..d46698d
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/containers/container.go
@@ -0,0 +1,316 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package containers defines types and functions for resolving qualified names within a namespace
+// or type provided to CEL.
+package containers
+
+import (
+ "fmt"
+ "strings"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+var (
+ // DefaultContainer has an empty container name.
+ DefaultContainer *Container = nil
+
+ // Empty map to search for aliases when needed.
+ noAliases = make(map[string]string)
+)
+
+// NewContainer creates a new Container configured by the provided ContainerOptions.
+func NewContainer(opts ...ContainerOption) (*Container, error) {
+ var c *Container
+ var err error
+ for _, opt := range opts {
+ c, err = opt(c)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return c, nil
+}
+
+// Container holds a reference to an optional qualified container name and set of aliases.
+//
+// The program container can be used to simplify variable, function, and type specification within
+// CEL programs and behaves more or less like a C++ namespace. See ResolveCandidateNames for more
+// details.
+type Container struct {
+ name string
+ aliases map[string]string
+}
+
+// Extend creates a new Container with the existing settings and applies a series of
+// ContainerOptions to further configure the new container.
+func (c *Container) Extend(opts ...ContainerOption) (*Container, error) {
+ if c == nil {
+ return NewContainer(opts...)
+ }
+ // Copy the name and aliases of the existing container.
+ ext := &Container{name: c.Name()}
+ if len(c.aliasSet()) > 0 {
+ aliasSet := make(map[string]string, len(c.aliasSet()))
+ for k, v := range c.aliasSet() {
+ aliasSet[k] = v
+ }
+ ext.aliases = aliasSet
+ }
+ // Apply the new options to the container.
+ var err error
+ for _, opt := range opts {
+ ext, err = opt(ext)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return ext, nil
+}
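+
+// For example (illustrative only): cont.Extend(Name("a.b.sub")) returns a new
+// Container scoped to "a.b.sub" that preserves any aliases configured on
+// cont, while cont itself is left unchanged.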
+
+// Name returns the fully-qualified name of the container.
+//
+// The name may conceptually be a namespace, package, or type.
+func (c *Container) Name() string {
+ if c == nil {
+ return ""
+ }
+ return c.name
+}
+
+// ResolveCandidateNames returns the candidate names of namespaced identifiers in C++ resolution
+// order.
+//
+// Names which shadow other names are returned first. If a name includes a leading dot ('.'),
+// the name is treated as an absolute identifier which cannot be shadowed.
+//
+// Given a container name a.b.c.M.N and a type name R.s, this will deliver in order:
+//
+// a.b.c.M.N.R.s
+// a.b.c.M.R.s
+// a.b.c.R.s
+// a.b.R.s
+// a.R.s
+// R.s
+//
+// If aliases or abbreviations are configured for the container, then alias names will take
+// precedence over containerized names.
+func (c *Container) ResolveCandidateNames(name string) []string {
+ if strings.HasPrefix(name, ".") {
+ qn := name[1:]
+ alias, isAlias := c.findAlias(qn)
+ if isAlias {
+ return []string{alias}
+ }
+ return []string{qn}
+ }
+ alias, isAlias := c.findAlias(name)
+ if isAlias {
+ return []string{alias}
+ }
+ if c.Name() == "" {
+ return []string{name}
+ }
+ nextCont := c.Name()
+ candidates := []string{nextCont + "." + name}
+ for i := strings.LastIndex(nextCont, "."); i >= 0; i = strings.LastIndex(nextCont, ".") {
+ nextCont = nextCont[:i]
+ candidates = append(candidates, nextCont+"."+name)
+ }
+ return append(candidates, name)
+}
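+
+// A non-normative sketch of the resolution order documented above:
+//
+//    c, _ := NewContainer(Name("a.b.c"))
+//    c.ResolveCandidateNames("R.s")
+//    // returns ["a.b.c.R.s", "a.b.R.s", "a.R.s", "R.s"]
+//
+// A leading dot pins the name: ResolveCandidateNames(".R.s") returns ["R.s"].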
+
+// aliasSet returns the alias to fully-qualified name mapping stored in the container.
+func (c *Container) aliasSet() map[string]string {
+ if c == nil || c.aliases == nil {
+ return noAliases
+ }
+ return c.aliases
+}
+
+// findAlias takes a name as input and returns an alias expansion if one exists.
+//
+// If the name is qualified, the first component of the qualified name is checked against known
+// aliases. Any alias that is found in a qualified name is expanded in the result:
+//
+// alias: R -> my.alias.R
+// name: R.S.T
+// output: my.alias.R.S.T
+//
+// Note, the name must not have a leading dot.
+func (c *Container) findAlias(name string) (string, bool) {
+ // If an alias exists for the name, ensure it is searched last.
+ simple := name
+ qualifier := ""
+ dot := strings.Index(name, ".")
+ if dot >= 0 {
+ simple = name[0:dot]
+ qualifier = name[dot:]
+ }
+ alias, found := c.aliasSet()[simple]
+ if !found {
+ return "", false
+ }
+ return alias + qualifier, true
+}
+
+// ContainerOption specifies a functional configuration option for a Container.
+//
+// Note, ContainerOption implementations must be able to handle nil container inputs.
+type ContainerOption func(*Container) (*Container, error)
+
+// Abbrevs configures a set of simple names as abbreviations for fully-qualified names.
+//
+// An abbreviation (abbrev for short) is a simple name that expands to a fully-qualified name.
+// Abbreviations can be useful when working with variables, functions, and especially types from
+// multiple namespaces:
+//
+// // CEL object construction
+// qual.pkg.version.ObjTypeName{
+// field: alt.container.ver.FieldTypeName{value: ...}
+// }
+//
+// Only one of the qualified names above may be used as the CEL container, so at least one of these
+// references must be a long qualified name within an otherwise short CEL program. Using the
+// following abbreviations, the program becomes much simpler:
+//
+// // CEL Go option
+// Abbrevs("qual.pkg.version.ObjTypeName", "alt.container.ver.FieldTypeName")
+// // Simplified Object construction
+// ObjTypeName{field: FieldTypeName{value: ...}}
+//
+// There are a few rules for the qualified names and the simple abbreviations generated from them:
+// - Qualified names must be dot-delimited, e.g. `package.subpkg.name`.
+// - The last element in the qualified name is the abbreviation.
+// - Abbreviations must not collide with each other.
+// - The abbreviation must not collide with unqualified names in use.
+//
+// Abbreviations are distinct from container-based references in the following important ways:
+// - Abbreviations must expand to a fully-qualified name.
+// - Expanded abbreviations do not participate in namespace resolution.
+// - Abbreviation expansion is done instead of the container search for a matching identifier.
+// - Containers follow C++ namespace resolution rules with searches from the most qualified name
+// to the least qualified name.
+// - Container references within the CEL program may be relative, and are resolved to fully
+// qualified names at either type-check time or program plan time, whichever comes first.
+//
+// If there is ever a case where an identifier could be in both the container and as an
+// abbreviation, the abbreviation wins as this will ensure that the meaning of a program is
+// preserved between compilations even as the container evolves.
+func Abbrevs(qualifiedNames ...string) ContainerOption {
+ return func(c *Container) (*Container, error) {
+ for _, qn := range qualifiedNames {
+ ind := strings.LastIndex(qn, ".")
+ if ind <= 0 || ind >= len(qn)-1 {
+ return nil, fmt.Errorf(
+ "invalid qualified name: %s, wanted name of the form 'qualified.name'", qn)
+ }
+ alias := qn[ind+1:]
+ var err error
+ c, err = aliasAs("abbreviation", qn, alias)(c)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return c, nil
+ }
+}
+
+// Alias associates a fully-qualified name with a user-defined alias.
+//
+// In general, Abbrevs is preferred to Alias since the names generated from the Abbrevs option
+// are more easily traced back to source code. The Alias option is useful for propagating alias
+// configuration from one Container instance to another, and may also be useful for remapping
+// poorly chosen protobuf message / package names.
+//
+// Note: all of the rules that apply to Abbrevs also apply to Alias.
+func Alias(qualifiedName, alias string) ContainerOption {
+ return aliasAs("alias", qualifiedName, alias)
+}
+
+func aliasAs(kind, qualifiedName, alias string) ContainerOption {
+ return func(c *Container) (*Container, error) {
+ if len(alias) == 0 || strings.Contains(alias, ".") {
+ return nil, fmt.Errorf(
+ "%s must be non-empty and simple (not qualified): %s=%s", kind, kind, alias)
+ }
+
+ if strings.HasPrefix(qualifiedName, ".") {
+ return nil, fmt.Errorf("qualified name must not begin with a leading '.': %s",
+ qualifiedName)
+ }
+ ind := strings.LastIndex(qualifiedName, ".")
+ if ind <= 0 || ind == len(qualifiedName)-1 {
+ return nil, fmt.Errorf("%s must refer to a valid qualified name: %s",
+ kind, qualifiedName)
+ }
+ aliasRef, found := c.aliasSet()[alias]
+ if found {
+ return nil, fmt.Errorf(
+ "%s collides with existing reference: name=%s, %s=%s, existing=%s",
+ kind, qualifiedName, kind, alias, aliasRef)
+ }
+ if strings.HasPrefix(c.Name(), alias+".") || c.Name() == alias {
+ return nil, fmt.Errorf(
+ "%s collides with container name: name=%s, %s=%s, container=%s",
+ kind, qualifiedName, kind, alias, c.Name())
+ }
+ if c == nil {
+ c = &Container{}
+ }
+ if c.aliases == nil {
+ c.aliases = make(map[string]string)
+ }
+ c.aliases[alias] = qualifiedName
+ return c, nil
+ }
+}
+
+// Name sets the fully-qualified name of the Container.
+func Name(name string) ContainerOption {
+ return func(c *Container) (*Container, error) {
+ if len(name) > 0 && name[0:1] == "." {
+ return nil, fmt.Errorf("container name must not contain a leading '.': %s", name)
+ }
+ if c.Name() == name {
+ return c, nil
+ }
+ if c == nil {
+ return &Container{name: name}, nil
+ }
+ c.name = name
+ return c, nil
+ }
+}
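+
+// A minimal construction sketch (illustrative only) combining the options
+// above; the names are hypothetical:
+//
+//    cont, err := NewContainer(
+//        Name("mypackage.v1"),
+//        Abbrevs("google.protobuf.Duration"),
+//    )
+//    // err == nil; "Duration" now expands to "google.protobuf.Duration".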
+
+// ToQualifiedName converts an expression AST into a qualified name if possible, with a boolean
+// 'found' value that indicates if the conversion is successful.
+func ToQualifiedName(e *exprpb.Expr) (string, bool) {
+ switch e.GetExprKind().(type) {
+ case *exprpb.Expr_IdentExpr:
+ id := e.GetIdentExpr()
+ return id.GetName(), true
+ case *exprpb.Expr_SelectExpr:
+ sel := e.GetSelectExpr()
+ // Test only expressions are not valid as qualified names.
+ if sel.GetTestOnly() {
+ return "", false
+ }
+ if qual, found := ToQualifiedName(sel.GetOperand()); found {
+ return qual + "." + sel.GetField(), true
+ }
+ }
+ return "", false
+}
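+
+// For example (illustrative): the parsed expression a.b.c is a Select chain
+// that bottoms out in an Ident, so ToQualifiedName returns ("a.b.c", true),
+// whereas a[0].b contains an index call and returns ("", false).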
diff --git a/vendor/github.com/google/cel-go/common/cost.go b/vendor/github.com/google/cel-go/common/cost.go
new file mode 100644
index 0000000..5e24bd0
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/cost.go
@@ -0,0 +1,40 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+const (
+ // SelectAndIdentCost is the cost of an operation that accesses an identifier or performs a select.
+ SelectAndIdentCost = 1
+
+ // ConstCost is the cost of an operation that accesses a constant.
+ ConstCost = 0
+
+ // ListCreateBaseCost is the base cost of any operation that creates a new list.
+ ListCreateBaseCost = 10
+
+ // MapCreateBaseCost is the base cost of any operation that creates a new map.
+ MapCreateBaseCost = 30
+
+ // StructCreateBaseCost is the base cost of any operation that creates a new struct.
+ StructCreateBaseCost = 40
+
+ // StringTraversalCostFactor is multiplied by the length of a string when computing the cost of traversing the entire
+ // string once.
+ StringTraversalCostFactor = 0.1
+
+ // RegexStringLengthCostFactor is multiplied by the length of a regex string pattern when computing the cost of
+ // applying the regex to a string of unit cost.
+ RegexStringLengthCostFactor = 0.25
+)
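+
+// Worked example (illustrative only): traversing a 50-character string once
+// costs 50 * StringTraversalCostFactor = 5 units, and applying a 20-character
+// regex pattern to a string of unit cost is estimated at
+// 20 * RegexStringLengthCostFactor = 5 units.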
diff --git a/vendor/github.com/google/cel-go/common/debug/BUILD.bazel b/vendor/github.com/google/cel-go/common/debug/BUILD.bazel
new file mode 100644
index 0000000..1f02983
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/debug/BUILD.bazel
@@ -0,0 +1,18 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "debug.go",
+ ],
+ importpath = "github.com/google/cel-go/common/debug",
+ deps = [
+ "//common:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/common/debug/debug.go b/vendor/github.com/google/cel-go/common/debug/debug.go
new file mode 100644
index 0000000..5dab156
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/debug/debug.go
@@ -0,0 +1,311 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package debug provides tools to print a parsed expression graph and
+// adorn each expression element with additional metadata.
+package debug
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Adorner returns debug metadata that will be tacked on to the string
+// representation of an expression.
+type Adorner interface {
+ // GetMetadata for the input context.
+ GetMetadata(ctx any) string
+}
+
+// Writer manages writing expressions to an internal string.
+type Writer interface {
+ fmt.Stringer
+
+ // Buffer pushes an expression into an internal queue of expressions to
+ // write to a string.
+ Buffer(e *exprpb.Expr)
+}
+
+type emptyDebugAdorner struct {
+}
+
+var emptyAdorner Adorner = &emptyDebugAdorner{}
+
+func (a *emptyDebugAdorner) GetMetadata(e any) string {
+ return ""
+}
+
+// ToDebugString gives the unadorned string representation of the Expr.
+func ToDebugString(e *exprpb.Expr) string {
+ return ToAdornedDebugString(e, emptyAdorner)
+}
+
+// ToAdornedDebugString gives the adorned string representation of the Expr.
+func ToAdornedDebugString(e *exprpb.Expr, adorner Adorner) string {
+ w := newDebugWriter(adorner)
+ w.Buffer(e)
+ return w.String()
+}
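+
+// A rough sketch of the output shape (illustrative; adornments depend on the
+// provided Adorner): a call expression such as size(name) renders as
+//
+//    size(
+//      name
+//    )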
+
+// debugWriter is used to print out pretty-printed debug strings.
+type debugWriter struct {
+ adorner Adorner
+ buffer bytes.Buffer
+ indent int
+ lineStart bool
+}
+
+func newDebugWriter(a Adorner) *debugWriter {
+ return &debugWriter{
+ adorner: a,
+ indent: 0,
+ lineStart: true,
+ }
+}
+
+func (w *debugWriter) Buffer(e *exprpb.Expr) {
+ if e == nil {
+ return
+ }
+ switch e.ExprKind.(type) {
+ case *exprpb.Expr_ConstExpr:
+ w.append(formatLiteral(e.GetConstExpr()))
+ case *exprpb.Expr_IdentExpr:
+ w.append(e.GetIdentExpr().Name)
+ case *exprpb.Expr_SelectExpr:
+ w.appendSelect(e.GetSelectExpr())
+ case *exprpb.Expr_CallExpr:
+ w.appendCall(e.GetCallExpr())
+ case *exprpb.Expr_ListExpr:
+ w.appendList(e.GetListExpr())
+ case *exprpb.Expr_StructExpr:
+ w.appendStruct(e.GetStructExpr())
+ case *exprpb.Expr_ComprehensionExpr:
+ w.appendComprehension(e.GetComprehensionExpr())
+ }
+ w.adorn(e)
+}
+
+func (w *debugWriter) appendSelect(sel *exprpb.Expr_Select) {
+ w.Buffer(sel.GetOperand())
+ w.append(".")
+ w.append(sel.GetField())
+ if sel.TestOnly {
+ w.append("~test-only~")
+ }
+}
+
+func (w *debugWriter) appendCall(call *exprpb.Expr_Call) {
+ if call.Target != nil {
+ w.Buffer(call.GetTarget())
+ w.append(".")
+ }
+ w.append(call.GetFunction())
+ w.append("(")
+ if len(call.GetArgs()) > 0 {
+ w.addIndent()
+ w.appendLine()
+ for i, arg := range call.GetArgs() {
+ if i > 0 {
+ w.append(",")
+ w.appendLine()
+ }
+ w.Buffer(arg)
+ }
+ w.removeIndent()
+ w.appendLine()
+ }
+ w.append(")")
+}
+
+func (w *debugWriter) appendList(list *exprpb.Expr_CreateList) {
+ w.append("[")
+ if len(list.GetElements()) > 0 {
+ w.appendLine()
+ w.addIndent()
+ for i, elem := range list.GetElements() {
+ if i > 0 {
+ w.append(",")
+ w.appendLine()
+ }
+ w.Buffer(elem)
+ }
+ w.removeIndent()
+ w.appendLine()
+ }
+ w.append("]")
+}
+
+func (w *debugWriter) appendStruct(obj *exprpb.Expr_CreateStruct) {
+ if obj.MessageName != "" {
+ w.appendObject(obj)
+ } else {
+ w.appendMap(obj)
+ }
+}
+
+func (w *debugWriter) appendObject(obj *exprpb.Expr_CreateStruct) {
+ w.append(obj.GetMessageName())
+ w.append("{")
+ if len(obj.GetEntries()) > 0 {
+ w.appendLine()
+ w.addIndent()
+ for i, entry := range obj.GetEntries() {
+ if i > 0 {
+ w.append(",")
+ w.appendLine()
+ }
+ if entry.GetOptionalEntry() {
+ w.append("?")
+ }
+ w.append(entry.GetFieldKey())
+ w.append(":")
+ w.Buffer(entry.GetValue())
+ w.adorn(entry)
+ }
+ w.removeIndent()
+ w.appendLine()
+ }
+ w.append("}")
+}
+
+func (w *debugWriter) appendMap(obj *exprpb.Expr_CreateStruct) {
+ w.append("{")
+ if len(obj.GetEntries()) > 0 {
+ w.appendLine()
+ w.addIndent()
+ for i, entry := range obj.GetEntries() {
+ if i > 0 {
+ w.append(",")
+ w.appendLine()
+ }
+ if entry.GetOptionalEntry() {
+ w.append("?")
+ }
+ w.Buffer(entry.GetMapKey())
+ w.append(":")
+ w.Buffer(entry.GetValue())
+ w.adorn(entry)
+ }
+ w.removeIndent()
+ w.appendLine()
+ }
+ w.append("}")
+}
+
+func (w *debugWriter) appendComprehension(comprehension *exprpb.Expr_Comprehension) {
+ w.append("__comprehension__(")
+ w.addIndent()
+ w.appendLine()
+ w.append("// Variable")
+ w.appendLine()
+ w.append(comprehension.GetIterVar())
+ w.append(",")
+ w.appendLine()
+ w.append("// Target")
+ w.appendLine()
+ w.Buffer(comprehension.GetIterRange())
+ w.append(",")
+ w.appendLine()
+ w.append("// Accumulator")
+ w.appendLine()
+ w.append(comprehension.GetAccuVar())
+ w.append(",")
+ w.appendLine()
+ w.append("// Init")
+ w.appendLine()
+ w.Buffer(comprehension.GetAccuInit())
+ w.append(",")
+ w.appendLine()
+ w.append("// LoopCondition")
+ w.appendLine()
+ w.Buffer(comprehension.GetLoopCondition())
+ w.append(",")
+ w.appendLine()
+ w.append("// LoopStep")
+ w.appendLine()
+ w.Buffer(comprehension.GetLoopStep())
+ w.append(",")
+ w.appendLine()
+ w.append("// Result")
+ w.appendLine()
+ w.Buffer(comprehension.GetResult())
+ w.append(")")
+ w.removeIndent()
+}
+
+func formatLiteral(c *exprpb.Constant) string {
+ switch c.GetConstantKind().(type) {
+ case *exprpb.Constant_BoolValue:
+ return fmt.Sprintf("%t", c.GetBoolValue())
+ case *exprpb.Constant_BytesValue:
+ return fmt.Sprintf("b\"%s\"", string(c.GetBytesValue()))
+ case *exprpb.Constant_DoubleValue:
+ return fmt.Sprintf("%v", c.GetDoubleValue())
+ case *exprpb.Constant_Int64Value:
+ return fmt.Sprintf("%d", c.GetInt64Value())
+ case *exprpb.Constant_StringValue:
+ return strconv.Quote(c.GetStringValue())
+ case *exprpb.Constant_Uint64Value:
+ return fmt.Sprintf("%du", c.GetUint64Value())
+ case *exprpb.Constant_NullValue:
+ return "null"
+ default:
+ panic("Unknown constant type")
+ }
+}
+
+func (w *debugWriter) append(s string) {
+ w.doIndent()
+ w.buffer.WriteString(s)
+}
+
+func (w *debugWriter) appendFormat(f string, args ...any) {
+ w.append(fmt.Sprintf(f, args...))
+}
+
+func (w *debugWriter) doIndent() {
+ if w.lineStart {
+ w.lineStart = false
+ w.buffer.WriteString(strings.Repeat(" ", w.indent))
+ }
+}
+
+func (w *debugWriter) adorn(e any) {
+ w.append(w.adorner.GetMetadata(e))
+}
+
+func (w *debugWriter) appendLine() {
+ w.buffer.WriteString("\n")
+ w.lineStart = true
+}
+
+func (w *debugWriter) addIndent() {
+ w.indent++
+}
+
+func (w *debugWriter) removeIndent() {
+ w.indent--
+ if w.indent < 0 {
+ panic("negative indent")
+ }
+}
+
+func (w *debugWriter) String() string {
+ return w.buffer.String()
+}
diff --git a/vendor/github.com/google/cel-go/common/decls/BUILD.bazel b/vendor/github.com/google/cel-go/common/decls/BUILD.bazel
new file mode 100644
index 0000000..17791dc
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/decls/BUILD.bazel
@@ -0,0 +1,39 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "decls.go",
+ ],
+ importpath = "github.com/google/cel-go/common/decls",
+ deps = [
+ "//checker/decls:go_default_library",
+ "//common/functions:go_default_library",
+ "//common/types:go_default_library",
+ "//common/types/ref:go_default_library",
+ "//common/types/traits:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ ],
+)
+
+go_test(
+ name = "go_default_test",
+ srcs = [
+ "decls_test.go",
+ ],
+ embed = [":go_default_library"],
+ deps = [
+ "//checker/decls:go_default_library",
+ "//common/overloads:go_default_library",
+ "//common/types:go_default_library",
+ "//common/types/ref:go_default_library",
+ "//common/types/traits:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/common/decls/decls.go b/vendor/github.com/google/cel-go/common/decls/decls.go
new file mode 100644
index 0000000..734ebe5
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/decls/decls.go
@@ -0,0 +1,844 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package decls contains function and variable declaration structs and helper methods.
+package decls
+
+import (
+ "fmt"
+ "strings"
+
+ chkdecls "github.com/google/cel-go/checker/decls"
+ "github.com/google/cel-go/common/functions"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// NewFunction creates a new function declaration with a set of function options to configure overloads
+// and function definitions (implementations).
+//
+// Functions are checked for name collisions and singleton redefinition.
+func NewFunction(name string, opts ...FunctionOpt) (*FunctionDecl, error) {
+ fn := &FunctionDecl{
+ name: name,
+ overloads: map[string]*OverloadDecl{},
+ overloadOrdinals: []string{},
+ }
+ var err error
+ for _, opt := range opts {
+ fn, err = opt(fn)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if len(fn.overloads) == 0 {
+ return nil, fmt.Errorf("function %s must have at least one overload", name)
+ }
+ return fn, nil
+}
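+
+// A minimal declaration sketch (illustrative only; the overload id and
+// binding are hypothetical):
+//
+//    sizeFn, err := NewFunction("size",
+//        Overload("size_string", []*types.Type{types.StringType}, types.IntType,
+//            UnaryBinding(func(s ref.Val) ref.Val {
+//                return types.Int(len(s.(types.String)))
+//            })))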
+
+// FunctionDecl defines a function name, overload set, and optionally a singleton definition for all
+// overload instances.
+type FunctionDecl struct {
+ name string
+
+ // overloads associated with the function name.
+ overloads map[string]*OverloadDecl
+
+ // singleton implementation of the function for all overloads.
+ //
+ // If this option is set, an error will occur if any overloads specify a per-overload implementation
+ // or if another function with the same name attempts to redefine the singleton.
+ singleton *functions.Overload
+
+ // disableTypeGuards is a performance optimization to disable detailed runtime type checks which could
+ // add overhead on common operations. Setting this option true leaves error checks and argument checks
+ // intact.
+ disableTypeGuards bool
+
+ // state indicates that the binding should be provided as a declaration, as a runtime binding, or both.
+ state declarationState
+
+ // overloadOrdinals indicates the order in which the overload was declared.
+ overloadOrdinals []string
+}
+
+type declarationState int
+
+const (
+ declarationStateUnset declarationState = iota
+ declarationDisabled
+ declarationEnabled
+)
+
+// Name returns the function name in human-readable terms, e.g. 'contains' or 'math.least'.
+func (f *FunctionDecl) Name() string {
+ if f == nil {
+ return ""
+ }
+ return f.name
+}
+
+// IsDeclarationDisabled indicates that the function implementation should be added to the dispatcher, but the
+// declaration should not be exposed for use in expressions.
+func (f *FunctionDecl) IsDeclarationDisabled() bool {
+ return f.state == declarationDisabled
+}
+
+// Merge combines an existing function declaration with another.
+//
+// If a function is extended, say by adding new overloads to an existing function, then it is merged with the
+// prior definition of the function at which point its overloads must not collide with pre-existing overloads
+// and its bindings (singleton, or per-overload) must not conflict with previous definitions either.
+func (f *FunctionDecl) Merge(other *FunctionDecl) (*FunctionDecl, error) {
+ if f == other {
+ return f, nil
+ }
+ if f.Name() != other.Name() {
+ return nil, fmt.Errorf("cannot merge unrelated functions. %s and %s", f.Name(), other.Name())
+ }
+ merged := &FunctionDecl{
+ name: f.Name(),
+ overloads: make(map[string]*OverloadDecl, len(f.overloads)),
+ singleton: f.singleton,
+ overloadOrdinals: make([]string, len(f.overloads)),
+ // if one function is expecting type-guards and the other is not, then they
+ // must not be disabled.
+ disableTypeGuards: f.disableTypeGuards && other.disableTypeGuards,
+ // default to the current function's declaration state.
+ state: f.state,
+ }
+ // If the other state indicates that the declaration should be explicitly enabled or
+ // disabled, then update the merged state with the most recent value.
+ if other.state != declarationStateUnset {
+ merged.state = other.state
+ }
+ // baseline copy of the overloads and their ordinals
+ copy(merged.overloadOrdinals, f.overloadOrdinals)
+ for oID, o := range f.overloads {
+ merged.overloads[oID] = o
+ }
+ // overloads and their ordinals from the other declaration are appended in order
+ for _, oID := range other.overloadOrdinals {
+ o := other.overloads[oID]
+ err := merged.AddOverload(o)
+ if err != nil {
+ return nil, fmt.Errorf("function declaration merge failed: %v", err)
+ }
+ }
+ if other.singleton != nil {
+ if merged.singleton != nil && merged.singleton != other.singleton {
+ return nil, fmt.Errorf("function already has a singleton binding: %s", f.Name())
+ }
+ merged.singleton = other.singleton
+ }
+ return merged, nil
+}
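+
+// Illustrative note: merging two declarations of the same function whose
+// overload ids and signatures do not collide yields a single declaration
+// containing all overloads in declaration order; a colliding overload id with
+// a different signature is reported as an error.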
+
+// AddOverload ensures that the new overload does not collide with an existing overload signature;
+// however, if the function signatures are identical, the implementation may be rewritten as it's
+// difficult to compare functions by object identity.
+func (f *FunctionDecl) AddOverload(overload *OverloadDecl) error {
+ if f == nil {
+ return fmt.Errorf("nil function cannot add overload: %s", overload.ID())
+ }
+ for oID, o := range f.overloads {
+ if oID != overload.ID() && o.SignatureOverlaps(overload) {
+ return fmt.Errorf("overload signature collision in function %s: %s collides with %s", f.Name(), oID, overload.ID())
+ }
+ if oID == overload.ID() {
+ if o.SignatureEquals(overload) && o.IsNonStrict() == overload.IsNonStrict() {
+ // Allow redefinition of an overload implementation so long as the signatures match.
+ f.overloads[oID] = overload
+ return nil
+ }
+ return fmt.Errorf("overload redefinition in function. %s: %s has multiple definitions", f.Name(), oID)
+ }
+ }
+ f.overloadOrdinals = append(f.overloadOrdinals, overload.ID())
+ f.overloads[overload.ID()] = overload
+ return nil
+}
+
+// OverloadDecls returns the overload declarations in the order in which they were declared.
+func (f *FunctionDecl) OverloadDecls() []*OverloadDecl {
+ if f == nil {
+ return []*OverloadDecl{}
+ }
+ overloads := make([]*OverloadDecl, 0, len(f.overloads))
+ for _, oID := range f.overloadOrdinals {
+ overloads = append(overloads, f.overloads[oID])
+ }
+ return overloads
+}
+
+// Bindings produces a set of function bindings, if any are defined.
+func (f *FunctionDecl) Bindings() ([]*functions.Overload, error) {
+ if f == nil {
+ return []*functions.Overload{}, nil
+ }
+ overloads := []*functions.Overload{}
+ nonStrict := false
+ for _, oID := range f.overloadOrdinals {
+ o := f.overloads[oID]
+ if o.hasBinding() {
+ overload := &functions.Overload{
+ Operator: o.ID(),
+ Unary: o.guardedUnaryOp(f.Name(), f.disableTypeGuards),
+ Binary: o.guardedBinaryOp(f.Name(), f.disableTypeGuards),
+ Function: o.guardedFunctionOp(f.Name(), f.disableTypeGuards),
+ OperandTrait: o.OperandTrait(),
+ NonStrict: o.IsNonStrict(),
+ }
+ overloads = append(overloads, overload)
+ nonStrict = nonStrict || o.IsNonStrict()
+ }
+ }
+ if f.singleton != nil {
+ if len(overloads) != 0 {
+ return nil, fmt.Errorf("singleton function incompatible with specialized overloads: %s", f.Name())
+ }
+ overloads = []*functions.Overload{
+ {
+ Operator: f.Name(),
+ Unary: f.singleton.Unary,
+ Binary: f.singleton.Binary,
+ Function: f.singleton.Function,
+ OperandTrait: f.singleton.OperandTrait,
+ },
+ }
+ // fall-through to return single overload case.
+ }
+ if len(overloads) == 0 {
+ return overloads, nil
+ }
+ // Single overload. Replicate an entry for it using the function name as well.
+ if len(overloads) == 1 {
+ if overloads[0].Operator == f.Name() {
+ return overloads, nil
+ }
+ return append(overloads, &functions.Overload{
+ Operator: f.Name(),
+ Unary: overloads[0].Unary,
+ Binary: overloads[0].Binary,
+ Function: overloads[0].Function,
+ NonStrict: overloads[0].NonStrict,
+ OperandTrait: overloads[0].OperandTrait,
+ }), nil
+ }
+ // All of the defined overloads are wrapped into a top-level function which
+ // performs dynamic dispatch to the proper overload based on the argument types.
+ bindings := append([]*functions.Overload{}, overloads...)
+ funcDispatch := func(args ...ref.Val) ref.Val {
+ for _, oID := range f.overloadOrdinals {
+ o := f.overloads[oID]
+ // During dynamic dispatch over multiple functions, signature agreement checks
+ // are preserved in order to assist with the function resolution step.
+ switch len(args) {
+ case 1:
+ if o.unaryOp != nil && o.matchesRuntimeSignature( /* disableTypeGuards=*/ false, args...) {
+ return o.unaryOp(args[0])
+ }
+ case 2:
+ if o.binaryOp != nil && o.matchesRuntimeSignature( /* disableTypeGuards=*/ false, args...) {
+ return o.binaryOp(args[0], args[1])
+ }
+ }
+ if o.functionOp != nil && o.matchesRuntimeSignature( /* disableTypeGuards=*/ false, args...) {
+ return o.functionOp(args...)
+ }
+ // eventually this will fall through to the MaybeNoSuchOverload call below.
+ }
+ return MaybeNoSuchOverload(f.Name(), args...)
+ }
+ function := &functions.Overload{
+ Operator: f.Name(),
+ Function: funcDispatch,
+ NonStrict: nonStrict,
+ }
+ return append(bindings, function), nil
+}
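+
+// Illustrative note: with two bound overloads, Bindings returns three
+// functions.Overload entries: one per overload id plus the variadic
+// dispatcher registered under the function name itself.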
+
+// MaybeNoSuchOverload determines whether to propagate an error if one is provided as an argument, or
+// to return an unknown set, or to produce a new error for a missing function signature.
+func MaybeNoSuchOverload(funcName string, args ...ref.Val) ref.Val {
+ argTypes := make([]string, len(args))
+ var unk *types.Unknown = nil
+ for i, arg := range args {
+ if types.IsError(arg) {
+ return arg
+ }
+ if types.IsUnknown(arg) {
+ unk = types.MergeUnknowns(arg.(*types.Unknown), unk)
+ }
+ argTypes[i] = arg.Type().TypeName()
+ }
+ if unk != nil {
+ return unk
+ }
+ signature := strings.Join(argTypes, ", ")
+ return types.NewErr("no such overload: %s(%s)", funcName, signature)
+}
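+
+// For example (illustrative): invoking a string-only overload of size with an
+// int argument produces the error value `no such overload: size(int)`, while
+// an unknown argument is propagated as an unknown rather than an error.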
+
+// FunctionOpt defines a functional option for mutating a function declaration.
+type FunctionOpt func(*FunctionDecl) (*FunctionDecl, error)
+
+// DisableTypeGuards disables automatically generated function invocation guards on direct overload calls.
+// Type guards remain on during dynamic dispatch for parsed-only expressions.
+func DisableTypeGuards(value bool) FunctionOpt {
+ return func(fn *FunctionDecl) (*FunctionDecl, error) {
+ fn.disableTypeGuards = value
+ return fn, nil
+ }
+}
+
+// DisableDeclaration indicates that the function declaration should be disabled, but the runtime function
+// binding should be provided. Marking a function as runtime-only is a safe way to manage deprecations
+// of function declarations while still preserving the runtime behavior for previously compiled expressions.
+func DisableDeclaration(value bool) FunctionOpt {
+ return func(fn *FunctionDecl) (*FunctionDecl, error) {
+ if value {
+ fn.state = declarationDisabled
+ } else {
+ fn.state = declarationEnabled
+ }
+ return fn, nil
+ }
+}
+
+// SingletonUnaryBinding creates a singleton function definition to be used for all function overloads.
+//
+// Note, this approach works well if the operand is expected to have a specific trait which it implements,
+// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
+func SingletonUnaryBinding(fn functions.UnaryOp, traits ...int) FunctionOpt {
+ trait := 0
+ for _, t := range traits {
+ trait = trait | t
+ }
+ return func(f *FunctionDecl) (*FunctionDecl, error) {
+ if f.singleton != nil {
+ return nil, fmt.Errorf("function already has a singleton binding: %s", f.Name())
+ }
+ f.singleton = &functions.Overload{
+ Operator: f.Name(),
+ Unary: fn,
+ OperandTrait: trait,
+ }
+ return f, nil
+ }
+}
+
+// SingletonBinaryBinding creates a singleton function definition to be used with all function overloads.
+//
+// Note, this approach works well if the operand is expected to have a specific trait which it implements,
+// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
+func SingletonBinaryBinding(fn functions.BinaryOp, traits ...int) FunctionOpt {
+ trait := 0
+ for _, t := range traits {
+ trait = trait | t
+ }
+ return func(f *FunctionDecl) (*FunctionDecl, error) {
+ if f.singleton != nil {
+ return nil, fmt.Errorf("function already has a singleton binding: %s", f.Name())
+ }
+ f.singleton = &functions.Overload{
+ Operator: f.Name(),
+ Binary: fn,
+ OperandTrait: trait,
+ }
+ return f, nil
+ }
+}
+
+// SingletonFunctionBinding creates a singleton function definition to be used with all function overloads.
+//
+// Note, this approach works well if the operand is expected to have a specific trait which it implements,
+// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
+func SingletonFunctionBinding(fn functions.FunctionOp, traits ...int) FunctionOpt {
+ trait := 0
+ for _, t := range traits {
+ trait = trait | t
+ }
+ return func(f *FunctionDecl) (*FunctionDecl, error) {
+ if f.singleton != nil {
+ return nil, fmt.Errorf("function already has a singleton binding: %s", f.Name())
+ }
+ f.singleton = &functions.Overload{
+ Operator: f.Name(),
+ Function: fn,
+ OperandTrait: trait,
+ }
+ return f, nil
+ }
+}
+
+// Overload defines a new global overload with an overload id, argument types, and result type. Through the
+// use of OverloadOpt options, the overload may also be configured with a binding, an operand trait, and to
+// be non-strict.
+//
+// Note: function bindings are commonly configured with Overload instances, whereas operand traits and
+// strictness settings should be rare occurrences.
+func Overload(overloadID string,
+ args []*types.Type, resultType *types.Type,
+ opts ...OverloadOpt) FunctionOpt {
+ return newOverload(overloadID, false, args, resultType, opts...)
+}
+
+// MemberOverload defines a new receiver-style overload (or member function) with an overload id, argument types,
+// and result type. Through the use of OverloadOpt options, the overload may also be configured with a binding,
+// an operand trait, and to be non-strict.
+//
+// Note: function bindings are commonly configured with Overload instances, whereas operand traits and
+// strictness settings should be rare occurrences.
+func MemberOverload(overloadID string,
+ args []*types.Type, resultType *types.Type,
+ opts ...OverloadOpt) FunctionOpt {
+ return newOverload(overloadID, true, args, resultType, opts...)
+}
+
+func newOverload(overloadID string,
+ memberFunction bool, args []*types.Type, resultType *types.Type,
+ opts ...OverloadOpt) FunctionOpt {
+ return func(f *FunctionDecl) (*FunctionDecl, error) {
+ overload, err := newOverloadInternal(overloadID, memberFunction, args, resultType, opts...)
+ if err != nil {
+ return nil, err
+ }
+ err = f.AddOverload(overload)
+ if err != nil {
+ return nil, err
+ }
+ return f, nil
+ }
+}
+
+func newOverloadInternal(overloadID string,
+ memberFunction bool, args []*types.Type, resultType *types.Type,
+ opts ...OverloadOpt) (*OverloadDecl, error) {
+ overload := &OverloadDecl{
+ id: overloadID,
+ argTypes: args,
+ resultType: resultType,
+ isMemberFunction: memberFunction,
+ }
+ var err error
+ for _, opt := range opts {
+ overload, err = opt(overload)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return overload, nil
+}
+
+// OverloadDecl contains the definition of a single overload id with a specific signature, and an optional
+// implementation.
+type OverloadDecl struct {
+ id string
+ argTypes []*types.Type
+ resultType *types.Type
+ isMemberFunction bool
+ // nonStrict indicates that the function will accept error and unknown arguments as inputs.
+ nonStrict bool
+ // operandTrait indicates whether the member argument should have a specific type-trait.
+ //
+ // This is useful for creating overloads which operate on a type-interface rather than a concrete type.
+ operandTrait int
+
+ // Function implementation options. Optional, but encouraged.
+ // unaryOp is a function binding that takes a single argument.
+ unaryOp functions.UnaryOp
+ // binaryOp is a function binding that takes two arguments.
+ binaryOp functions.BinaryOp
+ // functionOp is a catch-all for zero-arity and three-plus arity functions.
+ functionOp functions.FunctionOp
+}
+
+// ID mirrors the overload signature and provides a unique id which may be referenced within the type-checker
+// and interpreter to optimize performance.
+//
+// The ID format is usually one of two styles:
+// global: <functionName>_<argType>_<argTypeN>
+// member: <memberType>_<functionName>_<argType>_<argTypeN>
+func (o *OverloadDecl) ID() string {
+ if o == nil {
+ return ""
+ }
+ return o.id
+}
+
+// ArgTypes contains the set of argument types expected by the overload.
+//
+// For member functions ArgTypes[0] represents the member operand type.
+func (o *OverloadDecl) ArgTypes() []*types.Type {
+ if o == nil {
+ return emptyArgs
+ }
+ return o.argTypes
+}
+
+// IsMemberFunction indicates whether the overload is a member function
+func (o *OverloadDecl) IsMemberFunction() bool {
+ if o == nil {
+ return false
+ }
+ return o.isMemberFunction
+}
+
+// IsNonStrict returns whether the overload accepts errors and unknown values as arguments.
+func (o *OverloadDecl) IsNonStrict() bool {
+ if o == nil {
+ return false
+ }
+ return o.nonStrict
+}
+
+// OperandTrait returns the trait mask of the first operand to the overload call, e.g.
+// `traits.Indexer`
+func (o *OverloadDecl) OperandTrait() int {
+ if o == nil {
+ return 0
+ }
+ return o.operandTrait
+}
+
+// ResultType indicates the output type from calling the function.
+func (o *OverloadDecl) ResultType() *types.Type {
+ if o == nil {
+ // *types.Type is nil-safe
+ return nil
+ }
+ return o.resultType
+}
+
+// TypeParams returns the type parameter names associated with the overload.
+func (o *OverloadDecl) TypeParams() []string {
+ typeParams := map[string]struct{}{}
+ collectParamNames(typeParams, o.ResultType())
+ for _, arg := range o.ArgTypes() {
+ collectParamNames(typeParams, arg)
+ }
+ params := make([]string, 0, len(typeParams))
+ for param := range typeParams {
+ params = append(params, param)
+ }
+ return params
+}
+
+// SignatureEquals determines whether the incoming overload declaration signature is equal to the current signature.
+//
+// Result type, operand trait, and strictness are not considered as part of signature equality.
+func (o *OverloadDecl) SignatureEquals(other *OverloadDecl) bool {
+ if o == other {
+ return true
+ }
+ if o.ID() != other.ID() || o.IsMemberFunction() != other.IsMemberFunction() || len(o.ArgTypes()) != len(other.ArgTypes()) {
+ return false
+ }
+ for i, at := range o.ArgTypes() {
+ oat := other.ArgTypes()[i]
+ if !at.IsEquivalentType(oat) {
+ return false
+ }
+ }
+ return o.ResultType().IsEquivalentType(other.ResultType())
+}
+
+// SignatureOverlaps indicates whether two functions have non-equal but overlapping function signatures.
+//
+// For example, list(dyn) collides with list(string) since the 'dyn' type can contain a 'string' type.
+func (o *OverloadDecl) SignatureOverlaps(other *OverloadDecl) bool {
+ if o.IsMemberFunction() != other.IsMemberFunction() || len(o.ArgTypes()) != len(other.ArgTypes()) {
+ return false
+ }
+ argsOverlap := true
+ for i, argType := range o.ArgTypes() {
+ otherArgType := other.ArgTypes()[i]
+ argsOverlap = argsOverlap &&
+ (argType.IsAssignableType(otherArgType) ||
+ otherArgType.IsAssignableType(argType))
+ }
+ return argsOverlap
+}
+
+// hasBinding indicates whether the overload already has a definition.
+func (o *OverloadDecl) hasBinding() bool {
+ return o != nil && (o.unaryOp != nil || o.binaryOp != nil || o.functionOp != nil)
+}
+
+// guardedUnaryOp creates an invocation guard around the provided unary operator, if one is defined.
+func (o *OverloadDecl) guardedUnaryOp(funcName string, disableTypeGuards bool) functions.UnaryOp {
+ if o.unaryOp == nil {
+ return nil
+ }
+ return func(arg ref.Val) ref.Val {
+ if !o.matchesRuntimeUnarySignature(disableTypeGuards, arg) {
+ return MaybeNoSuchOverload(funcName, arg)
+ }
+ return o.unaryOp(arg)
+ }
+}
+
+// guardedBinaryOp creates an invocation guard around the provided binary operator, if one is defined.
+func (o *OverloadDecl) guardedBinaryOp(funcName string, disableTypeGuards bool) functions.BinaryOp {
+ if o.binaryOp == nil {
+ return nil
+ }
+ return func(arg1, arg2 ref.Val) ref.Val {
+ if !o.matchesRuntimeBinarySignature(disableTypeGuards, arg1, arg2) {
+ return MaybeNoSuchOverload(funcName, arg1, arg2)
+ }
+ return o.binaryOp(arg1, arg2)
+ }
+}
+
+// guardedFunctionOp creates an invocation guard around the provided variadic function binding, if one is provided.
+func (o *OverloadDecl) guardedFunctionOp(funcName string, disableTypeGuards bool) functions.FunctionOp {
+ if o.functionOp == nil {
+ return nil
+ }
+ return func(args ...ref.Val) ref.Val {
+ if !o.matchesRuntimeSignature(disableTypeGuards, args...) {
+ return MaybeNoSuchOverload(funcName, args...)
+ }
+ return o.functionOp(args...)
+ }
+}
+
+// matchesRuntimeUnarySignature indicates whether the argument type is runtime assignable to the overload's expected argument.
+func (o *OverloadDecl) matchesRuntimeUnarySignature(disableTypeGuards bool, arg ref.Val) bool {
+ return matchRuntimeArgType(o.IsNonStrict(), disableTypeGuards, o.ArgTypes()[0], arg) &&
+ matchOperandTrait(o.OperandTrait(), arg)
+}
+
+// matchesRuntimeBinarySignature indicates whether the argument types are runtime assignable to the overload's expected arguments.
+func (o *OverloadDecl) matchesRuntimeBinarySignature(disableTypeGuards bool, arg1, arg2 ref.Val) bool {
+ return matchRuntimeArgType(o.IsNonStrict(), disableTypeGuards, o.ArgTypes()[0], arg1) &&
+ matchRuntimeArgType(o.IsNonStrict(), disableTypeGuards, o.ArgTypes()[1], arg2) &&
+ matchOperandTrait(o.OperandTrait(), arg1)
+}
+
+// matchesRuntimeSignature indicates whether the argument types are runtime assignable to the overload's expected arguments.
+func (o *OverloadDecl) matchesRuntimeSignature(disableTypeGuards bool, args ...ref.Val) bool {
+ if len(args) != len(o.ArgTypes()) {
+ return false
+ }
+ if len(args) == 0 {
+ return true
+ }
+ for i, arg := range args {
+ if !matchRuntimeArgType(o.IsNonStrict(), disableTypeGuards, o.ArgTypes()[i], arg) {
+ return false
+ }
+ }
+ return matchOperandTrait(o.OperandTrait(), args[0])
+}
+
+func matchRuntimeArgType(nonStrict, disableTypeGuards bool, argType *types.Type, arg ref.Val) bool {
+ if nonStrict && (disableTypeGuards || types.IsUnknownOrError(arg)) {
+ return true
+ }
+ if types.IsUnknownOrError(arg) {
+ return false
+ }
+ return disableTypeGuards || argType.IsAssignableRuntimeType(arg)
+}
+
+func matchOperandTrait(trait int, arg ref.Val) bool {
+ return trait == 0 || arg.Type().HasTrait(trait) || types.IsUnknownOrError(arg)
+}
+
+// OverloadOpt is a functional option for configuring a function overload.
+type OverloadOpt func(*OverloadDecl) (*OverloadDecl, error)
+
+// UnaryBinding provides the implementation of a unary overload. The provided function is protected by a runtime
+// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
+func UnaryBinding(binding functions.UnaryOp) OverloadOpt {
+ return func(o *OverloadDecl) (*OverloadDecl, error) {
+ if o.hasBinding() {
+ return nil, fmt.Errorf("overload already has a binding: %s", o.ID())
+ }
+ if len(o.ArgTypes()) != 1 {
+ return nil, fmt.Errorf("unary function bound to non-unary overload: %s", o.ID())
+ }
+ o.unaryOp = binding
+ return o, nil
+ }
+}
+
+// BinaryBinding provides the implementation of a binary overload. The provided function is protected by a runtime
+// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
+func BinaryBinding(binding functions.BinaryOp) OverloadOpt {
+ return func(o *OverloadDecl) (*OverloadDecl, error) {
+ if o.hasBinding() {
+ return nil, fmt.Errorf("overload already has a binding: %s", o.ID())
+ }
+ if len(o.ArgTypes()) != 2 {
+ return nil, fmt.Errorf("binary function bound to non-binary overload: %s", o.ID())
+ }
+ o.binaryOp = binding
+ return o, nil
+ }
+}
+
+// FunctionBinding provides the implementation of a variadic overload. The provided function is protected by a runtime
+// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
+func FunctionBinding(binding functions.FunctionOp) OverloadOpt {
+ return func(o *OverloadDecl) (*OverloadDecl, error) {
+ if o.hasBinding() {
+ return nil, fmt.Errorf("overload already has a binding: %s", o.ID())
+ }
+ o.functionOp = binding
+ return o, nil
+ }
+}
+
+// OverloadIsNonStrict enables the function to be called with error and unknown argument values.
+//
+// Note: do not use this option unless absolutely necessary, as it should be an uncommon feature.
+func OverloadIsNonStrict() OverloadOpt {
+ return func(o *OverloadDecl) (*OverloadDecl, error) {
+ o.nonStrict = true
+ return o, nil
+ }
+}
+
+// OverloadOperandTrait configures a set of traits which the first argument to the overload must implement in order to be
+// successfully invoked.
+func OverloadOperandTrait(trait int) OverloadOpt {
+ return func(o *OverloadDecl) (*OverloadDecl, error) {
+ o.operandTrait = trait
+ return o, nil
+ }
+}
+
+// NewConstant creates a new constant declaration.
+func NewConstant(name string, t *types.Type, v ref.Val) *VariableDecl {
+ return &VariableDecl{name: name, varType: t, value: v}
+}
+
+// NewVariable creates a new variable declaration.
+func NewVariable(name string, t *types.Type) *VariableDecl {
+ return &VariableDecl{name: name, varType: t}
+}
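+
+// A small sketch (illustrative only; the names are hypothetical):
+//
+//    reqSize := NewVariable("request.size", types.IntType)
+//    pi := NewConstant("math.pi", types.DoubleType, types.Double(3.14159))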
+
+// VariableDecl defines a variable declaration which may optionally have a constant value.
+type VariableDecl struct {
+ name string
+ varType *types.Type
+ value ref.Val
+}
+
+// Name returns the fully-qualified variable name
+func (v *VariableDecl) Name() string {
+ if v == nil {
+ return ""
+ }
+ return v.name
+}
+
+// Type returns the types.Type value associated with the variable.
+func (v *VariableDecl) Type() *types.Type {
+ if v == nil {
+ // types.Type is nil-safe
+ return nil
+ }
+ return v.varType
+}
+
+// Value returns the constant value associated with the declaration.
+func (v *VariableDecl) Value() ref.Val {
+ if v == nil {
+ return nil
+ }
+ return v.value
+}
+
+// DeclarationIsEquivalent returns true if one variable declaration has the same name and same type as the input.
+func (v *VariableDecl) DeclarationIsEquivalent(other *VariableDecl) bool {
+ if v == other {
+ return true
+ }
+ return v.Name() == other.Name() && v.Type().IsEquivalentType(other.Type())
+}
+
+// VariableDeclToExprDecl converts a go-native variable declaration into a protobuf-typed variable declaration.
+func VariableDeclToExprDecl(v *VariableDecl) (*exprpb.Decl, error) {
+ varType, err := types.TypeToExprType(v.Type())
+ if err != nil {
+ return nil, err
+ }
+ return chkdecls.NewVar(v.Name(), varType), nil
+}
+
+// TypeVariable creates a new type identifier for use within a types.Provider
+func TypeVariable(t *types.Type) *VariableDecl {
+ return NewVariable(t.TypeName(), types.NewTypeTypeWithParam(t))
+}
+
+// FunctionDeclToExprDecl converts a go-native function declaration into a protobuf-typed function declaration.
+func FunctionDeclToExprDecl(f *FunctionDecl) (*exprpb.Decl, error) {
+ overloads := make([]*exprpb.Decl_FunctionDecl_Overload, len(f.overloads))
+ for i, oID := range f.overloadOrdinals {
+ o := f.overloads[oID]
+ paramNames := map[string]struct{}{}
+ argTypes := make([]*exprpb.Type, len(o.ArgTypes()))
+ for j, a := range o.ArgTypes() {
+ collectParamNames(paramNames, a)
+ at, err := types.TypeToExprType(a)
+ if err != nil {
+ return nil, err
+ }
+ argTypes[j] = at
+ }
+ collectParamNames(paramNames, o.ResultType())
+ resultType, err := types.TypeToExprType(o.ResultType())
+ if err != nil {
+ return nil, err
+ }
+ if len(paramNames) == 0 {
+ if o.IsMemberFunction() {
+ overloads[i] = chkdecls.NewInstanceOverload(oID, argTypes, resultType)
+ } else {
+ overloads[i] = chkdecls.NewOverload(oID, argTypes, resultType)
+ }
+ } else {
+ params := []string{}
+ for pn := range paramNames {
+ params = append(params, pn)
+ }
+ if o.IsMemberFunction() {
+ overloads[i] = chkdecls.NewParameterizedInstanceOverload(oID, argTypes, resultType, params)
+ } else {
+ overloads[i] = chkdecls.NewParameterizedOverload(oID, argTypes, resultType, params)
+ }
+ }
+ }
+ return chkdecls.NewFunction(f.Name(), overloads...), nil
+}
+
+func collectParamNames(paramNames map[string]struct{}, arg *types.Type) {
+ if arg.Kind() == types.TypeParamKind {
+ paramNames[arg.TypeName()] = struct{}{}
+ }
+ for _, param := range arg.Parameters() {
+ collectParamNames(paramNames, param)
+ }
+}
+
+var (
+ emptyArgs = []*types.Type{}
+)
diff --git a/vendor/github.com/google/cel-go/common/doc.go b/vendor/github.com/google/cel-go/common/doc.go
new file mode 100644
index 0000000..5362fdf
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package common defines types and utilities common to expression parsing,
+// checking, and interpretation
+package common
diff --git a/vendor/github.com/google/cel-go/common/error.go b/vendor/github.com/google/cel-go/common/error.go
new file mode 100644
index 0000000..774dcb5
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/error.go
@@ -0,0 +1,79 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+
+ "golang.org/x/text/width"
+)
+
+// NewError creates an error associated with an expression id with the given message at the given location.
+func NewError(id int64, message string, location Location) *Error {
+ return &Error{Message: message, Location: location, ExprID: id}
+}
+
+// Error type which references an expression id, a location within source, and a message.
+type Error struct {
+ Location Location
+ Message string
+ ExprID int64
+}
+
+const (
+ dot = "."
+ ind = "^"
+
+ // maxSnippetLength is the largest number of characters which can be rendered in an error message snippet.
+ maxSnippetLength = 16384
+)
+
+var (
+ wideDot = width.Widen.String(dot)
+ wideInd = width.Widen.String(ind)
+)
+
+// ToDisplayString decorates the error message with the source location.
+func (e *Error) ToDisplayString(source Source) string {
+ var result = fmt.Sprintf("ERROR: %s:%d:%d: %s",
+ source.Description(),
+ e.Location.Line(),
+ e.Location.Column()+1, // add one to the 0-based column for display
+ e.Message)
+ if snippet, found := source.Snippet(e.Location.Line()); found && len(snippet) <= maxSnippetLength {
+ snippet := strings.Replace(snippet, "\t", " ", -1)
+ srcLine := "\n | " + snippet
+ var bytes = []byte(snippet)
+ var indLine = "\n | "
+ for i := 0; i < e.Location.Column() && len(bytes) > 0; i++ {
+ _, sz := utf8.DecodeRune(bytes)
+ bytes = bytes[sz:]
+ if sz > 1 {
+ indLine += wideDot
+ } else {
+ indLine += dot
+ }
+ }
+ if _, sz := utf8.DecodeRune(bytes); sz > 1 {
+ indLine += wideInd
+ } else {
+ indLine += ind
+ }
+ result += srcLine + indLine
+ }
+ return result
+}
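+// As a hedged illustration of the rendering above, an error at line 1,
+// column 4 (0-based) of the expression `x + y` would display roughly as
+// follows (description and message are hypothetical):
+//
+//	ERROR: <input>:1:5: undeclared reference to 'y'
+//	 | x + y
+//	 | ....^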
diff --git a/vendor/github.com/google/cel-go/common/errors.go b/vendor/github.com/google/cel-go/common/errors.go
new file mode 100644
index 0000000..6391971
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/errors.go
@@ -0,0 +1,103 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// Errors type which contains a list of errors observed during parsing.
+type Errors struct {
+ errors []*Error
+ source Source
+ numErrors int
+ maxErrorsToReport int
+}
+
+// NewErrors creates a new instance of the Errors type.
+func NewErrors(source Source) *Errors {
+ return &Errors{
+ errors: []*Error{},
+ source: source,
+ maxErrorsToReport: 100,
+ }
+}
+
+// ReportError records an error at a source location.
+func (e *Errors) ReportError(l Location, format string, args ...any) {
+ e.ReportErrorAtID(0, l, format, args...)
+}
+
+// ReportErrorAtID records an error at a source location and expression id.
+func (e *Errors) ReportErrorAtID(id int64, l Location, format string, args ...any) {
+ e.numErrors++
+ if e.numErrors > e.maxErrorsToReport {
+ return
+ }
+ err := &Error{
+ ExprID: id,
+ Location: l,
+ Message: fmt.Sprintf(format, args...),
+ }
+ e.errors = append(e.errors, err)
+}
+
+// GetErrors returns the list of observed errors.
+func (e *Errors) GetErrors() []*Error {
+ return e.errors[:]
+}
+
+// Append creates a new Errors object with the current and input errors.
+func (e *Errors) Append(errs []*Error) *Errors {
+ return &Errors{
+ errors: append(e.errors, errs...),
+ source: e.source,
+ numErrors: e.numErrors + len(errs),
+ maxErrorsToReport: e.maxErrorsToReport,
+ }
+}
+
+// ToDisplayString renders the error set as a newline-delimited string.
+func (e *Errors) ToDisplayString() string {
+ errorsInString := e.maxErrorsToReport
+ if e.numErrors > e.maxErrorsToReport {
+ // add one more error to indicate the number of errors truncated.
+ errorsInString++
+ } else {
+ // otherwise the rendered output contains exactly the number of errors observed.
+ errorsInString = e.numErrors
+ }
+
+ result := make([]string, errorsInString)
+ sort.SliceStable(e.errors, func(i, j int) bool {
+ ei := e.errors[i].Location
+ ej := e.errors[j].Location
+ return ei.Line() < ej.Line() ||
+ (ei.Line() == ej.Line() && ei.Column() < ej.Column())
+ })
+ for i, err := range e.errors {
+ // More errors than maxErrorsToReport can accumulate when two Errors objects are appended.
+ if i >= e.maxErrorsToReport {
+ break
+ }
+ result[i] = err.ToDisplayString(e.source)
+ }
+ if e.numErrors > e.maxErrorsToReport {
+ result[e.maxErrorsToReport] = fmt.Sprintf("%d more errors were truncated", e.numErrors-e.maxErrorsToReport)
+ }
+ return strings.Join(result, "\n")
+}
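+// Sketch of the truncation above: reporting 150 errors with the default
+// maxErrorsToReport of 100 renders the first 100 errors in source order,
+// followed by a final line reading "50 more errors were truncated".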
diff --git a/vendor/github.com/google/cel-go/common/functions/BUILD.bazel b/vendor/github.com/google/cel-go/common/functions/BUILD.bazel
new file mode 100644
index 0000000..3cc27d6
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/functions/BUILD.bazel
@@ -0,0 +1,17 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "functions.go",
+ ],
+ importpath = "github.com/google/cel-go/common/functions",
+ deps = [
+ "//common/types/ref:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/common/functions/functions.go b/vendor/github.com/google/cel-go/common/functions/functions.go
new file mode 100644
index 0000000..67f4a59
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/functions/functions.go
@@ -0,0 +1,61 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package functions defines the standard builtin functions supported by the interpreter
+package functions
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Overload defines a named overload of a function, indicating an operand trait
+// which must be present on the first argument to the overload as well as one
+// of either a unary, binary, or function implementation.
+//
+// The majority of operators within the expression language are unary or binary
+// and the specializations simplify the call contract for implementers of
+// types with operator overloads. Any added complexity is assumed to be handled
+// by the generic FunctionOp.
+type Overload struct {
+ // Operator name as written in an expression or defined within
+ // operators.go.
+ Operator string
+
+ // Operand trait used to dispatch the call. The zero-value indicates a
+ // global function overload or that one of the Unary / Binary / Function
+ // definitions should be used to execute the call.
+ OperandTrait int
+
+ // Unary defines the overload with a UnaryOp implementation. May be nil.
+ Unary UnaryOp
+
+ // Binary defines the overload with a BinaryOp implementation. May be nil.
+ Binary BinaryOp
+
+ // Function defines the overload with a FunctionOp implementation. May be
+ // nil.
+ Function FunctionOp
+
+ // NonStrict specifies whether the Overload will tolerate arguments that
+ // are types.Err or types.Unknown.
+ NonStrict bool
+}
+
+// UnaryOp is a function that takes a single value and produces an output.
+type UnaryOp func(value ref.Val) ref.Val
+
+// BinaryOp is a function that takes two values and produces an output.
+type BinaryOp func(lhs ref.Val, rhs ref.Val) ref.Val
+
+// FunctionOp is a function which accepts zero or more arguments and produces
+// a value or an error as a result.
+type FunctionOp func(values ...ref.Val) ref.Val
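+
+// Illustrative sketch (not part of the library): a binary overload for the
+// addition operator, dispatched on the Adder trait. Assumes the traits
+// package from common/types/traits.
+//
+//	add := &Overload{
+//		Operator:     "_+_",
+//		OperandTrait: traits.AdderType,
+//		Binary: func(lhs, rhs ref.Val) ref.Val {
+//			return lhs.(traits.Adder).Add(rhs)
+//		},
+//	}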
diff --git a/vendor/github.com/google/cel-go/common/location.go b/vendor/github.com/google/cel-go/common/location.go
new file mode 100644
index 0000000..ec3fa7c
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/location.go
@@ -0,0 +1,51 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+// Location interface to represent a location within Source.
+type Location interface {
+ Line() int // 1-based line number within source.
+ Column() int // 0-based column number within source.
+}
+
+// SourceLocation helper type to manually construct a location.
+type SourceLocation struct {
+ line int
+ column int
+}
+
+var (
+ // SourceLocation implements the Location interface.
+ _ Location = &SourceLocation{}
+ // NoLocation is a particular illegal location.
+ NoLocation = &SourceLocation{-1, -1}
+)
+
+// NewLocation creates a new location.
+func NewLocation(line, column int) Location {
+ return &SourceLocation{
+ line: line,
+ column: column}
+}
+
+// Line returns the 1-based line of the location.
+func (l *SourceLocation) Line() int {
+ return l.line
+}
+
+// Column returns the 0-based column number of the location.
+func (l *SourceLocation) Column() int {
+ return l.column
+}
diff --git a/vendor/github.com/google/cel-go/common/operators/BUILD.bazel b/vendor/github.com/google/cel-go/common/operators/BUILD.bazel
new file mode 100644
index 0000000..b5b67f0
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/operators/BUILD.bazel
@@ -0,0 +1,14 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "operators.go",
+ ],
+ importpath = "github.com/google/cel-go/common/operators",
+)
diff --git a/vendor/github.com/google/cel-go/common/operators/operators.go b/vendor/github.com/google/cel-go/common/operators/operators.go
new file mode 100644
index 0000000..f9b39bd
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/operators/operators.go
@@ -0,0 +1,157 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package operators defines the internal function names of operators.
+//
+// All operators in the expression language are modelled as function calls.
+package operators
+
+// String "names" for CEL operators.
+const (
+ // Symbolic operators.
+ Conditional = "_?_:_"
+ LogicalAnd = "_&&_"
+ LogicalOr = "_||_"
+ LogicalNot = "!_"
+ Equals = "_==_"
+ NotEquals = "_!=_"
+ Less = "_<_"
+ LessEquals = "_<=_"
+ Greater = "_>_"
+ GreaterEquals = "_>=_"
+ Add = "_+_"
+ Subtract = "_-_"
+ Multiply = "_*_"
+ Divide = "_/_"
+ Modulo = "_%_"
+ Negate = "-_"
+ Index = "_[_]"
+ OptIndex = "_[?_]"
+ OptSelect = "_?._"
+
+ // Macros, which must have a valid identifier.
+ Has = "has"
+ All = "all"
+ Exists = "exists"
+ ExistsOne = "exists_one"
+ Map = "map"
+ Filter = "filter"
+
+ // Named operators, which must not be valid identifiers.
+ NotStrictlyFalse = "@not_strictly_false"
+ In = "@in"
+
+ // Deprecated: named operators with valid identifiers.
+ OldNotStrictlyFalse = "__not_strictly_false__"
+ OldIn = "_in_"
+)
+
+var (
+ operators = map[string]string{
+ "+": Add,
+ "/": Divide,
+ "==": Equals,
+ ">": Greater,
+ ">=": GreaterEquals,
+ "in": In,
+ "<": Less,
+ "<=": LessEquals,
+ "%": Modulo,
+ "*": Multiply,
+ "!=": NotEquals,
+ "-": Subtract,
+ }
+ // operatorMap maps an operator symbol to a struct containing its display name
+ // (if applicable), precedence, and arity.
+ //
+ // If the symbol does not have a display name listed in the map, it is only because it requires
+ // special casing to render properly as text.
+ operatorMap = map[string]struct {
+ displayName string
+ precedence int
+ arity int
+ }{
+ Conditional: {displayName: "", precedence: 8, arity: 3},
+ LogicalOr: {displayName: "||", precedence: 7, arity: 2},
+ LogicalAnd: {displayName: "&&", precedence: 6, arity: 2},
+ Equals: {displayName: "==", precedence: 5, arity: 2},
+ Greater: {displayName: ">", precedence: 5, arity: 2},
+ GreaterEquals: {displayName: ">=", precedence: 5, arity: 2},
+ In: {displayName: "in", precedence: 5, arity: 2},
+ Less: {displayName: "<", precedence: 5, arity: 2},
+ LessEquals: {displayName: "<=", precedence: 5, arity: 2},
+ NotEquals: {displayName: "!=", precedence: 5, arity: 2},
+ OldIn: {displayName: "in", precedence: 5, arity: 2},
+ Add: {displayName: "+", precedence: 4, arity: 2},
+ Subtract: {displayName: "-", precedence: 4, arity: 2},
+ Divide: {displayName: "/", precedence: 3, arity: 2},
+ Modulo: {displayName: "%", precedence: 3, arity: 2},
+ Multiply: {displayName: "*", precedence: 3, arity: 2},
+ LogicalNot: {displayName: "!", precedence: 2, arity: 1},
+ Negate: {displayName: "-", precedence: 2, arity: 1},
+ Index: {displayName: "", precedence: 1, arity: 2},
+ OptIndex: {displayName: "", precedence: 1, arity: 2},
+ OptSelect: {displayName: "", precedence: 1, arity: 2},
+ }
+)
+
+// Find returns the internal function name for an operator, if the input text is one.
+func Find(text string) (string, bool) {
+ op, found := operators[text]
+ return op, found
+}
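+
+// For example, Find("+") yields ("_+_", true), while Find("!") yields
+// ("", false) since logical-not is unary and not present in the operators map.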
+
+// FindReverse returns the unmangled, text representation of the operator.
+func FindReverse(symbol string) (string, bool) {
+ op, found := operatorMap[symbol]
+ if !found {
+ return "", false
+ }
+ return op.displayName, true
+}
+
+// FindReverseBinaryOperator returns the unmangled, text representation of a binary operator.
+//
+// If the symbol does refer to an operator, but the operator does not have a display name, the
+// result is false.
+func FindReverseBinaryOperator(symbol string) (string, bool) {
+ op, found := operatorMap[symbol]
+ if !found || op.arity != 2 {
+ return "", false
+ }
+ if op.displayName == "" {
+ return "", false
+ }
+ return op.displayName, true
+}
+
+// Precedence returns the operator precedence, where a higher number indicates a
+// higher-precedence operation.
+func Precedence(symbol string) int {
+ op, found := operatorMap[symbol]
+ if !found {
+ return 0
+ }
+ return op.precedence
+}
+
+// Arity returns the number of arguments the operator takes;
+// -1 is returned if an undefined symbol is provided.
+func Arity(symbol string) int {
+ op, found := operatorMap[symbol]
+ if !found {
+ return -1
+ }
+ return op.arity
+}
diff --git a/vendor/github.com/google/cel-go/common/overloads/BUILD.bazel b/vendor/github.com/google/cel-go/common/overloads/BUILD.bazel
new file mode 100644
index 0000000..e46e2f4
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/overloads/BUILD.bazel
@@ -0,0 +1,14 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "overloads.go",
+ ],
+ importpath = "github.com/google/cel-go/common/overloads",
+)
diff --git a/vendor/github.com/google/cel-go/common/overloads/overloads.go b/vendor/github.com/google/cel-go/common/overloads/overloads.go
new file mode 100644
index 0000000..9d50f43
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/overloads/overloads.go
@@ -0,0 +1,327 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package overloads defines the internal overload identifiers for function and
+// operator overloads.
+package overloads
+
+// Boolean logic overloads
+const (
+ Conditional = "conditional"
+ LogicalAnd = "logical_and"
+ LogicalOr = "logical_or"
+ LogicalNot = "logical_not"
+ NotStrictlyFalse = "not_strictly_false"
+ Equals = "equals"
+ NotEquals = "not_equals"
+ LessBool = "less_bool"
+ LessInt64 = "less_int64"
+ LessInt64Double = "less_int64_double"
+ LessInt64Uint64 = "less_int64_uint64"
+ LessUint64 = "less_uint64"
+ LessUint64Double = "less_uint64_double"
+ LessUint64Int64 = "less_uint64_int64"
+ LessDouble = "less_double"
+ LessDoubleInt64 = "less_double_int64"
+ LessDoubleUint64 = "less_double_uint64"
+ LessString = "less_string"
+ LessBytes = "less_bytes"
+ LessTimestamp = "less_timestamp"
+ LessDuration = "less_duration"
+ LessEqualsBool = "less_equals_bool"
+ LessEqualsInt64 = "less_equals_int64"
+ LessEqualsInt64Double = "less_equals_int64_double"
+ LessEqualsInt64Uint64 = "less_equals_int64_uint64"
+ LessEqualsUint64 = "less_equals_uint64"
+ LessEqualsUint64Double = "less_equals_uint64_double"
+ LessEqualsUint64Int64 = "less_equals_uint64_int64"
+ LessEqualsDouble = "less_equals_double"
+ LessEqualsDoubleInt64 = "less_equals_double_int64"
+ LessEqualsDoubleUint64 = "less_equals_double_uint64"
+ LessEqualsString = "less_equals_string"
+ LessEqualsBytes = "less_equals_bytes"
+ LessEqualsTimestamp = "less_equals_timestamp"
+ LessEqualsDuration = "less_equals_duration"
+ GreaterBool = "greater_bool"
+ GreaterInt64 = "greater_int64"
+ GreaterInt64Double = "greater_int64_double"
+ GreaterInt64Uint64 = "greater_int64_uint64"
+ GreaterUint64 = "greater_uint64"
+ GreaterUint64Double = "greater_uint64_double"
+ GreaterUint64Int64 = "greater_uint64_int64"
+ GreaterDouble = "greater_double"
+ GreaterDoubleInt64 = "greater_double_int64"
+ GreaterDoubleUint64 = "greater_double_uint64"
+ GreaterString = "greater_string"
+ GreaterBytes = "greater_bytes"
+ GreaterTimestamp = "greater_timestamp"
+ GreaterDuration = "greater_duration"
+ GreaterEqualsBool = "greater_equals_bool"
+ GreaterEqualsInt64 = "greater_equals_int64"
+ GreaterEqualsInt64Double = "greater_equals_int64_double"
+ GreaterEqualsInt64Uint64 = "greater_equals_int64_uint64"
+ GreaterEqualsUint64 = "greater_equals_uint64"
+ GreaterEqualsUint64Double = "greater_equals_uint64_double"
+ GreaterEqualsUint64Int64 = "greater_equals_uint64_int64"
+ GreaterEqualsDouble = "greater_equals_double"
+ GreaterEqualsDoubleInt64 = "greater_equals_double_int64"
+ GreaterEqualsDoubleUint64 = "greater_equals_double_uint64"
+ GreaterEqualsString = "greater_equals_string"
+ GreaterEqualsBytes = "greater_equals_bytes"
+ GreaterEqualsTimestamp = "greater_equals_timestamp"
+ GreaterEqualsDuration = "greater_equals_duration"
+)
+
+// Math overloads
+const (
+ AddInt64 = "add_int64"
+ AddUint64 = "add_uint64"
+ AddDouble = "add_double"
+ AddString = "add_string"
+ AddBytes = "add_bytes"
+ AddList = "add_list"
+ AddTimestampDuration = "add_timestamp_duration"
+ AddDurationTimestamp = "add_duration_timestamp"
+ AddDurationDuration = "add_duration_duration"
+ SubtractInt64 = "subtract_int64"
+ SubtractUint64 = "subtract_uint64"
+ SubtractDouble = "subtract_double"
+ SubtractTimestampTimestamp = "subtract_timestamp_timestamp"
+ SubtractTimestampDuration = "subtract_timestamp_duration"
+ SubtractDurationDuration = "subtract_duration_duration"
+ MultiplyInt64 = "multiply_int64"
+ MultiplyUint64 = "multiply_uint64"
+ MultiplyDouble = "multiply_double"
+ DivideInt64 = "divide_int64"
+ DivideUint64 = "divide_uint64"
+ DivideDouble = "divide_double"
+ ModuloInt64 = "modulo_int64"
+ ModuloUint64 = "modulo_uint64"
+ NegateInt64 = "negate_int64"
+ NegateDouble = "negate_double"
+)
+
+// Index overloads
+const (
+ IndexList = "index_list"
+ IndexMap = "index_map"
+ IndexMessage = "index_message" // TODO: introduce concept of types.Message
+)
+
+// In operators
+const (
+ DeprecatedIn = "in"
+ InList = "in_list"
+ InMap = "in_map"
+ InMessage = "in_message" // TODO: introduce concept of types.Message
+)
+
+// Size overloads
+const (
+ Size = "size"
+ SizeString = "size_string"
+ SizeBytes = "size_bytes"
+ SizeList = "size_list"
+ SizeMap = "size_map"
+ SizeStringInst = "string_size"
+ SizeBytesInst = "bytes_size"
+ SizeListInst = "list_size"
+ SizeMapInst = "map_size"
+)
+
+// String function names.
+const (
+ Contains = "contains"
+ EndsWith = "endsWith"
+ Matches = "matches"
+ StartsWith = "startsWith"
+)
+
+// Extension function overloads with complex behaviors that need to be referenced in runtime and static analysis cost computations.
+const (
+ ExtQuoteString = "strings_quote"
+)
+
+// String function overload names.
+const (
+ ContainsString = "contains_string"
+ EndsWithString = "ends_with_string"
+ MatchesString = "matches_string"
+ StartsWithString = "starts_with_string"
+)
+
+// Extension function overloads with complex behaviors that need to be referenced in runtime and static analysis cost computations.
+const (
+ ExtFormatString = "string_format"
+)
+
+// Time-based functions.
+const (
+ TimeGetFullYear = "getFullYear"
+ TimeGetMonth = "getMonth"
+ TimeGetDayOfYear = "getDayOfYear"
+ TimeGetDate = "getDate"
+ TimeGetDayOfMonth = "getDayOfMonth"
+ TimeGetDayOfWeek = "getDayOfWeek"
+ TimeGetHours = "getHours"
+ TimeGetMinutes = "getMinutes"
+ TimeGetSeconds = "getSeconds"
+ TimeGetMilliseconds = "getMilliseconds"
+)
+
+// Timestamp overloads for time functions without timezones.
+const (
+ TimestampToYear = "timestamp_to_year"
+ TimestampToMonth = "timestamp_to_month"
+ TimestampToDayOfYear = "timestamp_to_day_of_year"
+ TimestampToDayOfMonthZeroBased = "timestamp_to_day_of_month"
+ TimestampToDayOfMonthOneBased = "timestamp_to_day_of_month_1_based"
+ TimestampToDayOfWeek = "timestamp_to_day_of_week"
+ TimestampToHours = "timestamp_to_hours"
+ TimestampToMinutes = "timestamp_to_minutes"
+ TimestampToSeconds = "timestamp_to_seconds"
+ TimestampToMilliseconds = "timestamp_to_milliseconds"
+)
+
+// Timestamp overloads for time functions with timezones.
+const (
+ TimestampToYearWithTz = "timestamp_to_year_with_tz"
+ TimestampToMonthWithTz = "timestamp_to_month_with_tz"
+ TimestampToDayOfYearWithTz = "timestamp_to_day_of_year_with_tz"
+ TimestampToDayOfMonthZeroBasedWithTz = "timestamp_to_day_of_month_with_tz"
+ TimestampToDayOfMonthOneBasedWithTz = "timestamp_to_day_of_month_1_based_with_tz"
+ TimestampToDayOfWeekWithTz = "timestamp_to_day_of_week_with_tz"
+ TimestampToHoursWithTz = "timestamp_to_hours_with_tz"
+ TimestampToMinutesWithTz = "timestamp_to_minutes_with_tz"
+ TimestampToSecondsWithTz = "timestamp_to_seconds_tz"
+ TimestampToMillisecondsWithTz = "timestamp_to_milliseconds_with_tz"
+)
+
+// Duration overloads for time functions.
+const (
+ DurationToHours = "duration_to_hours"
+ DurationToMinutes = "duration_to_minutes"
+ DurationToSeconds = "duration_to_seconds"
+ DurationToMilliseconds = "duration_to_milliseconds"
+)
+
+// Type conversion methods and overloads
+const (
+ TypeConvertInt = "int"
+ TypeConvertUint = "uint"
+ TypeConvertDouble = "double"
+ TypeConvertBool = "bool"
+ TypeConvertString = "string"
+ TypeConvertBytes = "bytes"
+ TypeConvertTimestamp = "timestamp"
+ TypeConvertDuration = "duration"
+ TypeConvertType = "type"
+ TypeConvertDyn = "dyn"
+)
+
+// Int conversion functions.
+const (
+ IntToInt = "int64_to_int64"
+ UintToInt = "uint64_to_int64"
+ DoubleToInt = "double_to_int64"
+ StringToInt = "string_to_int64"
+ TimestampToInt = "timestamp_to_int64"
+ DurationToInt = "duration_to_int64"
+)
+
+// Uint conversion functions.
+const (
+ UintToUint = "uint64_to_uint64"
+ IntToUint = "int64_to_uint64"
+ DoubleToUint = "double_to_uint64"
+ StringToUint = "string_to_uint64"
+)
+
+// Double conversion functions.
+const (
+ DoubleToDouble = "double_to_double"
+ IntToDouble = "int64_to_double"
+ UintToDouble = "uint64_to_double"
+ StringToDouble = "string_to_double"
+)
+
+// Bool conversion functions.
+const (
+ BoolToBool = "bool_to_bool"
+ StringToBool = "string_to_bool"
+)
+
+// Bytes conversion functions.
+const (
+ BytesToBytes = "bytes_to_bytes"
+ StringToBytes = "string_to_bytes"
+)
+
+// String conversion functions.
+const (
+ StringToString = "string_to_string"
+ BoolToString = "bool_to_string"
+ IntToString = "int64_to_string"
+ UintToString = "uint64_to_string"
+ DoubleToString = "double_to_string"
+ BytesToString = "bytes_to_string"
+ TimestampToString = "timestamp_to_string"
+ DurationToString = "duration_to_string"
+)
+
+// Timestamp conversion functions
+const (
+ TimestampToTimestamp = "timestamp_to_timestamp"
+ StringToTimestamp = "string_to_timestamp"
+ IntToTimestamp = "int64_to_timestamp"
+)
+
+// Convert duration from string
+const (
+ DurationToDuration = "duration_to_duration"
+ StringToDuration = "string_to_duration"
+ IntToDuration = "int64_to_duration"
+)
+
+// Convert to dyn
+const (
+ ToDyn = "to_dyn"
+)
+
+// Comprehension helper methods, not directly accessible by a developer.
+const (
+ Iterator = "@iterator"
+ HasNext = "@hasNext"
+ Next = "@next"
+)
+
+// IsTypeConversionFunction returns whether the input function is a standard library type
+// conversion function.
+func IsTypeConversionFunction(function string) bool {
+ switch function {
+ case TypeConvertBool,
+ TypeConvertBytes,
+ TypeConvertDouble,
+ TypeConvertDuration,
+ TypeConvertDyn,
+ TypeConvertInt,
+ TypeConvertString,
+ TypeConvertTimestamp,
+ TypeConvertType,
+ TypeConvertUint:
+ return true
+ default:
+ return false
+ }
+}
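+
+// For example, IsTypeConversionFunction("int") and
+// IsTypeConversionFunction("duration") return true, while
+// IsTypeConversionFunction("size") returns false.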
diff --git a/vendor/github.com/google/cel-go/common/runes/BUILD.bazel b/vendor/github.com/google/cel-go/common/runes/BUILD.bazel
new file mode 100644
index 0000000..bb30242
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/runes/BUILD.bazel
@@ -0,0 +1,25 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "buffer.go",
+ ],
+ importpath = "github.com/google/cel-go/common/runes",
+)
+
+go_test(
+ name = "go_default_test",
+ size = "small",
+ srcs = [
+ "buffer_test.go",
+ ],
+ embed = [
+ ":go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/common/runes/buffer.go b/vendor/github.com/google/cel-go/common/runes/buffer.go
new file mode 100644
index 0000000..50aac0b
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/runes/buffer.go
@@ -0,0 +1,194 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package runes provides interfaces and utilities for working with runes.
+package runes
+
+import (
+ "strings"
+ "unicode/utf8"
+)
+
+// Buffer is an interface for accessing a contiguous array of code points.
+type Buffer interface {
+ Get(i int) rune
+ Slice(i, j int) string
+ Len() int
+}
+
+type emptyBuffer struct{}
+
+func (e *emptyBuffer) Get(i int) rune {
+ panic("slice index out of bounds")
+}
+
+func (e *emptyBuffer) Slice(i, j int) string {
+ if i != 0 || i != j {
+ panic("slice index out of bounds")
+ }
+ return ""
+}
+
+func (e *emptyBuffer) Len() int {
+ return 0
+}
+
+var _ Buffer = &emptyBuffer{}
+
+// asciiBuffer is a Buffer implementation for arrays containing only code points from
+// the ASCII character set.
+type asciiBuffer struct {
+ arr []byte
+}
+
+func (a *asciiBuffer) Get(i int) rune {
+ return rune(uint32(a.arr[i]))
+}
+
+func (a *asciiBuffer) Slice(i, j int) string {
+ return string(a.arr[i:j])
+}
+
+func (a *asciiBuffer) Len() int {
+ return len(a.arr)
+}
+
+var _ Buffer = &asciiBuffer{}
+
+// basicBuffer is a Buffer implementation for arrays containing code points from both
+// the Latin-1 character set and the Basic Multilingual Plane.
+type basicBuffer struct {
+ arr []uint16
+}
+
+func (b *basicBuffer) Get(i int) rune {
+ return rune(uint32(b.arr[i]))
+}
+
+func (b *basicBuffer) Slice(i, j int) string {
+ var str strings.Builder
+ str.Grow((j - i) * 3) // Worst case encoding size for 0xffff is 3.
+ for ; i < j; i++ {
+ str.WriteRune(rune(uint32(b.arr[i])))
+ }
+ return str.String()
+}
+
+func (b *basicBuffer) Len() int {
+ return len(b.arr)
+}
+
+var _ Buffer = &basicBuffer{}
+
+// supplementalBuffer is a Buffer implementation for arrays containing code points from
+// the Latin-1 character set, the Basic Multilingual Plane, or the Supplemental Multilingual Plane.
+type supplementalBuffer struct {
+ arr []rune
+}
+
+func (s *supplementalBuffer) Get(i int) rune {
+ return rune(uint32(s.arr[i]))
+}
+
+func (s *supplementalBuffer) Slice(i, j int) string {
+ return string(s.arr[i:j])
+}
+
+func (s *supplementalBuffer) Len() int {
+ return len(s.arr)
+}
+
+var _ Buffer = &supplementalBuffer{}
+
+var nilBuffer = &emptyBuffer{}
+
+// NewBuffer returns an efficient implementation of Buffer for the given text based on the ranges of
+// the encoded code points contained within.
+//
+// Code points are represented as an array of byte, uint16, or rune. This approach ensures that
+// each index represents a code point by itself without needing to use an array of rune. At first
+// we assume all code points are less than or equal to '\u007f'. If this holds true, the
+// underlying storage is a byte array containing only ASCII characters. If we encounter a code
+// point above this range but less than or equal to '\uffff', we allocate a uint16 array, copy the
+// elements of the previous byte array to it, and continue. If no higher code point appears, the
+// underlying storage is a uint16 array containing only Unicode characters in the Basic Multilingual
+// Plane. If we encounter a code point above '\uffff', we allocate a rune array, copy the previous
+// elements of the byte or uint16 array, and continue. In that case the underlying storage is a rune
+// array containing any Unicode character.
+func NewBuffer(data string) Buffer {
+ if len(data) == 0 {
+ return nilBuffer
+ }
+ var (
+ idx = 0
+ buf8 = make([]byte, 0, len(data))
+ buf16 []uint16
+ buf32 []rune
+ )
+ for idx < len(data) {
+ r, s := utf8.DecodeRuneInString(data[idx:])
+ idx += s
+ if r < utf8.RuneSelf {
+ buf8 = append(buf8, byte(r))
+ continue
+ }
+ if r <= 0xffff {
+ buf16 = make([]uint16, len(buf8), len(data))
+ for i, v := range buf8 {
+ buf16[i] = uint16(v)
+ }
+ buf8 = nil
+ buf16 = append(buf16, uint16(r))
+ goto copy16
+ }
+ buf32 = make([]rune, len(buf8), len(data))
+ for i, v := range buf8 {
+ buf32[i] = rune(uint32(v))
+ }
+ buf8 = nil
+ buf32 = append(buf32, r)
+ goto copy32
+ }
+ return &asciiBuffer{
+ arr: buf8,
+ }
+copy16:
+ for idx < len(data) {
+ r, s := utf8.DecodeRuneInString(data[idx:])
+ idx += s
+ if r <= 0xffff {
+ buf16 = append(buf16, uint16(r))
+ continue
+ }
+ buf32 = make([]rune, len(buf16), len(data))
+ for i, v := range buf16 {
+ buf32[i] = rune(uint32(v))
+ }
+ buf16 = nil
+ buf32 = append(buf32, r)
+ goto copy32
+ }
+ return &basicBuffer{
+ arr: buf16,
+ }
+copy32:
+ for idx < len(data) {
+ r, s := utf8.DecodeRuneInString(data[idx:])
+ idx += s
+ buf32 = append(buf32, r)
+ }
+ return &supplementalBuffer{
+ arr: buf32,
+ }
+}
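+
+// Sketch of the selection above (illustrative, not part of the library):
+//
+//	NewBuffer("hello")         // *asciiBuffer: every code point is <= '\u007f'
+//	NewBuffer("héllo")         // *basicBuffer: 'é' forces the uint16 storage
+//	NewBuffer("hi \U0001F600") // *supplementalBuffer: the emoji is above '\uffff'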
diff --git a/vendor/github.com/google/cel-go/common/source.go b/vendor/github.com/google/cel-go/common/source.go
new file mode 100644
index 0000000..acf22bd
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/source.go
@@ -0,0 +1,183 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "strings"
+ "unicode/utf8"
+
+ "github.com/google/cel-go/common/runes"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Source is an interface for accessing the contents of an expression source.
+type Source interface {
+ // Content returns the source content represented as a string.
+ // Example contents are a single file's contents, a textbox field,
+ // or a URL parameter.
+ Content() string
+
+ // Description gives a brief description of the source.
+ // Example descriptions are a file name or UI element.
+ Description() string
+
+ // LineOffsets gives the character offsets at which lines occur.
+ // The zero-th entry should refer to the break between the first
+ // and second line, or EOF if there is only one line of source.
+ LineOffsets() []int32
+
+ // LocationOffset translates a Location to an offset.
+ // Given the line and column of the Location returns the
+ // Location's character offset in the Source, and a bool
+ // indicating whether the Location was found.
+ LocationOffset(location Location) (int32, bool)
+
+ // OffsetLocation translates a character offset to a Location, or
+ // false if the conversion was not feasible.
+ OffsetLocation(offset int32) (Location, bool)
+
+ // NewLocation takes an input line and column and produces a Location.
+ // The default behavior is to treat the line and column as absolute,
+ // but concrete derivations may use this method to convert a relative
+ // line and column position into an absolute location.
+ NewLocation(line, col int) Location
+
+ // Snippet returns a line of content and whether the line was found.
+ Snippet(line int) (string, bool)
+}
+
+// The sourceImpl type implements the Source interface.
+type sourceImpl struct {
+ runes.Buffer
+ description string
+ lineOffsets []int32
+}
+
+var _ runes.Buffer = &sourceImpl{}
+
+// TODO(jimlarson) "Character offsets" should index the code points
+// within the UTF-8 encoded string. It currently indexes bytes.
+// Can be accomplished by using rune[] instead of string for contents.
+
+// NewTextSource creates a new Source from the input text string.
+func NewTextSource(text string) Source {
+ return NewStringSource(text, "")
+}
+
+// NewStringSource creates a new Source from the given contents and description.
+func NewStringSource(contents string, description string) Source {
+ // Compute line offsets up front as they are referred to frequently.
+ lines := strings.Split(contents, "\n")
+ offsets := make([]int32, len(lines))
+ var offset int32
+ for i, line := range lines {
+ offset = offset + int32(utf8.RuneCountInString(line)) + 1
+ offsets[int32(i)] = offset
+ }
+ return &sourceImpl{
+ Buffer: runes.NewBuffer(contents),
+ description: description,
+ lineOffsets: offsets,
+ }
+}
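+
+// Sketch of the offsets computed above: for contents "a\nbc" the line
+// offsets are [2, 5]; entry 0 marks the break after the first line ('a'
+// plus its newline), and the final entry is the rune length plus one, as
+// if the last line were newline-terminated.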
+
+// NewInfoSource creates a new Source from a SourceInfo.
+func NewInfoSource(info *exprpb.SourceInfo) Source {
+ return &sourceImpl{
+ Buffer: runes.NewBuffer(""),
+ description: info.GetLocation(),
+ lineOffsets: info.GetLineOffsets(),
+ }
+}
+
+// Content implements the Source interface method.
+func (s *sourceImpl) Content() string {
+ return s.Slice(0, s.Len())
+}
+
+// Description implements the Source interface method.
+func (s *sourceImpl) Description() string {
+ return s.description
+}
+
+// LineOffsets implements the Source interface method.
+func (s *sourceImpl) LineOffsets() []int32 {
+ return s.lineOffsets
+}
+
+// LocationOffset implements the Source interface method.
+func (s *sourceImpl) LocationOffset(location Location) (int32, bool) {
+ if lineOffset, found := s.findLineOffset(location.Line()); found {
+ return lineOffset + int32(location.Column()), true
+ }
+ return -1, false
+}
+
+// NewLocation implements the Source interface method.
+func (s *sourceImpl) NewLocation(line, col int) Location {
+ return NewLocation(line, col)
+}
+
+// OffsetLocation implements the Source interface method.
+func (s *sourceImpl) OffsetLocation(offset int32) (Location, bool) {
+ line, lineOffset := s.findLine(offset)
+ return NewLocation(int(line), int(offset-lineOffset)), true
+}
+
+// Snippet implements the Source interface method.
+func (s *sourceImpl) Snippet(line int) (string, bool) {
+ charStart, found := s.findLineOffset(line)
+ if !found || s.Len() == 0 {
+ return "", false
+ }
+ charEnd, found := s.findLineOffset(line + 1)
+ if found {
+ return s.Slice(int(charStart), int(charEnd-1)), true
+ }
+ return s.Slice(int(charStart), s.Len()), true
+}
+
+// findLineOffset returns the offset where the (1-indexed) line begins,
+// or false if line doesn't exist.
+func (s *sourceImpl) findLineOffset(line int) (int32, bool) {
+ if line == 1 {
+ return 0, true
+ }
+ if line > 1 && line <= int(len(s.lineOffsets)) {
+ offset := s.lineOffsets[line-2]
+ return offset, true
+ }
+ return -1, false
+}
+
+// findLine finds the line that contains the given character offset and
+// returns the line number and offset of the beginning of that line.
+// Note that the last line is treated as if it contains all offsets
+// beyond the end of the actual source.
+func (s *sourceImpl) findLine(characterOffset int32) (int32, int32) {
+ var line int32 = 1
+ for _, lineOffset := range s.lineOffsets {
+ if lineOffset > characterOffset {
+ break
+ } else {
+ line++
+ }
+ }
+ if line == 1 {
+ return line, 0
+ }
+ return line, s.lineOffsets[line-2]
+}
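+
+// Sketch of findLine against the "a\nbc" example: offset 0 falls on line 1
+// (line start 0), while offsets 2 and 3 fall on line 2 (line start 2),
+// so OffsetLocation(3) yields line 2, column 1.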
diff --git a/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel b/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel
new file mode 100644
index 0000000..c130a93
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel
@@ -0,0 +1,25 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "standard.go",
+ ],
+ importpath = "github.com/google/cel-go/common/stdlib",
+ deps = [
+ "//checker/decls:go_default_library",
+ "//common/decls:go_default_library",
+ "//common/functions:go_default_library",
+ "//common/operators:go_default_library",
+ "//common/overloads:go_default_library",
+ "//common/types:go_default_library",
+ "//common/types/ref:go_default_library",
+ "//common/types/traits:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ ],
+)
\ No newline at end of file
diff --git a/vendor/github.com/google/cel-go/common/stdlib/standard.go b/vendor/github.com/google/cel-go/common/stdlib/standard.go
new file mode 100644
index 0000000..d02cb64
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/stdlib/standard.go
@@ -0,0 +1,661 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package stdlib contains all of the standard library function declarations and definitions for CEL.
+package stdlib
+
+import (
+ "github.com/google/cel-go/common/decls"
+ "github.com/google/cel-go/common/functions"
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+var (
+ stdFunctions []*decls.FunctionDecl
+ stdFnDecls []*exprpb.Decl
+ stdTypes []*decls.VariableDecl
+ stdTypeDecls []*exprpb.Decl
+)
+
+func init() {
+ paramA := types.NewTypeParamType("A")
+ paramB := types.NewTypeParamType("B")
+ listOfA := types.NewListType(paramA)
+ mapOfAB := types.NewMapType(paramA, paramB)
+
+ stdTypes = []*decls.VariableDecl{
+ decls.TypeVariable(types.BoolType),
+ decls.TypeVariable(types.BytesType),
+ decls.TypeVariable(types.DoubleType),
+ decls.TypeVariable(types.DurationType),
+ decls.TypeVariable(types.IntType),
+ decls.TypeVariable(listOfA),
+ decls.TypeVariable(mapOfAB),
+ decls.TypeVariable(types.NullType),
+ decls.TypeVariable(types.StringType),
+ decls.TypeVariable(types.TimestampType),
+ decls.TypeVariable(types.TypeType),
+ decls.TypeVariable(types.UintType),
+ }
+
+ stdTypeDecls = make([]*exprpb.Decl, 0, len(stdTypes))
+ for _, stdType := range stdTypes {
+ typeVar, err := decls.VariableDeclToExprDecl(stdType)
+ if err != nil {
+ panic(err)
+ }
+ stdTypeDecls = append(stdTypeDecls, typeVar)
+ }
+
+ stdFunctions = []*decls.FunctionDecl{
+ // Logical operators. Special-cased within the interpreter.
+ // Note, the singleton binding prevents extensions from overriding the operator behavior.
+ function(operators.Conditional,
+ decls.Overload(overloads.Conditional, argTypes(types.BoolType, paramA, paramA), paramA,
+ decls.OverloadIsNonStrict()),
+ decls.SingletonFunctionBinding(noFunctionOverrides)),
+ function(operators.LogicalAnd,
+ decls.Overload(overloads.LogicalAnd, argTypes(types.BoolType, types.BoolType), types.BoolType,
+ decls.OverloadIsNonStrict()),
+ decls.SingletonBinaryBinding(noBinaryOverrides)),
+ function(operators.LogicalOr,
+ decls.Overload(overloads.LogicalOr, argTypes(types.BoolType, types.BoolType), types.BoolType,
+ decls.OverloadIsNonStrict()),
+ decls.SingletonBinaryBinding(noBinaryOverrides)),
+ function(operators.LogicalNot,
+ decls.Overload(overloads.LogicalNot, argTypes(types.BoolType), types.BoolType),
+ decls.SingletonUnaryBinding(func(val ref.Val) ref.Val {
+ b, ok := val.(types.Bool)
+ if !ok {
+ return types.MaybeNoSuchOverloadErr(val)
+ }
+ return b.Negate()
+ })),
+
+ // Comprehension short-circuiting related function
+ function(operators.NotStrictlyFalse,
+ decls.Overload(overloads.NotStrictlyFalse, argTypes(types.BoolType), types.BoolType,
+ decls.OverloadIsNonStrict(),
+ decls.UnaryBinding(notStrictlyFalse))),
+ // Deprecated: __not_strictly_false__
+ function(operators.OldNotStrictlyFalse,
+ decls.DisableDeclaration(true), // safe deprecation
+ decls.Overload(operators.OldNotStrictlyFalse, argTypes(types.BoolType), types.BoolType,
+ decls.OverloadIsNonStrict(),
+ decls.UnaryBinding(notStrictlyFalse))),
+
+ // Equality / inequality. Special-cased in the interpreter
+ function(operators.Equals,
+ decls.Overload(overloads.Equals, argTypes(paramA, paramA), types.BoolType),
+ decls.SingletonBinaryBinding(noBinaryOverrides)),
+ function(operators.NotEquals,
+ decls.Overload(overloads.NotEquals, argTypes(paramA, paramA), types.BoolType),
+ decls.SingletonBinaryBinding(noBinaryOverrides)),
+
+ // Mathematical operators
+ function(operators.Add,
+ decls.Overload(overloads.AddBytes,
+ argTypes(types.BytesType, types.BytesType), types.BytesType),
+ decls.Overload(overloads.AddDouble,
+ argTypes(types.DoubleType, types.DoubleType), types.DoubleType),
+ decls.Overload(overloads.AddDurationDuration,
+ argTypes(types.DurationType, types.DurationType), types.DurationType),
+ decls.Overload(overloads.AddDurationTimestamp,
+ argTypes(types.DurationType, types.TimestampType), types.TimestampType),
+ decls.Overload(overloads.AddTimestampDuration,
+ argTypes(types.TimestampType, types.DurationType), types.TimestampType),
+ decls.Overload(overloads.AddInt64,
+ argTypes(types.IntType, types.IntType), types.IntType),
+ decls.Overload(overloads.AddList,
+ argTypes(listOfA, listOfA), listOfA),
+ decls.Overload(overloads.AddString,
+ argTypes(types.StringType, types.StringType), types.StringType),
+ decls.Overload(overloads.AddUint64,
+ argTypes(types.UintType, types.UintType), types.UintType),
+ decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
+ return lhs.(traits.Adder).Add(rhs)
+ }, traits.AdderType)),
+ function(operators.Divide,
+ decls.Overload(overloads.DivideDouble,
+ argTypes(types.DoubleType, types.DoubleType), types.DoubleType),
+ decls.Overload(overloads.DivideInt64,
+ argTypes(types.IntType, types.IntType), types.IntType),
+ decls.Overload(overloads.DivideUint64,
+ argTypes(types.UintType, types.UintType), types.UintType),
+ decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
+ return lhs.(traits.Divider).Divide(rhs)
+ }, traits.DividerType)),
+ function(operators.Modulo,
+ decls.Overload(overloads.ModuloInt64,
+ argTypes(types.IntType, types.IntType), types.IntType),
+ decls.Overload(overloads.ModuloUint64,
+ argTypes(types.UintType, types.UintType), types.UintType),
+ decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
+ return lhs.(traits.Modder).Modulo(rhs)
+ }, traits.ModderType)),
+ function(operators.Multiply,
+ decls.Overload(overloads.MultiplyDouble,
+ argTypes(types.DoubleType, types.DoubleType), types.DoubleType),
+ decls.Overload(overloads.MultiplyInt64,
+ argTypes(types.IntType, types.IntType), types.IntType),
+ decls.Overload(overloads.MultiplyUint64,
+ argTypes(types.UintType, types.UintType), types.UintType),
+ decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
+ return lhs.(traits.Multiplier).Multiply(rhs)
+ }, traits.MultiplierType)),
+ function(operators.Negate,
+ decls.Overload(overloads.NegateDouble, argTypes(types.DoubleType), types.DoubleType),
+ decls.Overload(overloads.NegateInt64, argTypes(types.IntType), types.IntType),
+ decls.SingletonUnaryBinding(func(val ref.Val) ref.Val {
+ if types.IsBool(val) {
+ return types.MaybeNoSuchOverloadErr(val)
+ }
+ return val.(traits.Negater).Negate()
+ }, traits.NegatorType)),
+ function(operators.Subtract,
+ decls.Overload(overloads.SubtractDouble,
+ argTypes(types.DoubleType, types.DoubleType), types.DoubleType),
+ decls.Overload(overloads.SubtractDurationDuration,
+ argTypes(types.DurationType, types.DurationType), types.DurationType),
+ decls.Overload(overloads.SubtractInt64,
+ argTypes(types.IntType, types.IntType), types.IntType),
+ decls.Overload(overloads.SubtractTimestampDuration,
+ argTypes(types.TimestampType, types.DurationType), types.TimestampType),
+ decls.Overload(overloads.SubtractTimestampTimestamp,
+ argTypes(types.TimestampType, types.TimestampType), types.DurationType),
+ decls.Overload(overloads.SubtractUint64,
+ argTypes(types.UintType, types.UintType), types.UintType),
+ decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
+ return lhs.(traits.Subtractor).Subtract(rhs)
+ }, traits.SubtractorType)),
+
+ // Relational operators
+
+ function(operators.Less,
+ decls.Overload(overloads.LessBool,
+ argTypes(types.BoolType, types.BoolType), types.BoolType),
+ decls.Overload(overloads.LessInt64,
+ argTypes(types.IntType, types.IntType), types.BoolType),
+ decls.Overload(overloads.LessInt64Double,
+ argTypes(types.IntType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.LessInt64Uint64,
+ argTypes(types.IntType, types.UintType), types.BoolType),
+ decls.Overload(overloads.LessUint64,
+ argTypes(types.UintType, types.UintType), types.BoolType),
+ decls.Overload(overloads.LessUint64Double,
+ argTypes(types.UintType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.LessUint64Int64,
+ argTypes(types.UintType, types.IntType), types.BoolType),
+ decls.Overload(overloads.LessDouble,
+ argTypes(types.DoubleType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.LessDoubleInt64,
+ argTypes(types.DoubleType, types.IntType), types.BoolType),
+ decls.Overload(overloads.LessDoubleUint64,
+ argTypes(types.DoubleType, types.UintType), types.BoolType),
+ decls.Overload(overloads.LessString,
+ argTypes(types.StringType, types.StringType), types.BoolType),
+ decls.Overload(overloads.LessBytes,
+ argTypes(types.BytesType, types.BytesType), types.BoolType),
+ decls.Overload(overloads.LessTimestamp,
+ argTypes(types.TimestampType, types.TimestampType), types.BoolType),
+ decls.Overload(overloads.LessDuration,
+ argTypes(types.DurationType, types.DurationType), types.BoolType),
+ decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
+ cmp := lhs.(traits.Comparer).Compare(rhs)
+ if cmp == types.IntNegOne {
+ return types.True
+ }
+ if cmp == types.IntOne || cmp == types.IntZero {
+ return types.False
+ }
+ return cmp
+ }, traits.ComparerType)),
+
+ function(operators.LessEquals,
+ decls.Overload(overloads.LessEqualsBool,
+ argTypes(types.BoolType, types.BoolType), types.BoolType),
+ decls.Overload(overloads.LessEqualsInt64,
+ argTypes(types.IntType, types.IntType), types.BoolType),
+ decls.Overload(overloads.LessEqualsInt64Double,
+ argTypes(types.IntType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.LessEqualsInt64Uint64,
+ argTypes(types.IntType, types.UintType), types.BoolType),
+ decls.Overload(overloads.LessEqualsUint64,
+ argTypes(types.UintType, types.UintType), types.BoolType),
+ decls.Overload(overloads.LessEqualsUint64Double,
+ argTypes(types.UintType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.LessEqualsUint64Int64,
+ argTypes(types.UintType, types.IntType), types.BoolType),
+ decls.Overload(overloads.LessEqualsDouble,
+ argTypes(types.DoubleType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.LessEqualsDoubleInt64,
+ argTypes(types.DoubleType, types.IntType), types.BoolType),
+ decls.Overload(overloads.LessEqualsDoubleUint64,
+ argTypes(types.DoubleType, types.UintType), types.BoolType),
+ decls.Overload(overloads.LessEqualsString,
+ argTypes(types.StringType, types.StringType), types.BoolType),
+ decls.Overload(overloads.LessEqualsBytes,
+ argTypes(types.BytesType, types.BytesType), types.BoolType),
+ decls.Overload(overloads.LessEqualsTimestamp,
+ argTypes(types.TimestampType, types.TimestampType), types.BoolType),
+ decls.Overload(overloads.LessEqualsDuration,
+ argTypes(types.DurationType, types.DurationType), types.BoolType),
+ decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
+ cmp := lhs.(traits.Comparer).Compare(rhs)
+ if cmp == types.IntNegOne || cmp == types.IntZero {
+ return types.True
+ }
+ if cmp == types.IntOne {
+ return types.False
+ }
+ return cmp
+ }, traits.ComparerType)),
+
+ function(operators.Greater,
+ decls.Overload(overloads.GreaterBool,
+ argTypes(types.BoolType, types.BoolType), types.BoolType),
+ decls.Overload(overloads.GreaterInt64,
+ argTypes(types.IntType, types.IntType), types.BoolType),
+ decls.Overload(overloads.GreaterInt64Double,
+ argTypes(types.IntType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.GreaterInt64Uint64,
+ argTypes(types.IntType, types.UintType), types.BoolType),
+ decls.Overload(overloads.GreaterUint64,
+ argTypes(types.UintType, types.UintType), types.BoolType),
+ decls.Overload(overloads.GreaterUint64Double,
+ argTypes(types.UintType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.GreaterUint64Int64,
+ argTypes(types.UintType, types.IntType), types.BoolType),
+ decls.Overload(overloads.GreaterDouble,
+ argTypes(types.DoubleType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.GreaterDoubleInt64,
+ argTypes(types.DoubleType, types.IntType), types.BoolType),
+ decls.Overload(overloads.GreaterDoubleUint64,
+ argTypes(types.DoubleType, types.UintType), types.BoolType),
+ decls.Overload(overloads.GreaterString,
+ argTypes(types.StringType, types.StringType), types.BoolType),
+ decls.Overload(overloads.GreaterBytes,
+ argTypes(types.BytesType, types.BytesType), types.BoolType),
+ decls.Overload(overloads.GreaterTimestamp,
+ argTypes(types.TimestampType, types.TimestampType), types.BoolType),
+ decls.Overload(overloads.GreaterDuration,
+ argTypes(types.DurationType, types.DurationType), types.BoolType),
+ decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
+ cmp := lhs.(traits.Comparer).Compare(rhs)
+ if cmp == types.IntOne {
+ return types.True
+ }
+ if cmp == types.IntNegOne || cmp == types.IntZero {
+ return types.False
+ }
+ return cmp
+ }, traits.ComparerType)),
+
+ function(operators.GreaterEquals,
+ decls.Overload(overloads.GreaterEqualsBool,
+ argTypes(types.BoolType, types.BoolType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsInt64,
+ argTypes(types.IntType, types.IntType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsInt64Double,
+ argTypes(types.IntType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsInt64Uint64,
+ argTypes(types.IntType, types.UintType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsUint64,
+ argTypes(types.UintType, types.UintType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsUint64Double,
+ argTypes(types.UintType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsUint64Int64,
+ argTypes(types.UintType, types.IntType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsDouble,
+ argTypes(types.DoubleType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsDoubleInt64,
+ argTypes(types.DoubleType, types.IntType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsDoubleUint64,
+ argTypes(types.DoubleType, types.UintType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsString,
+ argTypes(types.StringType, types.StringType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsBytes,
+ argTypes(types.BytesType, types.BytesType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsTimestamp,
+ argTypes(types.TimestampType, types.TimestampType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsDuration,
+ argTypes(types.DurationType, types.DurationType), types.BoolType),
+ decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
+ cmp := lhs.(traits.Comparer).Compare(rhs)
+ if cmp == types.IntOne || cmp == types.IntZero {
+ return types.True
+ }
+ if cmp == types.IntNegOne {
+ return types.False
+ }
+ return cmp
+ }, traits.ComparerType)),
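+
+ // For example, both ordering operators above reduce to a single
+ // traits.Comparer.Compare call whose result is translated to a bool:
+ //
+ //    Int(1).Compare(Int(2))  // IntNegOne: `1 > 2` and `1 >= 2` are false
+ //    Int(2).Compare(Int(2))  // IntZero:   `2 > 2` is false, `2 >= 2` is true
+ //
+ // Non-comparable operands surface the Compare error value unchanged.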
+
+ // Indexing
+ function(operators.Index,
+ decls.Overload(overloads.IndexList, argTypes(listOfA, types.IntType), paramA),
+ decls.Overload(overloads.IndexMap, argTypes(mapOfAB, paramA), paramB),
+ decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
+ return lhs.(traits.Indexer).Get(rhs)
+ }, traits.IndexerType)),
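+
+ // For example, list and map indexing share the single Indexer dispatch:
+ //
+ //    [1, 2, 3][0]     // list.Get(Int(0))     == Int(1)
+ //    {"k": "v"}["k"]  // map.Get(String("k")) == String("v")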
+
+ // Collections operators
+ function(operators.In,
+ decls.Overload(overloads.InList, argTypes(paramA, listOfA), types.BoolType),
+ decls.Overload(overloads.InMap, argTypes(paramA, mapOfAB), types.BoolType),
+ decls.SingletonBinaryBinding(inAggregate)),
+ function(operators.OldIn,
+ decls.DisableDeclaration(true), // safe deprecation
+ decls.Overload(overloads.InList, argTypes(paramA, listOfA), types.BoolType),
+ decls.Overload(overloads.InMap, argTypes(paramA, mapOfAB), types.BoolType),
+ decls.SingletonBinaryBinding(inAggregate)),
+ function(overloads.DeprecatedIn,
+ decls.DisableDeclaration(true), // safe deprecation
+ decls.Overload(overloads.InList, argTypes(paramA, listOfA), types.BoolType),
+ decls.Overload(overloads.InMap, argTypes(paramA, mapOfAB), types.BoolType),
+ decls.SingletonBinaryBinding(inAggregate)),
+ function(overloads.Size,
+ decls.Overload(overloads.SizeBytes, argTypes(types.BytesType), types.IntType),
+ decls.MemberOverload(overloads.SizeBytesInst, argTypes(types.BytesType), types.IntType),
+ decls.Overload(overloads.SizeList, argTypes(listOfA), types.IntType),
+ decls.MemberOverload(overloads.SizeListInst, argTypes(listOfA), types.IntType),
+ decls.Overload(overloads.SizeMap, argTypes(mapOfAB), types.IntType),
+ decls.MemberOverload(overloads.SizeMapInst, argTypes(mapOfAB), types.IntType),
+ decls.Overload(overloads.SizeString, argTypes(types.StringType), types.IntType),
+ decls.MemberOverload(overloads.SizeStringInst, argTypes(types.StringType), types.IntType),
+ decls.SingletonUnaryBinding(func(val ref.Val) ref.Val {
+ return val.(traits.Sizer).Size()
+ }, traits.SizerType)),
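+
+ // For example, size("abc") and "abc".size() resolve through the same
+ // traits.Sizer binding:
+ //
+ //    String("abc").Size()  // Int(3)
+ //    Bytes("ab").Size()    // Int(2)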
+
+ // Type conversions
+ function(overloads.TypeConvertType,
+ decls.Overload(overloads.TypeConvertType, argTypes(paramA), types.NewTypeTypeWithParam(paramA)),
+ decls.SingletonUnaryBinding(convertToType(types.TypeType))),
+
+ // Bool conversions
+ function(overloads.TypeConvertBool,
+ decls.Overload(overloads.BoolToBool, argTypes(types.BoolType), types.BoolType,
+ decls.UnaryBinding(identity)),
+ decls.Overload(overloads.StringToBool, argTypes(types.StringType), types.BoolType,
+ decls.UnaryBinding(convertToType(types.BoolType)))),
+
+ // Bytes conversions
+ function(overloads.TypeConvertBytes,
+ decls.Overload(overloads.BytesToBytes, argTypes(types.BytesType), types.BytesType,
+ decls.UnaryBinding(identity)),
+ decls.Overload(overloads.StringToBytes, argTypes(types.StringType), types.BytesType,
+ decls.UnaryBinding(convertToType(types.BytesType)))),
+
+ // Double conversions
+ function(overloads.TypeConvertDouble,
+ decls.Overload(overloads.DoubleToDouble, argTypes(types.DoubleType), types.DoubleType,
+ decls.UnaryBinding(identity)),
+ decls.Overload(overloads.IntToDouble, argTypes(types.IntType), types.DoubleType,
+ decls.UnaryBinding(convertToType(types.DoubleType))),
+ decls.Overload(overloads.StringToDouble, argTypes(types.StringType), types.DoubleType,
+ decls.UnaryBinding(convertToType(types.DoubleType))),
+ decls.Overload(overloads.UintToDouble, argTypes(types.UintType), types.DoubleType,
+ decls.UnaryBinding(convertToType(types.DoubleType)))),
+
+ // Duration conversions
+ function(overloads.TypeConvertDuration,
+ decls.Overload(overloads.DurationToDuration, argTypes(types.DurationType), types.DurationType,
+ decls.UnaryBinding(identity)),
+ decls.Overload(overloads.IntToDuration, argTypes(types.IntType), types.DurationType,
+ decls.UnaryBinding(convertToType(types.DurationType))),
+ decls.Overload(overloads.StringToDuration, argTypes(types.StringType), types.DurationType,
+ decls.UnaryBinding(convertToType(types.DurationType)))),
+
+ // Dyn conversions
+ function(overloads.TypeConvertDyn,
+ decls.Overload(overloads.ToDyn, argTypes(paramA), types.DynType),
+ decls.SingletonUnaryBinding(identity)),
+
+ // Int conversions
+ function(overloads.TypeConvertInt,
+ decls.Overload(overloads.IntToInt, argTypes(types.IntType), types.IntType,
+ decls.UnaryBinding(identity)),
+ decls.Overload(overloads.DoubleToInt, argTypes(types.DoubleType), types.IntType,
+ decls.UnaryBinding(convertToType(types.IntType))),
+ decls.Overload(overloads.DurationToInt, argTypes(types.DurationType), types.IntType,
+ decls.UnaryBinding(convertToType(types.IntType))),
+ decls.Overload(overloads.StringToInt, argTypes(types.StringType), types.IntType,
+ decls.UnaryBinding(convertToType(types.IntType))),
+ decls.Overload(overloads.TimestampToInt, argTypes(types.TimestampType), types.IntType,
+ decls.UnaryBinding(convertToType(types.IntType))),
+ decls.Overload(overloads.UintToInt, argTypes(types.UintType), types.IntType,
+ decls.UnaryBinding(convertToType(types.IntType))),
+ ),
+
+ // String conversions
+ function(overloads.TypeConvertString,
+ decls.Overload(overloads.StringToString, argTypes(types.StringType), types.StringType,
+ decls.UnaryBinding(identity)),
+ decls.Overload(overloads.BoolToString, argTypes(types.BoolType), types.StringType,
+ decls.UnaryBinding(convertToType(types.StringType))),
+ decls.Overload(overloads.BytesToString, argTypes(types.BytesType), types.StringType,
+ decls.UnaryBinding(convertToType(types.StringType))),
+ decls.Overload(overloads.DoubleToString, argTypes(types.DoubleType), types.StringType,
+ decls.UnaryBinding(convertToType(types.StringType))),
+ decls.Overload(overloads.DurationToString, argTypes(types.DurationType), types.StringType,
+ decls.UnaryBinding(convertToType(types.StringType))),
+ decls.Overload(overloads.IntToString, argTypes(types.IntType), types.StringType,
+ decls.UnaryBinding(convertToType(types.StringType))),
+ decls.Overload(overloads.TimestampToString, argTypes(types.TimestampType), types.StringType,
+ decls.UnaryBinding(convertToType(types.StringType))),
+ decls.Overload(overloads.UintToString, argTypes(types.UintType), types.StringType,
+ decls.UnaryBinding(convertToType(types.StringType)))),
+
+ // Timestamp conversions
+ function(overloads.TypeConvertTimestamp,
+ decls.Overload(overloads.TimestampToTimestamp, argTypes(types.TimestampType), types.TimestampType,
+ decls.UnaryBinding(identity)),
+ decls.Overload(overloads.IntToTimestamp, argTypes(types.IntType), types.TimestampType,
+ decls.UnaryBinding(convertToType(types.TimestampType))),
+ decls.Overload(overloads.StringToTimestamp, argTypes(types.StringType), types.TimestampType,
+ decls.UnaryBinding(convertToType(types.TimestampType)))),
+
+ // Uint conversions
+ function(overloads.TypeConvertUint,
+ decls.Overload(overloads.UintToUint, argTypes(types.UintType), types.UintType,
+ decls.UnaryBinding(identity)),
+ decls.Overload(overloads.DoubleToUint, argTypes(types.DoubleType), types.UintType,
+ decls.UnaryBinding(convertToType(types.UintType))),
+ decls.Overload(overloads.IntToUint, argTypes(types.IntType), types.UintType,
+ decls.UnaryBinding(convertToType(types.UintType))),
+ decls.Overload(overloads.StringToUint, argTypes(types.StringType), types.UintType,
+ decls.UnaryBinding(convertToType(types.UintType)))),
+
+ // String functions
+ function(overloads.Contains,
+ decls.MemberOverload(overloads.ContainsString,
+ argTypes(types.StringType, types.StringType), types.BoolType,
+ decls.BinaryBinding(types.StringContains)),
+ decls.DisableTypeGuards(true)),
+ function(overloads.EndsWith,
+ decls.MemberOverload(overloads.EndsWithString,
+ argTypes(types.StringType, types.StringType), types.BoolType,
+ decls.BinaryBinding(types.StringEndsWith)),
+ decls.DisableTypeGuards(true)),
+ function(overloads.StartsWith,
+ decls.MemberOverload(overloads.StartsWithString,
+ argTypes(types.StringType, types.StringType), types.BoolType,
+ decls.BinaryBinding(types.StringStartsWith)),
+ decls.DisableTypeGuards(true)),
+ function(overloads.Matches,
+ decls.Overload(overloads.Matches, argTypes(types.StringType, types.StringType), types.BoolType),
+ decls.MemberOverload(overloads.MatchesString,
+ argTypes(types.StringType, types.StringType), types.BoolType),
+ decls.SingletonBinaryBinding(func(str, pat ref.Val) ref.Val {
+ return str.(traits.Matcher).Match(pat)
+ }, traits.MatcherType)),
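+
+ // For example, the global and receiver forms of matches() share one
+ // Matcher binding, and matching is unanchored as in RE2:
+ //
+ //    String("hello").Match(String("ell"))  // True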
+
+ // Timestamp / duration functions
+ function(overloads.TimeGetFullYear,
+ decls.MemberOverload(overloads.TimestampToYear,
+ argTypes(types.TimestampType), types.IntType),
+ decls.MemberOverload(overloads.TimestampToYearWithTz,
+ argTypes(types.TimestampType, types.StringType), types.IntType)),
+
+ function(overloads.TimeGetMonth,
+ decls.MemberOverload(overloads.TimestampToMonth,
+ argTypes(types.TimestampType), types.IntType),
+ decls.MemberOverload(overloads.TimestampToMonthWithTz,
+ argTypes(types.TimestampType, types.StringType), types.IntType)),
+
+ function(overloads.TimeGetDayOfYear,
+ decls.MemberOverload(overloads.TimestampToDayOfYear,
+ argTypes(types.TimestampType), types.IntType),
+ decls.MemberOverload(overloads.TimestampToDayOfYearWithTz,
+ argTypes(types.TimestampType, types.StringType), types.IntType)),
+
+ function(overloads.TimeGetDayOfMonth,
+ decls.MemberOverload(overloads.TimestampToDayOfMonthZeroBased,
+ argTypes(types.TimestampType), types.IntType),
+ decls.MemberOverload(overloads.TimestampToDayOfMonthZeroBasedWithTz,
+ argTypes(types.TimestampType, types.StringType), types.IntType)),
+
+ function(overloads.TimeGetDate,
+ decls.MemberOverload(overloads.TimestampToDayOfMonthOneBased,
+ argTypes(types.TimestampType), types.IntType),
+ decls.MemberOverload(overloads.TimestampToDayOfMonthOneBasedWithTz,
+ argTypes(types.TimestampType, types.StringType), types.IntType)),
+
+ function(overloads.TimeGetDayOfWeek,
+ decls.MemberOverload(overloads.TimestampToDayOfWeek,
+ argTypes(types.TimestampType), types.IntType),
+ decls.MemberOverload(overloads.TimestampToDayOfWeekWithTz,
+ argTypes(types.TimestampType, types.StringType), types.IntType)),
+
+ function(overloads.TimeGetHours,
+ decls.MemberOverload(overloads.TimestampToHours,
+ argTypes(types.TimestampType), types.IntType),
+ decls.MemberOverload(overloads.TimestampToHoursWithTz,
+ argTypes(types.TimestampType, types.StringType), types.IntType),
+ decls.MemberOverload(overloads.DurationToHours,
+ argTypes(types.DurationType), types.IntType)),
+
+ function(overloads.TimeGetMinutes,
+ decls.MemberOverload(overloads.TimestampToMinutes,
+ argTypes(types.TimestampType), types.IntType),
+ decls.MemberOverload(overloads.TimestampToMinutesWithTz,
+ argTypes(types.TimestampType, types.StringType), types.IntType),
+ decls.MemberOverload(overloads.DurationToMinutes,
+ argTypes(types.DurationType), types.IntType)),
+
+ function(overloads.TimeGetSeconds,
+ decls.MemberOverload(overloads.TimestampToSeconds,
+ argTypes(types.TimestampType), types.IntType),
+ decls.MemberOverload(overloads.TimestampToSecondsWithTz,
+ argTypes(types.TimestampType, types.StringType), types.IntType),
+ decls.MemberOverload(overloads.DurationToSeconds,
+ argTypes(types.DurationType), types.IntType)),
+
+ function(overloads.TimeGetMilliseconds,
+ decls.MemberOverload(overloads.TimestampToMilliseconds,
+ argTypes(types.TimestampType), types.IntType),
+ decls.MemberOverload(overloads.TimestampToMillisecondsWithTz,
+ argTypes(types.TimestampType, types.StringType), types.IntType),
+ decls.MemberOverload(overloads.DurationToMilliseconds,
+ argTypes(types.DurationType), types.IntType)),
+ }
+
+ stdFnDecls = make([]*exprpb.Decl, 0, len(stdFunctions))
+ for _, fn := range stdFunctions {
+ if fn.IsDeclarationDisabled() {
+ continue
+ }
+ ed, err := decls.FunctionDeclToExprDecl(fn)
+ if err != nil {
+ panic(err)
+ }
+ stdFnDecls = append(stdFnDecls, ed)
+ }
+}
+
+// Functions returns the set of standard library function declarations and definitions for CEL.
+func Functions() []*decls.FunctionDecl {
+ return stdFunctions
+}
+
+// FunctionExprDecls returns the legacy style protobuf-typed declarations for all functions and overloads
+// in the CEL standard environment.
+//
+// Deprecated: use Functions
+func FunctionExprDecls() []*exprpb.Decl {
+ return stdFnDecls
+}
+
+// Types returns the set of standard library types for CEL.
+func Types() []*decls.VariableDecl {
+ return stdTypes
+}
+
+// TypeExprDecls returns the legacy style protobuf-typed declarations for all types in the CEL
+// standard environment.
+//
+// Deprecated: use Types
+func TypeExprDecls() []*exprpb.Decl {
+ return stdTypeDecls
+}
+
+func notStrictlyFalse(value ref.Val) ref.Val {
+ if types.IsBool(value) {
+ return value
+ }
+ return types.True
+}
+
+func inAggregate(lhs ref.Val, rhs ref.Val) ref.Val {
+ if rhs.Type().HasTrait(traits.ContainerType) {
+ return rhs.(traits.Container).Contains(lhs)
+ }
+ return types.ValOrErr(rhs, "no such overload")
+}
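+
+// For example, both `2 in [1, 2, 3]` and `"k" in {"k": "v"}` land here, since
+// list and map values both carry traits.ContainerType:
+//
+//    inAggregate(Int(2), list)   // list.Contains(Int(2)), with list a traits.Lister
+//    inAggregate(String("k"), m) // m.Contains(String("k")), with m a traits.Mapper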
+
+func function(name string, opts ...decls.FunctionOpt) *decls.FunctionDecl {
+ fn, err := decls.NewFunction(name, opts...)
+ if err != nil {
+ panic(err)
+ }
+ return fn
+}
+
+func argTypes(args ...*types.Type) []*types.Type {
+ return args
+}
+
+func noBinaryOverrides(rhs, lhs ref.Val) ref.Val {
+ return types.NoSuchOverloadErr()
+}
+
+func noFunctionOverrides(args ...ref.Val) ref.Val {
+ return types.NoSuchOverloadErr()
+}
+
+func identity(val ref.Val) ref.Val {
+ return val
+}
+
+func convertToType(t ref.Type) functions.UnaryOp {
+ return func(val ref.Val) ref.Val {
+ return val.ConvertToType(t)
+ }
+}
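+
+// For example, the `int(string)` overload binds convertToType(types.IntType),
+// so int("42") evaluates as:
+//
+//    convertToType(types.IntType)(types.String("42"))  // types.Int(42)
+//
+// A failed parse such as int("abc") yields a *types.Err value rather than a Go
+// error, keeping evaluation within the ref.Val domain.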
diff --git a/vendor/github.com/google/cel-go/common/types/BUILD.bazel b/vendor/github.com/google/cel-go/common/types/BUILD.bazel
new file mode 100644
index 0000000..b5e44ff
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/BUILD.bazel
@@ -0,0 +1,90 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "any_value.go",
+ "bool.go",
+ "bytes.go",
+ "compare.go",
+ "double.go",
+ "duration.go",
+ "err.go",
+ "int.go",
+ "iterator.go",
+ "json_value.go",
+ "list.go",
+ "map.go",
+ "null.go",
+ "object.go",
+ "optional.go",
+ "overflow.go",
+ "provider.go",
+ "string.go",
+ "timestamp.go",
+ "types.go",
+ "uint.go",
+ "unknown.go",
+ "util.go",
+ ],
+ importpath = "github.com/google/cel-go/common/types",
+ deps = [
+ "//checker/decls:go_default_library",
+ "//common/overloads:go_default_library",
+ "//common/types/pb:go_default_library",
+ "//common/types/ref:go_default_library",
+ "//common/types/traits:go_default_library",
+ "@com_github_stoewer_go_strcase//:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//encoding/protojson:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
+ "@org_golang_google_protobuf//types/known/anypb:go_default_library",
+ "@org_golang_google_protobuf//types/known/durationpb:go_default_library",
+ "@org_golang_google_protobuf//types/known/structpb:go_default_library",
+ "@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
+ "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library",
+ ],
+)
+
+go_test(
+ name = "go_default_test",
+ size = "small",
+ srcs = [
+ "bool_test.go",
+ "bytes_test.go",
+ "double_test.go",
+ "duration_test.go",
+ "int_test.go",
+ "json_list_test.go",
+ "json_struct_test.go",
+ "list_test.go",
+ "map_test.go",
+ "null_test.go",
+ "object_test.go",
+ "optional_test.go",
+ "provider_test.go",
+ "string_test.go",
+ "timestamp_test.go",
+ "types_test.go",
+ "uint_test.go",
+ "unknown_test.go",
+ "util_test.go",
+ ],
+ embed = [":go_default_library"],
+ deps = [
+ "//common/types/ref:go_default_library",
+ "//test:go_default_library",
+ "//test/proto3pb:test_all_types_go_proto",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//encoding/protojson:go_default_library",
+ "@org_golang_google_protobuf//types/known/anypb:go_default_library",
+ "@org_golang_google_protobuf//types/known/durationpb:go_default_library",
+ "@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/common/types/any_value.go b/vendor/github.com/google/cel-go/common/types/any_value.go
new file mode 100644
index 0000000..cda0f13
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/any_value.go
@@ -0,0 +1,24 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "reflect"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+)
+
+// anyValueType is the reflected type of the google.protobuf.Any message.
+var anyValueType = reflect.TypeOf(&anypb.Any{})
diff --git a/vendor/github.com/google/cel-go/common/types/bool.go b/vendor/github.com/google/cel-go/common/types/bool.go
new file mode 100644
index 0000000..565734f
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/bool.go
@@ -0,0 +1,141 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+
+ "github.com/google/cel-go/common/types/ref"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// Bool type that implements ref.Val and supports comparison and negation.
+type Bool bool
+
+var (
+ // boolWrapperType golang reflected type for protobuf bool wrapper type.
+ boolWrapperType = reflect.TypeOf(&wrapperspb.BoolValue{})
+)
+
+// Boolean constants
+const (
+ False = Bool(false)
+ True = Bool(true)
+)
+
+// Compare implements the traits.Comparer interface method.
+func (b Bool) Compare(other ref.Val) ref.Val {
+ otherBool, ok := other.(Bool)
+ if !ok {
+ return ValOrErr(other, "no such overload")
+ }
+ if b == otherBool {
+ return IntZero
+ }
+ if !b && otherBool {
+ return IntNegOne
+ }
+ return IntOne
+}
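+
+// For example, booleans order false before true:
+//
+//    False.Compare(True)   // IntNegOne
+//    True.Compare(True)    // IntZero
+//    True.Compare(Int(1))  // *Err: no such overload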
+
+// ConvertToNative implements the ref.Val interface method.
+func (b Bool) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ switch typeDesc.Kind() {
+ case reflect.Bool:
+ return reflect.ValueOf(b).Convert(typeDesc).Interface(), nil
+ case reflect.Ptr:
+ switch typeDesc {
+ case anyValueType:
+ // Primitives must be wrapped to a wrapperspb.BoolValue before being packed into an Any.
+ return anypb.New(wrapperspb.Bool(bool(b)))
+ case boolWrapperType:
+ // Convert the bool to a wrapperspb.BoolValue.
+ return wrapperspb.Bool(bool(b)), nil
+ case jsonValueType:
+ // Return the bool as a new structpb.Value.
+ return structpb.NewBoolValue(bool(b)), nil
+ default:
+ if typeDesc.Elem().Kind() == reflect.Bool {
+ p := bool(b)
+ return &p, nil
+ }
+ }
+ case reflect.Interface:
+ bv := b.Value()
+ if reflect.TypeOf(bv).Implements(typeDesc) {
+ return bv, nil
+ }
+ if reflect.TypeOf(b).Implements(typeDesc) {
+ return b, nil
+ }
+ }
+ return nil, fmt.Errorf("type conversion error from bool to '%v'", typeDesc)
+}
+
+// ConvertToType implements the ref.Val interface method.
+func (b Bool) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case StringType:
+ return String(strconv.FormatBool(bool(b)))
+ case BoolType:
+ return b
+ case TypeType:
+ return BoolType
+ }
+ return NewErr("type conversion error from '%v' to '%v'", BoolType, typeVal)
+}
+
+// Equal implements the ref.Val interface method.
+func (b Bool) Equal(other ref.Val) ref.Val {
+ otherBool, ok := other.(Bool)
+ return Bool(ok && b == otherBool)
+}
+
+// IsZeroValue returns true if the boolean value is false.
+func (b Bool) IsZeroValue() bool {
+ return b == False
+}
+
+// Negate implements the traits.Negater interface method.
+func (b Bool) Negate() ref.Val {
+ return !b
+}
+
+// Type implements the ref.Val interface method.
+func (b Bool) Type() ref.Type {
+ return BoolType
+}
+
+// Value implements the ref.Val interface method.
+func (b Bool) Value() any {
+ return bool(b)
+}
+
+// IsBool returns whether the input ref.Val's type is equal to BoolType.
+func IsBool(elem ref.Val) bool {
+ switch v := elem.(type) {
+ case Bool:
+ return true
+ case ref.Val:
+ return v.Type() == BoolType
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/google/cel-go/common/types/bytes.go b/vendor/github.com/google/cel-go/common/types/bytes.go
new file mode 100644
index 0000000..5838755
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/bytes.go
@@ -0,0 +1,130 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "reflect"
+ "unicode/utf8"
+
+ "github.com/google/cel-go/common/types/ref"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// Bytes type that implements ref.Val and supports add, compare, and size
+// operations.
+type Bytes []byte
+
+var (
+ // byteWrapperType golang reflected type for protobuf bytes wrapper type.
+ byteWrapperType = reflect.TypeOf(&wrapperspb.BytesValue{})
+)
+
+// Add implements traits.Adder interface method by concatenating byte sequences.
+func (b Bytes) Add(other ref.Val) ref.Val {
+ otherBytes, ok := other.(Bytes)
+ if !ok {
+ return ValOrErr(other, "no such overload")
+ }
+ return append(b, otherBytes...)
+}
+
+// Compare implements traits.Comparer interface method by lexicographic ordering.
+func (b Bytes) Compare(other ref.Val) ref.Val {
+ otherBytes, ok := other.(Bytes)
+ if !ok {
+ return ValOrErr(other, "no such overload")
+ }
+ return Int(bytes.Compare(b, otherBytes))
+}
+
+// ConvertToNative implements the ref.Val interface method.
+func (b Bytes) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ switch typeDesc.Kind() {
+ case reflect.Array, reflect.Slice:
+ return reflect.ValueOf(b).Convert(typeDesc).Interface(), nil
+ case reflect.Ptr:
+ switch typeDesc {
+ case anyValueType:
+ // Primitives must be wrapped before being set on an Any field.
+ return anypb.New(wrapperspb.Bytes([]byte(b)))
+ case byteWrapperType:
+ // Convert the bytes to a wrapperspb.BytesValue.
+ return wrapperspb.Bytes([]byte(b)), nil
+ case jsonValueType:
+ // CEL follows the proto3 to JSON conversion by encoding bytes to a string via base64.
+ // The encoding below matches the golang 'encoding/json' behavior during marshaling,
+ // which uses base64.StdEncoding.
+ str := base64.StdEncoding.EncodeToString([]byte(b))
+ return structpb.NewStringValue(str), nil
+ }
+ case reflect.Interface:
+ bv := b.Value()
+ if reflect.TypeOf(bv).Implements(typeDesc) {
+ return bv, nil
+ }
+ if reflect.TypeOf(b).Implements(typeDesc) {
+ return b, nil
+ }
+ }
+ return nil, fmt.Errorf("type conversion error from Bytes to '%v'", typeDesc)
+}
+
+// ConvertToType implements the ref.Val interface method.
+func (b Bytes) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case StringType:
+ if !utf8.Valid(b) {
+ return NewErr("invalid UTF-8 in bytes, cannot convert to string")
+ }
+ return String(b)
+ case BytesType:
+ return b
+ case TypeType:
+ return BytesType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", BytesType, typeVal)
+}
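+
+// For example, only valid UTF-8 byte sequences convert to string:
+//
+//    Bytes("abc").ConvertToType(StringType)       // String("abc")
+//    Bytes{0xff, 0xfe}.ConvertToType(StringType)  // *Err: invalid UTF-8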
+
+// Equal implements the ref.Val interface method.
+func (b Bytes) Equal(other ref.Val) ref.Val {
+ otherBytes, ok := other.(Bytes)
+ return Bool(ok && bytes.Equal(b, otherBytes))
+}
+
+// IsZeroValue returns true if the byte array is empty.
+func (b Bytes) IsZeroValue() bool {
+ return len(b) == 0
+}
+
+// Size implements the traits.Sizer interface method.
+func (b Bytes) Size() ref.Val {
+ return Int(len(b))
+}
+
+// Type implements the ref.Val interface method.
+func (b Bytes) Type() ref.Type {
+ return BytesType
+}
+
+// Value implements the ref.Val interface method.
+func (b Bytes) Value() any {
+ return []byte(b)
+}
diff --git a/vendor/github.com/google/cel-go/common/types/compare.go b/vendor/github.com/google/cel-go/common/types/compare.go
new file mode 100644
index 0000000..e196826
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/compare.go
@@ -0,0 +1,97 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "math"
+
+ "github.com/google/cel-go/common/types/ref"
+)
+
+func compareDoubleInt(d Double, i Int) Int {
+ if d < math.MinInt64 {
+ return IntNegOne
+ }
+ if d > math.MaxInt64 {
+ return IntOne
+ }
+ return compareDouble(d, Double(i))
+}
+
+func compareIntDouble(i Int, d Double) Int {
+ return -compareDoubleInt(d, i)
+}
+
+func compareDoubleUint(d Double, u Uint) Int {
+ if d < 0 {
+ return IntNegOne
+ }
+ if d > math.MaxUint64 {
+ return IntOne
+ }
+ return compareDouble(d, Double(u))
+}
+
+func compareUintDouble(u Uint, d Double) Int {
+ return -compareDoubleUint(d, u)
+}
+
+func compareIntUint(i Int, u Uint) Int {
+ if i < 0 || u > math.MaxInt64 {
+ return IntNegOne
+ }
+ cmp := i - Int(u)
+ if cmp < 0 {
+ return IntNegOne
+ }
+ if cmp > 0 {
+ return IntOne
+ }
+ return IntZero
+}
+
+func compareUintInt(u Uint, i Int) Int {
+ return -compareIntUint(i, u)
+}
+
+func compareDouble(a, b Double) Int {
+ if a < b {
+ return IntNegOne
+ }
+ if a > b {
+ return IntOne
+ }
+ return IntZero
+}
+
+func compareInt(a, b Int) ref.Val {
+ if a < b {
+ return IntNegOne
+ }
+ if a > b {
+ return IntOne
+ }
+ return IntZero
+}
+
+func compareUint(a, b Uint) ref.Val {
+ if a < b {
+ return IntNegOne
+ }
+ if a > b {
+ return IntOne
+ }
+ return IntZero
+}
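+
+// Worked example: doubles beyond the int64 range short-circuit before any
+// lossy float-to-int conversion can occur:
+//
+//    compareDoubleInt(Double(1e100), Int(42)) // IntOne, since 1e100 > math.MaxInt64
+//    compareIntDouble(Int(42), Double(1e100)) // IntNegOne, the negation of the above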
diff --git a/vendor/github.com/google/cel-go/common/types/doc.go b/vendor/github.com/google/cel-go/common/types/doc.go
new file mode 100644
index 0000000..5f641d7
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package types contains the types, traits, and utilities common to all
+// components of expression handling.
+package types
diff --git a/vendor/github.com/google/cel-go/common/types/double.go b/vendor/github.com/google/cel-go/common/types/double.go
new file mode 100644
index 0000000..027e789
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/double.go
@@ -0,0 +1,211 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+
+ "github.com/google/cel-go/common/types/ref"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// Double type that implements ref.Val, comparison, and mathematical
+// operations.
+type Double float64
+
+var (
+ // doubleWrapperType reflected type for protobuf double wrapper type.
+ doubleWrapperType = reflect.TypeOf(&wrapperspb.DoubleValue{})
+
+ // floatWrapperType reflected type for protobuf float wrapper type.
+ floatWrapperType = reflect.TypeOf(&wrapperspb.FloatValue{})
+)
+
+// Add implements traits.Adder.Add.
+func (d Double) Add(other ref.Val) ref.Val {
+ otherDouble, ok := other.(Double)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ return d + otherDouble
+}
+
+// Compare implements traits.Comparer.Compare.
+func (d Double) Compare(other ref.Val) ref.Val {
+ if math.IsNaN(float64(d)) {
+ return NewErr("NaN values cannot be ordered")
+ }
+ switch ov := other.(type) {
+ case Double:
+ if math.IsNaN(float64(ov)) {
+ return NewErr("NaN values cannot be ordered")
+ }
+ return compareDouble(d, ov)
+ case Int:
+ return compareDoubleInt(d, ov)
+ case Uint:
+ return compareDoubleUint(d, ov)
+ default:
+ return MaybeNoSuchOverloadErr(other)
+ }
+}
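+
+// For example, NaN refuses ordering on either side, while finite doubles
+// compare against ints without loss:
+//
+//    Double(math.NaN()).Compare(Double(1)) // *Err: NaN values cannot be ordered
+//    Double(1).Compare(Int(1))             // IntZero, via compareDoubleInt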
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (d Double) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ switch typeDesc.Kind() {
+ case reflect.Float32:
+ v := float32(d)
+ return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil
+ case reflect.Float64:
+ v := float64(d)
+ return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil
+ case reflect.Ptr:
+ switch typeDesc {
+ case anyValueType:
+ // Primitives must be wrapped before being set on an Any field.
+ return anypb.New(wrapperspb.Double(float64(d)))
+ case doubleWrapperType:
+ // Convert to a wrapperspb.DoubleValue
+ return wrapperspb.Double(float64(d)), nil
+ case floatWrapperType:
+ // Convert to a wrapperspb.FloatValue (with truncation).
+ return wrapperspb.Float(float32(d)), nil
+ case jsonValueType:
+ // Note, there are special cases for proto3 to json conversion that
+ // expect the floating point value to be converted to a NaN,
+ // Infinity, or -Infinity string values, but the jsonpb string
+ // marshaling of the protobuf.Value will handle this conversion.
+ return structpb.NewNumberValue(float64(d)), nil
+ }
+ switch typeDesc.Elem().Kind() {
+ case reflect.Float32:
+ v := float32(d)
+ p := reflect.New(typeDesc.Elem())
+ p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem()))
+ return p.Interface(), nil
+ case reflect.Float64:
+ v := float64(d)
+ p := reflect.New(typeDesc.Elem())
+ p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem()))
+ return p.Interface(), nil
+ }
+ case reflect.Interface:
+ dv := d.Value()
+ if reflect.TypeOf(dv).Implements(typeDesc) {
+ return dv, nil
+ }
+ if reflect.TypeOf(d).Implements(typeDesc) {
+ return d, nil
+ }
+ }
+ return nil, fmt.Errorf("type conversion error from Double to '%v'", typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (d Double) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case IntType:
+ i, err := doubleToInt64Checked(float64(d))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Int(i)
+ case UintType:
+ i, err := doubleToUint64Checked(float64(d))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Uint(i)
+ case DoubleType:
+ return d
+ case StringType:
+ return String(fmt.Sprintf("%g", float64(d)))
+ case TypeType:
+ return DoubleType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", DoubleType, typeVal)
+}
+
+// Divide implements traits.Divider.Divide.
+func (d Double) Divide(other ref.Val) ref.Val {
+ otherDouble, ok := other.(Double)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ return d / otherDouble
+}
+
+// Equal implements ref.Val.Equal.
+func (d Double) Equal(other ref.Val) ref.Val {
+ if math.IsNaN(float64(d)) {
+ return False
+ }
+ switch ov := other.(type) {
+ case Double:
+ if math.IsNaN(float64(ov)) {
+ return False
+ }
+ return Bool(d == ov)
+ case Int:
+ return Bool(compareDoubleInt(d, ov) == 0)
+ case Uint:
+ return Bool(compareDoubleUint(d, ov) == 0)
+ default:
+ return False
+ }
+}
+
+// IsZeroValue returns true if the double value is 0.0.
+func (d Double) IsZeroValue() bool {
+ return float64(d) == 0.0
+}
+
+// Multiply implements traits.Multiplier.Multiply.
+func (d Double) Multiply(other ref.Val) ref.Val {
+ otherDouble, ok := other.(Double)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ return d * otherDouble
+}
+
+// Negate implements traits.Negater.Negate.
+func (d Double) Negate() ref.Val {
+ return -d
+}
+
+// Subtract implements traits.Subtractor.Subtract.
+func (d Double) Subtract(subtrahend ref.Val) ref.Val {
+ subtraDouble, ok := subtrahend.(Double)
+ if !ok {
+ return MaybeNoSuchOverloadErr(subtrahend)
+ }
+ return d - subtraDouble
+}
+
+// Type implements ref.Val.Type.
+func (d Double) Type() ref.Type {
+ return DoubleType
+}
+
+// Value implements ref.Val.Value.
+func (d Double) Value() any {
+ return float64(d)
+}
diff --git a/vendor/github.com/google/cel-go/common/types/duration.go b/vendor/github.com/google/cel-go/common/types/duration.go
new file mode 100644
index 0000000..596e56d
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/duration.go
@@ -0,0 +1,222 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "time"
+
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types/ref"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ dpb "google.golang.org/protobuf/types/known/durationpb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+// Duration type that implements ref.Val and supports add, compare, negate,
+// and subtract operators. This type is also a receiver which means it can
+// participate in dispatch to receiver functions.
+type Duration struct {
+ time.Duration
+}
+
+func durationOf(d time.Duration) Duration {
+ return Duration{Duration: d}
+}
+
+var (
+ durationValueType = reflect.TypeOf(&dpb.Duration{})
+
+ durationZeroArgOverloads = map[string]func(ref.Val) ref.Val{
+ overloads.TimeGetHours: DurationGetHours,
+ overloads.TimeGetMinutes: DurationGetMinutes,
+ overloads.TimeGetSeconds: DurationGetSeconds,
+ overloads.TimeGetMilliseconds: DurationGetMilliseconds,
+ }
+)
+
+// Add implements traits.Adder.Add.
+func (d Duration) Add(other ref.Val) ref.Val {
+ switch other.Type() {
+ case DurationType:
+ dur2 := other.(Duration)
+ val, err := addDurationChecked(d.Duration, dur2.Duration)
+ if err != nil {
+ return WrapErr(err)
+ }
+ return durationOf(val)
+ case TimestampType:
+ ts := other.(Timestamp).Time
+ val, err := addTimeDurationChecked(ts, d.Duration)
+ if err != nil {
+ return WrapErr(err)
+ }
+ return timestampOf(val)
+ }
+ return MaybeNoSuchOverloadErr(other)
+}
+
+// Compare implements traits.Comparer.Compare.
+func (d Duration) Compare(other ref.Val) ref.Val {
+ otherDur, ok := other.(Duration)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ d1 := d.Duration
+ d2 := otherDur.Duration
+ switch {
+ case d1 < d2:
+ return IntNegOne
+ case d1 > d2:
+ return IntOne
+ default:
+ return IntZero
+ }
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (d Duration) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ // If the duration is already assignable to the desired type return it.
+ if reflect.TypeOf(d.Duration).AssignableTo(typeDesc) {
+ return d.Duration, nil
+ }
+ if reflect.TypeOf(d).AssignableTo(typeDesc) {
+ return d, nil
+ }
+ switch typeDesc {
+ case anyValueType:
+ // Pack the duration as a dpb.Duration into an Any value.
+ return anypb.New(dpb.New(d.Duration))
+ case durationValueType:
+ // Unwrap the CEL value to its underlying proto value.
+ return dpb.New(d.Duration), nil
+ case jsonValueType:
+ // CEL follows the proto3 to JSON conversion.
+ // Note, using jsonpb would wrap the result in extra double quotes.
+ v := d.ConvertToType(StringType)
+ if IsError(v) {
+ return nil, v.(*Err)
+ }
+ return structpb.NewStringValue(string(v.(String))), nil
+ }
+ return nil, fmt.Errorf("type conversion error from 'Duration' to '%v'", typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (d Duration) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case StringType:
+ return String(strconv.FormatFloat(d.Seconds(), 'f', -1, 64) + "s")
+ case IntType:
+ return Int(d.Duration)
+ case DurationType:
+ return d
+ case TypeType:
+ return DurationType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", DurationType, typeVal)
+}
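+
+// For example, string conversion renders fractional seconds with an "s" suffix:
+//
+//    durationOf(90 * time.Second).ConvertToType(StringType)        // String("90s")
+//    durationOf(1500 * time.Millisecond).ConvertToType(StringType) // String("1.5s")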
+
+// Equal implements ref.Val.Equal.
+func (d Duration) Equal(other ref.Val) ref.Val {
+ otherDur, ok := other.(Duration)
+ return Bool(ok && d.Duration == otherDur.Duration)
+}
+
+// IsZeroValue returns true if the duration value is zero
+func (d Duration) IsZeroValue() bool {
+ return d.Duration == 0
+}
+
+// Negate implements traits.Negater.Negate.
+func (d Duration) Negate() ref.Val {
+ val, err := negateDurationChecked(d.Duration)
+ if err != nil {
+ return WrapErr(err)
+ }
+ return durationOf(val)
+}
+
+// Receive implements traits.Receiver.Receive.
+func (d Duration) Receive(function string, overload string, args []ref.Val) ref.Val {
+ if len(args) == 0 {
+ if f, found := durationZeroArgOverloads[function]; found {
+ return f(d)
+ }
+ }
+ return NoSuchOverloadErr()
+}
+
+// Subtract implements traits.Subtractor.Subtract.
+func (d Duration) Subtract(subtrahend ref.Val) ref.Val {
+ subtraDur, ok := subtrahend.(Duration)
+ if !ok {
+ return MaybeNoSuchOverloadErr(subtrahend)
+ }
+ val, err := subtractDurationChecked(d.Duration, subtraDur.Duration)
+ if err != nil {
+ return WrapErr(err)
+ }
+ return durationOf(val)
+}
+
+// Type implements ref.Val.Type.
+func (d Duration) Type() ref.Type {
+ return DurationType
+}
+
+// Value implements ref.Val.Value.
+func (d Duration) Value() any {
+ return d.Duration
+}
+
+// DurationGetHours returns the duration in hours.
+func DurationGetHours(val ref.Val) ref.Val {
+ dur, ok := val.(Duration)
+ if !ok {
+ return MaybeNoSuchOverloadErr(val)
+ }
+ return Int(dur.Hours())
+}
+
+// DurationGetMinutes returns duration in minutes.
+func DurationGetMinutes(val ref.Val) ref.Val {
+ dur, ok := val.(Duration)
+ if !ok {
+ return MaybeNoSuchOverloadErr(val)
+ }
+ return Int(dur.Minutes())
+}
+
+// DurationGetSeconds returns duration in seconds.
+func DurationGetSeconds(val ref.Val) ref.Val {
+ dur, ok := val.(Duration)
+ if !ok {
+ return MaybeNoSuchOverloadErr(val)
+ }
+ return Int(dur.Seconds())
+}
+
+// DurationGetMilliseconds returns duration in milliseconds.
+func DurationGetMilliseconds(val ref.Val) ref.Val {
+ dur, ok := val.(Duration)
+ if !ok {
+ return MaybeNoSuchOverloadErr(val)
+ }
+ return Int(dur.Milliseconds())
+}
diff --git a/vendor/github.com/google/cel-go/common/types/err.go b/vendor/github.com/google/cel-go/common/types/err.go
new file mode 100644
index 0000000..aa8f94b
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/err.go
@@ -0,0 +1,146 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// Error interface which allows types.Err values to be treated as error values.
+type Error interface {
+ error
+ ref.Val
+}
+
+// Err type which extends the built-in go error and implements ref.Val.
+type Err struct {
+ error
+}
+
+var (
+ // ErrType singleton.
+ ErrType = NewOpaqueType("error")
+
+ // errDivideByZero is an error indicating a division by zero of an integer value.
+ errDivideByZero = errors.New("division by zero")
+ // errModulusByZero is an error indicating a modulus by zero of an integer value.
+ errModulusByZero = errors.New("modulus by zero")
+ // errIntOverflow is an error representing integer overflow.
+ errIntOverflow = errors.New("integer overflow")
+ // errUintOverflow is an error representing unsigned integer overflow.
+ errUintOverflow = errors.New("unsigned integer overflow")
+ // errDurationOverflow is an error representing duration overflow.
+ errDurationOverflow = errors.New("duration overflow")
+ // errTimestampOverflow is an error representing timestamp overflow.
+ errTimestampOverflow = errors.New("timestamp overflow")
+ celErrTimestampOverflow = &Err{error: errTimestampOverflow}
+
+ // celErrNoSuchOverload indicates that the call arguments did not match a supported method signature.
+ celErrNoSuchOverload = NewErr("no such overload")
+)
+
+// NewErr creates a new Err described by the format string and args.
+// TODO: Audit the use of this function and standardize the error messages and codes.
+func NewErr(format string, args ...any) ref.Val {
+ return &Err{fmt.Errorf(format, args...)}
+}
+
+// NoSuchOverloadErr returns a new types.Err instance with a no such overload message.
+func NoSuchOverloadErr() ref.Val {
+ return celErrNoSuchOverload
+}
+
+// UnsupportedRefValConversionErr returns a types.Err instance with a no such conversion
+// message indicating that the native value could not be converted to a CEL ref.Val.
+func UnsupportedRefValConversionErr(val any) ref.Val {
+ return NewErr("unsupported conversion to ref.Val: (%T)%v", val, val)
+}
+
+// MaybeNoSuchOverloadErr returns the input value unchanged if it is an error or
+// unknown, and otherwise returns a new no such overload error.
+func MaybeNoSuchOverloadErr(val ref.Val) ref.Val {
+ return ValOrErr(val, "no such overload")
+}
+
+// ValOrErr either returns the existing error or creates a new one.
+// TODO: Audit the use of this function and standardize the error messages and codes.
+func ValOrErr(val ref.Val, format string, args ...any) ref.Val {
+ if val == nil || !IsUnknownOrError(val) {
+ return NewErr(format, args...)
+ }
+ return val
+}
+
+// WrapErr wraps an existing Go error value into a CEL Err value.
+func WrapErr(err error) ref.Val {
+ return &Err{error: err}
+}
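+
+// For example, WrapErr preserves the wrapped error for errors.Is and
+// errors.Unwrap, while NewErr formats a fresh one:
+//
+//    v := WrapErr(errDivideByZero)
+//    errors.Is(v.(*Err), errDivideByZero) // true, via (*Err).Is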
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (e *Err) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ return nil, e.error
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (e *Err) ConvertToType(typeVal ref.Type) ref.Val {
+ // Errors are not convertible to other representations.
+ return e
+}
+
+// Equal implements ref.Val.Equal.
+func (e *Err) Equal(other ref.Val) ref.Val {
+ // An error cannot be equal to any other value, so it returns itself.
+ return e
+}
+
+// String implements fmt.Stringer.
+func (e *Err) String() string {
+ return e.error.Error()
+}
+
+// Type implements ref.Val.Type.
+func (e *Err) Type() ref.Type {
+ return ErrType
+}
+
+// Value implements ref.Val.Value.
+func (e *Err) Value() any {
+ return e.error
+}
+
+// Is implements errors.Is.
+func (e *Err) Is(target error) bool {
+ return e.error.Error() == target.Error()
+}
+
+// Unwrap implements errors.Unwrap.
+func (e *Err) Unwrap() error {
+ return e.error
+}
+
+// IsError returns whether the input ref.Val is an instance of *Err, i.e. its
+// type is the ErrType singleton.
+func IsError(val ref.Val) bool {
+ switch val.(type) {
+ case *Err:
+ return true
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/google/cel-go/common/types/int.go b/vendor/github.com/google/cel-go/common/types/int.go
new file mode 100644
index 0000000..940772a
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/int.go
@@ -0,0 +1,291 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "strconv"
+ "time"
+
+ "github.com/google/cel-go/common/types/ref"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// Int type that implements ref.Val as well as comparison and math operators.
+type Int int64
+
+// Int constants used for comparison results.
+const (
+ // IntZero is the zero-value for Int
+ IntZero = Int(0)
+ IntOne = Int(1)
+ IntNegOne = Int(-1)
+)
+
+var (
+ // int32WrapperType reflected type for protobuf int32 wrapper type.
+ int32WrapperType = reflect.TypeOf(&wrapperspb.Int32Value{})
+
+ // int64WrapperType reflected type for protobuf int64 wrapper type.
+ int64WrapperType = reflect.TypeOf(&wrapperspb.Int64Value{})
+)
+
+// Add implements traits.Adder.Add.
+func (i Int) Add(other ref.Val) ref.Val {
+ otherInt, ok := other.(Int)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ val, err := addInt64Checked(int64(i), int64(otherInt))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Int(val)
+}
+
+// Compare implements traits.Comparer.Compare.
+func (i Int) Compare(other ref.Val) ref.Val {
+ switch ov := other.(type) {
+ case Double:
+ if math.IsNaN(float64(ov)) {
+ return NewErr("NaN values cannot be ordered")
+ }
+ return compareIntDouble(i, ov)
+ case Int:
+ return compareInt(i, ov)
+ case Uint:
+ return compareIntUint(i, ov)
+ default:
+ return MaybeNoSuchOverloadErr(other)
+ }
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (i Int) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ switch typeDesc.Kind() {
+ case reflect.Int, reflect.Int32:
+ // Enums are also mapped as int32 derivations.
+ // Note, the code doesn't convert to the enum value directly since this is not known, but
+ // the net effect with respect to proto-assignment is handled correctly by the reflection
+ // Convert method.
+ v, err := int64ToInt32Checked(int64(i))
+ if err != nil {
+ return nil, err
+ }
+ return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil
+ case reflect.Int64:
+ return reflect.ValueOf(i).Convert(typeDesc).Interface(), nil
+ case reflect.Ptr:
+ switch typeDesc {
+ case anyValueType:
+ // Primitives must be wrapped before being set on an Any field.
+ return anypb.New(wrapperspb.Int64(int64(i)))
+ case int32WrapperType:
+ // Convert the value to a wrapperspb.Int32Value, error on overflow.
+ v, err := int64ToInt32Checked(int64(i))
+ if err != nil {
+ return nil, err
+ }
+ return wrapperspb.Int32(v), nil
+ case int64WrapperType:
+ // Convert the value to a wrapperspb.Int64Value.
+ return wrapperspb.Int64(int64(i)), nil
+ case jsonValueType:
+ // The proto-to-JSON conversion rules would convert all 64-bit integer values to JSON
+ // decimal strings. Because CEL ints might come from the automatic widening of 32-bit
+ // values in protos, the JSON type is chosen dynamically based on the value.
+ //
+ // - Integers in the inclusive range [-(2^53-1), 2^53-1] are encoded as JSON numbers.
+ // - Integers outside this range are encoded as JSON strings.
+ //
+ // The integer to float range represents the largest interval where such a conversion
+ // can round-trip accurately. Thus, conversions from a 32-bit source can expect a JSON
+ // number as with protobuf. Those consuming JSON from a 64-bit source must be able to
+ // handle either a JSON number or a JSON decimal string. To handle these cases safely
+ // the string values must be explicitly converted to int() within a CEL expression;
+ // however, it is best to simply stay within the JSON number range when building JSON
+ // objects in CEL.
+ if i.isJSONSafe() {
+ return structpb.NewNumberValue(float64(i)), nil
+ }
+ // Proto3 to JSON conversion requires string-formatted int64 values
+ // since the conversion to floating point would result in truncation.
+ return structpb.NewStringValue(strconv.FormatInt(int64(i), 10)), nil
+ }
+ switch typeDesc.Elem().Kind() {
+ case reflect.Int32:
+ // Convert the value to a wrapperspb.Int32Value, error on overflow.
+ v, err := int64ToInt32Checked(int64(i))
+ if err != nil {
+ return nil, err
+ }
+ p := reflect.New(typeDesc.Elem())
+ p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem()))
+ return p.Interface(), nil
+ case reflect.Int64:
+ v := int64(i)
+ p := reflect.New(typeDesc.Elem())
+ p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem()))
+ return p.Interface(), nil
+ }
+ case reflect.Interface:
+ iv := i.Value()
+ if reflect.TypeOf(iv).Implements(typeDesc) {
+ return iv, nil
+ }
+ if reflect.TypeOf(i).Implements(typeDesc) {
+ return i, nil
+ }
+ }
+ return nil, fmt.Errorf("unsupported type conversion from 'int' to %v", typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (i Int) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case IntType:
+ return i
+ case UintType:
+ u, err := int64ToUint64Checked(int64(i))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Uint(u)
+ case DoubleType:
+ return Double(i)
+ case StringType:
+ return String(fmt.Sprintf("%d", int64(i)))
+ case TimestampType:
+ // The maximum positive value that can be passed to time.Unix is math.MaxInt64 minus the number
+ // of seconds between year 1 and year 1970. See comments on unixToInternal.
+ if int64(i) < minUnixTime || int64(i) > maxUnixTime {
+ return celErrTimestampOverflow
+ }
+ return timestampOf(time.Unix(int64(i), 0).UTC())
+ case TypeType:
+ return IntType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", IntType, typeVal)
+}
+
+// Divide implements traits.Divider.Divide.
+func (i Int) Divide(other ref.Val) ref.Val {
+ otherInt, ok := other.(Int)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ val, err := divideInt64Checked(int64(i), int64(otherInt))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Int(val)
+}
+
+// Equal implements ref.Val.Equal.
+func (i Int) Equal(other ref.Val) ref.Val {
+ switch ov := other.(type) {
+ case Double:
+ if math.IsNaN(float64(ov)) {
+ return False
+ }
+ return Bool(compareIntDouble(i, ov) == 0)
+ case Int:
+ return Bool(i == ov)
+ case Uint:
+ return Bool(compareIntUint(i, ov) == 0)
+ default:
+ return False
+ }
+}
+
+// IsZeroValue returns true if the integer is equal to 0.
+func (i Int) IsZeroValue() bool {
+ return i == IntZero
+}
+
+// Modulo implements traits.Modder.Modulo.
+func (i Int) Modulo(other ref.Val) ref.Val {
+ otherInt, ok := other.(Int)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ val, err := moduloInt64Checked(int64(i), int64(otherInt))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Int(val)
+}
+
+// Multiply implements traits.Multiplier.Multiply.
+func (i Int) Multiply(other ref.Val) ref.Val {
+ otherInt, ok := other.(Int)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ val, err := multiplyInt64Checked(int64(i), int64(otherInt))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Int(val)
+}
+
+// Negate implements traits.Negater.Negate.
+func (i Int) Negate() ref.Val {
+ val, err := negateInt64Checked(int64(i))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Int(val)
+}
+
+// Subtract implements traits.Subtractor.Subtract.
+func (i Int) Subtract(subtrahend ref.Val) ref.Val {
+ subtraInt, ok := subtrahend.(Int)
+ if !ok {
+ return MaybeNoSuchOverloadErr(subtrahend)
+ }
+ val, err := subtractInt64Checked(int64(i), int64(subtraInt))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Int(val)
+}
+
+// Type implements ref.Val.Type.
+func (i Int) Type() ref.Type {
+ return IntType
+}
+
+// Value implements ref.Val.Value.
+func (i Int) Value() any {
+ return int64(i)
+}
+
+// isJSONSafe indicates whether the int is safely representable as a floating point value in JSON.
+func (i Int) isJSONSafe() bool {
+ return i >= minIntJSON && i <= maxIntJSON
+}
+
+const (
+ // maxIntJSON is defined as the Number.MAX_SAFE_INTEGER value per EcmaScript 6.
+ maxIntJSON = 1<<53 - 1
+ // minIntJSON is defined as the Number.MIN_SAFE_INTEGER value per EcmaScript 6.
+ minIntJSON = -maxIntJSON
+)
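+
+// For example, the JSON-safe boundary mirrors Number.MAX_SAFE_INTEGER:
+//
+//    Int(maxIntJSON).isJSONSafe()     // true:  serialized as a JSON number
+//    Int(maxIntJSON + 1).isJSONSafe() // false: serialized as a JSON string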
diff --git a/vendor/github.com/google/cel-go/common/types/iterator.go b/vendor/github.com/google/cel-go/common/types/iterator.go
new file mode 100644
index 0000000..98e9147
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/iterator.go
@@ -0,0 +1,55 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+var (
+ // IteratorType singleton.
+ IteratorType = NewObjectType("iterator", traits.IteratorType)
+)
+
+// baseIterator is the basis for list, map, and object iterators.
+//
+// An iterator in and of itself should not be a valid value for comparison, but must implement the
+// `ref.Val` methods in order to be well-supported within instruction arguments processed by the
+// interpreter.
+type baseIterator struct{}
+
+func (*baseIterator) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ return nil, fmt.Errorf("type conversion on iterators not supported")
+}
+
+func (*baseIterator) ConvertToType(typeVal ref.Type) ref.Val {
+ return NewErr("no such overload")
+}
+
+func (*baseIterator) Equal(other ref.Val) ref.Val {
+ return NewErr("no such overload")
+}
+
+func (*baseIterator) Type() ref.Type {
+ return IteratorType
+}
+
+func (*baseIterator) Value() any {
+ return nil
+}
diff --git a/vendor/github.com/google/cel-go/common/types/json_value.go b/vendor/github.com/google/cel-go/common/types/json_value.go
new file mode 100644
index 0000000..13a4efe
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/json_value.go
@@ -0,0 +1,29 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "reflect"
+
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+// JSON type constants representing the reflected types of protobuf JSON values.
+var (
+ jsonValueType = reflect.TypeOf(&structpb.Value{})
+ jsonListValueType = reflect.TypeOf(&structpb.ListValue{})
+ jsonStructType = reflect.TypeOf(&structpb.Struct{})
+ jsonNullType = reflect.TypeOf(structpb.NullValue_NULL_VALUE)
+)
diff --git a/vendor/github.com/google/cel-go/common/types/list.go b/vendor/github.com/google/cel-go/common/types/list.go
new file mode 100644
index 0000000..d4932b4
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/list.go
@@ -0,0 +1,523 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+// NewDynamicList returns a traits.Lister with heterogeneous elements.
+// value should be an array of "native" types, i.e. any type that
+// NativeToValue() can convert to a ref.Val.
+func NewDynamicList(adapter Adapter, value any) traits.Lister {
+ refValue := reflect.ValueOf(value)
+ return &baseList{
+ Adapter: adapter,
+ value: value,
+ size: refValue.Len(),
+ get: func(i int) any {
+ return refValue.Index(i).Interface()
+ },
+ }
+}
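+
+// Illustrative usage (a sketch, assuming the registry-backed DefaultTypeAdapter
+// defined elsewhere in this package):
+//
+//    l := NewDynamicList(DefaultTypeAdapter, []any{1, "two", 3.0})
+//    l.Get(Int(0)) // Int(1)
+//    l.Size()      // Int(3)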
+
+// NewStringList returns a traits.Lister containing only strings.
+func NewStringList(adapter Adapter, elems []string) traits.Lister {
+ return &baseList{
+ Adapter: adapter,
+ value: elems,
+ size: len(elems),
+ get: func(i int) any { return elems[i] },
+ }
+}
+
+// NewRefValList returns a traits.Lister with ref.Val elements.
+//
+// This type specialization is used with list literals within CEL expressions.
+func NewRefValList(adapter Adapter, elems []ref.Val) traits.Lister {
+ return &baseList{
+ Adapter: adapter,
+ value: elems,
+ size: len(elems),
+ get: func(i int) any { return elems[i] },
+ }
+}
+
+// NewProtoList returns a traits.Lister based on a pb.List instance.
+func NewProtoList(adapter Adapter, list protoreflect.List) traits.Lister {
+ return &baseList{
+ Adapter: adapter,
+ value: list,
+ size: list.Len(),
+ get: func(i int) any { return list.Get(i).Interface() },
+ }
+}
+
+// NewJSONList returns a traits.Lister based on a structpb.ListValue instance.
+func NewJSONList(adapter Adapter, l *structpb.ListValue) traits.Lister {
+ vals := l.GetValues()
+ return &baseList{
+ Adapter: adapter,
+ value: l,
+ size: len(vals),
+ get: func(i int) any { return vals[i] },
+ }
+}
+
+// NewMutableList creates a new mutable list whose internal state can be modified.
+func NewMutableList(adapter Adapter) traits.MutableLister {
+ var mutableValues []ref.Val
+ l := &mutableList{
+ baseList: &baseList{
+ Adapter: adapter,
+ value: mutableValues,
+ size: 0,
+ },
+ mutableValues: mutableValues,
+ }
+ l.get = func(i int) any {
+ return l.mutableValues[i]
+ }
+ return l
+}
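+
+// Illustrative usage (a sketch, assuming DefaultTypeAdapter; the mutable list
+// aggregates elements in place and is frozen via ToImmutableList):
+//
+//    ml := NewMutableList(DefaultTypeAdapter)
+//    ml.Add(NewStringList(DefaultTypeAdapter, []string{"a", "b"}))
+//    imm := ml.ToImmutableList() // immutable traits.Lister of size 2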
+
+// baseList points to a list containing elements of any type.
+// The `value` is an array of native values accessed through the `get` function.
+// The `Adapter` enables native type to CEL type conversions.
+type baseList struct {
+ Adapter
+ value any
+
+ // size indicates the number of elements within the list.
+ // Since objects are immutable the size of a list is static.
+ size int
+
+ // get returns a value at the specified integer index.
+ // The index is guaranteed to be checked against the list index range.
+ get func(int) any
+}
+
+// Add implements the traits.Adder interface method.
+func (l *baseList) Add(other ref.Val) ref.Val {
+ otherList, ok := other.(traits.Lister)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ if l.Size() == IntZero {
+ return other
+ }
+ if otherList.Size() == IntZero {
+ return l
+ }
+ return &concatList{
+ Adapter: l.Adapter,
+ prevList: l,
+ nextList: otherList}
+}
+
+// Contains implements the traits.Container interface method.
+func (l *baseList) Contains(elem ref.Val) ref.Val {
+ for i := 0; i < l.size; i++ {
+ val := l.NativeToValue(l.get(i))
+ cmp := elem.Equal(val)
+ b, ok := cmp.(Bool)
+ if ok && b == True {
+ return True
+ }
+ }
+ return False
+}
+
+// ConvertToNative implements the ref.Val interface method.
+func (l *baseList) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ // If the underlying list value is assignable to the reflected type return it.
+ if reflect.TypeOf(l.value).AssignableTo(typeDesc) {
+ return l.value, nil
+ }
+ // If the list wrapper is assignable to the desired type return it.
+ if reflect.TypeOf(l).AssignableTo(typeDesc) {
+ return l, nil
+ }
+ // Attempt to convert the list to a set of well known protobuf types.
+ switch typeDesc {
+ case anyValueType:
+ json, err := l.ConvertToNative(jsonListValueType)
+ if err != nil {
+ return nil, err
+ }
+ return anypb.New(json.(proto.Message))
+ case jsonValueType, jsonListValueType:
+ jsonValues, err :=
+ l.ConvertToNative(reflect.TypeOf([]*structpb.Value{}))
+ if err != nil {
+ return nil, err
+ }
+ jsonList := &structpb.ListValue{Values: jsonValues.([]*structpb.Value)}
+ if typeDesc == jsonListValueType {
+ return jsonList, nil
+ }
+ return structpb.NewListValue(jsonList), nil
+ }
+ // Non-list conversion.
+ if typeDesc.Kind() != reflect.Slice && typeDesc.Kind() != reflect.Array {
+ return nil, fmt.Errorf("type conversion error from list to '%v'", typeDesc)
+ }
+
+ // List conversion.
+ // Allow the element ConvertToNative() function to determine whether conversion is possible.
+ otherElemType := typeDesc.Elem()
+ elemCount := l.size
+ nativeList := reflect.MakeSlice(typeDesc, elemCount, elemCount)
+ for i := 0; i < elemCount; i++ {
+ elem := l.NativeToValue(l.get(i))
+ nativeElemVal, err := elem.ConvertToNative(otherElemType)
+ if err != nil {
+ return nil, err
+ }
+ nativeList.Index(i).Set(reflect.ValueOf(nativeElemVal))
+ }
+ return nativeList.Interface(), nil
+}
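+
+// Illustrative conversions (a sketch, assuming DefaultTypeAdapter):
+//
+//    l := NewStringList(DefaultTypeAdapter, []string{"a", "b"})
+//    native, _ := l.ConvertToNative(reflect.TypeOf([]string{})) // []string{"a", "b"}
+//    jsonVal, _ := l.ConvertToNative(jsonListValueType)         // *structpb.ListValue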
+
+// ConvertToType implements the ref.Val interface method.
+func (l *baseList) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case ListType:
+ return l
+ case TypeType:
+ return ListType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", ListType, typeVal)
+}
+
+// Equal implements the ref.Val interface method.
+func (l *baseList) Equal(other ref.Val) ref.Val {
+ otherList, ok := other.(traits.Lister)
+ if !ok {
+ return False
+ }
+ if l.Size() != otherList.Size() {
+ return False
+ }
+ for i := IntZero; i < l.Size().(Int); i++ {
+ thisElem := l.Get(i)
+ otherElem := otherList.Get(i)
+ elemEq := Equal(thisElem, otherElem)
+ if elemEq == False {
+ return False
+ }
+ }
+ return True
+}
+
+// Get implements the traits.Indexer interface method.
+func (l *baseList) Get(index ref.Val) ref.Val {
+ ind, err := IndexOrError(index)
+ if err != nil {
+ return ValOrErr(index, err.Error())
+ }
+ if ind < 0 || ind >= l.size {
+ return NewErr("index '%d' out of range in list size '%d'", ind, l.Size())
+ }
+ return l.NativeToValue(l.get(ind))
+}
+
+// IsZeroValue returns true if the list is empty.
+func (l *baseList) IsZeroValue() bool {
+ return l.size == 0
+}
+
+// Iterator implements the traits.Iterable interface method.
+func (l *baseList) Iterator() traits.Iterator {
+ return newListIterator(l)
+}
+
+// Size implements the traits.Sizer interface method.
+func (l *baseList) Size() ref.Val {
+ return Int(l.size)
+}
+
+// Type implements the ref.Val interface method.
+func (l *baseList) Type() ref.Type {
+ return ListType
+}
+
+// Value implements the ref.Val interface method.
+func (l *baseList) Value() any {
+ return l.value
+}
+
+// String converts the list to a human-readable string form.
+func (l *baseList) String() string {
+ var sb strings.Builder
+ sb.WriteString("[")
+ for i := 0; i < l.size; i++ {
+ sb.WriteString(fmt.Sprintf("%v", l.get(i)))
+ if i != l.size-1 {
+ sb.WriteString(", ")
+ }
+ }
+ sb.WriteString("]")
+ return sb.String()
+}
+
+// mutableList aggregates values into its internal storage. For use with internal CEL variables only.
+type mutableList struct {
+ *baseList
+ mutableValues []ref.Val
+}
+
+// Add copies elements from the other list into the internal storage of the mutable list.
+// The ref.Val returned by Add is the receiver.
+func (l *mutableList) Add(other ref.Val) ref.Val {
+ switch otherList := other.(type) {
+ case *mutableList:
+ l.mutableValues = append(l.mutableValues, otherList.mutableValues...)
+ l.size += len(otherList.mutableValues)
+ case traits.Lister:
+ for i := IntZero; i < otherList.Size().(Int); i++ {
+ l.size++
+ l.mutableValues = append(l.mutableValues, otherList.Get(i))
+ }
+ default:
+ return MaybeNoSuchOverloadErr(otherList)
+ }
+ return l
+}
+
+// ToImmutableList returns an immutable list based on the internal storage of the mutable list.
+func (l *mutableList) ToImmutableList() traits.Lister {
+ // The reference to internal state is guaranteed to be safe as this call is only performed
+ // when mutations have been completed.
+ return NewRefValList(l.Adapter, l.mutableValues)
+}
+
+// concatList combines two list implementations together into a view.
+// The `Adapter` enables native type to CEL type conversions.
+type concatList struct {
+ Adapter
+ value any
+ prevList traits.Lister
+ nextList traits.Lister
+}
+
+// Add implements the traits.Adder interface method.
+func (l *concatList) Add(other ref.Val) ref.Val {
+ otherList, ok := other.(traits.Lister)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ if l.Size() == IntZero {
+ return other
+ }
+ if otherList.Size() == IntZero {
+ return l
+ }
+ return &concatList{
+ Adapter: l.Adapter,
+ prevList: l,
+ nextList: otherList}
+}
+
+// Contains implements the traits.Container interface method.
+func (l *concatList) Contains(elem ref.Val) ref.Val {
+ // The concat list relies on the IsErrorOrUnknown checks against the input element to be
+ // performed by the `prevList` and/or `nextList`.
+ prev := l.prevList.Contains(elem)
+ // Short-circuit the return if the elem was found in the prev list.
+ if prev == True {
+ return prev
+ }
+ // Return if the elem was found in the next list.
+ next := l.nextList.Contains(elem)
+ if next == True {
+ return next
+ }
+ // Handle the case where an error or unknown was encountered before checking next.
+ if IsUnknownOrError(prev) {
+ return prev
+ }
+ // Otherwise, rely on the next value as the representative result.
+ return next
+}
+
+// ConvertToNative implements the ref.Val interface method.
+func (l *concatList) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ combined := NewDynamicList(l.Adapter, l.Value().([]any))
+ return combined.ConvertToNative(typeDesc)
+}
+
+// ConvertToType implements the ref.Val interface method.
+func (l *concatList) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case ListType:
+ return l
+ case TypeType:
+ return ListType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", ListType, typeVal)
+}
+
+// Equal implements the ref.Val interface method.
+func (l *concatList) Equal(other ref.Val) ref.Val {
+ otherList, ok := other.(traits.Lister)
+ if !ok {
+ return False
+ }
+ if l.Size() != otherList.Size() {
+ return False
+ }
+ var maybeErr ref.Val
+ for i := IntZero; i < l.Size().(Int); i++ {
+ thisElem := l.Get(i)
+ otherElem := otherList.Get(i)
+ elemEq := Equal(thisElem, otherElem)
+ if elemEq == False {
+ return False
+ }
+ if maybeErr == nil && IsUnknownOrError(elemEq) {
+ maybeErr = elemEq
+ }
+ }
+ if maybeErr != nil {
+ return maybeErr
+ }
+ return True
+}
+
+// Get implements the traits.Indexer interface method.
+func (l *concatList) Get(index ref.Val) ref.Val {
+ ind, err := IndexOrError(index)
+ if err != nil {
+ return ValOrErr(index, err.Error())
+ }
+ i := Int(ind)
+ if i < l.prevList.Size().(Int) {
+ return l.prevList.Get(i)
+ }
+ offset := i - l.prevList.Size().(Int)
+ return l.nextList.Get(offset)
+}
+
+// IsZeroValue returns true if the list is empty.
+func (l *concatList) IsZeroValue() bool {
+ return l.Size().(Int) == 0
+}
+
+// Iterator implements the traits.Iterable interface method.
+func (l *concatList) Iterator() traits.Iterator {
+ return newListIterator(l)
+}
+
+// Size implements the traits.Sizer interface method.
+func (l *concatList) Size() ref.Val {
+ return l.prevList.Size().(Int).Add(l.nextList.Size())
+}
+
+// String converts the concatenated list to a human-readable string.
+func (l *concatList) String() string {
+ var sb strings.Builder
+ sb.WriteString("[")
+ for i := Int(0); i < l.Size().(Int); i++ {
+ sb.WriteString(fmt.Sprintf("%v", l.Get(i)))
+ if i != l.Size().(Int)-1 {
+ sb.WriteString(", ")
+ }
+ }
+ sb.WriteString("]")
+ return sb.String()
+}
+
+// Type implements the ref.Val interface method.
+func (l *concatList) Type() ref.Type {
+ return ListType
+}
+
+// Value implements the ref.Val interface method.
+func (l *concatList) Value() any {
+ if l.value == nil {
+ merged := make([]any, l.Size().(Int))
+ prevLen := l.prevList.Size().(Int)
+ for i := Int(0); i < prevLen; i++ {
+ merged[i] = l.prevList.Get(i).Value()
+ }
+ nextLen := l.nextList.Size().(Int)
+ for j := Int(0); j < nextLen; j++ {
+ merged[prevLen+j] = l.nextList.Get(j).Value()
+ }
+ l.value = merged
+ }
+ return l.value
+}
+
+func newListIterator(listValue traits.Lister) traits.Iterator {
+ return &listIterator{
+ listValue: listValue,
+ len: listValue.Size().(Int),
+ }
+}
+
+type listIterator struct {
+ *baseIterator
+ listValue traits.Lister
+ cursor Int
+ len Int
+}
+
+// HasNext implements the traits.Iterator interface method.
+func (it *listIterator) HasNext() ref.Val {
+ return Bool(it.cursor < it.len)
+}
+
+// Next implements the traits.Iterator interface method.
+func (it *listIterator) Next() ref.Val {
+ if it.HasNext() == True {
+ index := it.cursor
+ it.cursor++
+ return it.listValue.Get(index)
+ }
+ return nil
+}
+
+// IndexOrError converts an input index value into either a lossless integer index or an error.
+func IndexOrError(index ref.Val) (int, error) {
+ switch iv := index.(type) {
+ case Int:
+ return int(iv), nil
+ case Double:
+ if ik, ok := doubleToInt64Lossless(float64(iv)); ok {
+ return int(ik), nil
+ }
+ return -1, fmt.Errorf("unsupported index value %v in list", index)
+ case Uint:
+ if ik, ok := uint64ToInt64Lossless(uint64(iv)); ok {
+ return int(ik), nil
+ }
+ return -1, fmt.Errorf("unsupported index value %v in list", index)
+ default:
+ return -1, fmt.Errorf("unsupported index type '%s' in list", index.Type())
+ }
+}
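+
+// Illustrative behavior (a sketch): only lossless numeric indexes are accepted.
+//
+//    IndexOrError(Int(2))      // (2, nil)
+//    IndexOrError(Double(2.0)) // (2, nil), lossless conversion
+//    IndexOrError(Double(2.5)) // (-1, error), fractional index rejected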
diff --git a/vendor/github.com/google/cel-go/common/types/map.go b/vendor/github.com/google/cel-go/common/types/map.go
new file mode 100644
index 0000000..739b7aa
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/map.go
@@ -0,0 +1,854 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/stoewer/go-strcase"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ "github.com/google/cel-go/common/types/pb"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+// NewDynamicMap returns a traits.Mapper value with dynamic key, value pairs.
+func NewDynamicMap(adapter Adapter, value any) traits.Mapper {
+ refValue := reflect.ValueOf(value)
+ return &baseMap{
+ Adapter: adapter,
+ mapAccessor: newReflectMapAccessor(adapter, refValue),
+ value: value,
+ size: refValue.Len(),
+ }
+}
+
+// NewJSONStruct creates a traits.Mapper implementation backed by a JSON struct that has been
+// encoded in protocol buffer form.
+//
+// The `adapter` argument provides type adaptation capabilities from proto to CEL.
+func NewJSONStruct(adapter Adapter, value *structpb.Struct) traits.Mapper {
+ fields := value.GetFields()
+ return &baseMap{
+ Adapter: adapter,
+ mapAccessor: newJSONStructAccessor(adapter, fields),
+ value: value,
+ size: len(fields),
+ }
+}
+
+// NewRefValMap returns a specialized traits.Mapper with CEL valued keys and values.
+func NewRefValMap(adapter Adapter, value map[ref.Val]ref.Val) traits.Mapper {
+ return &baseMap{
+ Adapter: adapter,
+ mapAccessor: newRefValMapAccessor(value),
+ value: value,
+ size: len(value),
+ }
+}
+
+// NewStringInterfaceMap returns a specialized traits.Mapper with string keys and interface values.
+func NewStringInterfaceMap(adapter Adapter, value map[string]any) traits.Mapper {
+ return &baseMap{
+ Adapter: adapter,
+ mapAccessor: newStringIfaceMapAccessor(adapter, value),
+ value: value,
+ size: len(value),
+ }
+}
+
+// NewStringStringMap returns a specialized traits.Mapper with string keys and values.
+func NewStringStringMap(adapter Adapter, value map[string]string) traits.Mapper {
+ return &baseMap{
+ Adapter: adapter,
+ mapAccessor: newStringMapAccessor(value),
+ value: value,
+ size: len(value),
+ }
+}
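+
+// Illustrative usage (a sketch, assuming the registry-backed DefaultTypeAdapter):
+//
+//    m := NewStringStringMap(DefaultTypeAdapter, map[string]string{"k": "v"})
+//    m.Get(String("k"))         // String("v")
+//    m.Contains(String("nope")) // False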
+
+// NewProtoMap returns a specialized traits.Mapper for handling protobuf map values.
+func NewProtoMap(adapter Adapter, value *pb.Map) traits.Mapper {
+ return &protoMap{
+ Adapter: adapter,
+ value: value,
+ }
+}
+
+// mapAccessor is a private interface for finding values within a map and iterating over the keys.
+// This interface implements portions of the API surface area required by the traits.Mapper
+// interface.
+type mapAccessor interface {
+ // Find returns a value, if one exists, for the input key.
+ //
+ // If the key is not found the function returns (nil, false).
+ Find(ref.Val) (ref.Val, bool)
+
+ // Iterator returns an Iterator over the map key set.
+ Iterator() traits.Iterator
+}
+
+// baseMap is a reflection-based map implementation designed to handle a variety of map-like types.
+//
+// Since CEL is side-effect free, the base map represents an immutable object.
+type baseMap struct {
+ // Adapter used to convert keys and values accessed within the map.
+ Adapter
+
+ // mapAccessor interface implementation used to find and iterate over map keys.
+ mapAccessor
+
+ // value is the native Go value upon which the map type operates.
+ value any
+
+ // size is the number of entries in the map.
+ size int
+}
+
+// Contains implements the traits.Container interface method.
+func (m *baseMap) Contains(index ref.Val) ref.Val {
+ _, found := m.Find(index)
+ return Bool(found)
+}
+
+// ConvertToNative implements the ref.Val interface method.
+func (m *baseMap) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ // If the map is already assignable to the desired type return it, e.g. interfaces and
+ // maps with the same key value types.
+ if reflect.TypeOf(m.value).AssignableTo(typeDesc) {
+ return m.value, nil
+ }
+ if reflect.TypeOf(m).AssignableTo(typeDesc) {
+ return m, nil
+ }
+ switch typeDesc {
+ case anyValueType:
+ json, err := m.ConvertToNative(jsonStructType)
+ if err != nil {
+ return nil, err
+ }
+ return anypb.New(json.(proto.Message))
+ case jsonValueType, jsonStructType:
+ jsonEntries, err :=
+ m.ConvertToNative(reflect.TypeOf(map[string]*structpb.Value{}))
+ if err != nil {
+ return nil, err
+ }
+ jsonMap := &structpb.Struct{Fields: jsonEntries.(map[string]*structpb.Value)}
+ if typeDesc == jsonStructType {
+ return jsonMap, nil
+ }
+ return structpb.NewStructValue(jsonMap), nil
+ }
+
+ // Unwrap pointers, but track their use.
+ isPtr := false
+ if typeDesc.Kind() == reflect.Ptr {
+ tk := typeDesc
+ typeDesc = typeDesc.Elem()
+ if typeDesc.Kind() == reflect.Ptr {
+ return nil, fmt.Errorf("unsupported type conversion to '%v'", tk)
+ }
+ isPtr = true
+ }
+ switch typeDesc.Kind() {
+ // Map conversion.
+ case reflect.Map:
+ otherKey := typeDesc.Key()
+ otherElem := typeDesc.Elem()
+ nativeMap := reflect.MakeMapWithSize(typeDesc, m.size)
+ it := m.Iterator()
+ for it.HasNext() == True {
+ key := it.Next()
+ refKeyValue, err := key.ConvertToNative(otherKey)
+ if err != nil {
+ return nil, err
+ }
+ refElemValue, err := m.Get(key).ConvertToNative(otherElem)
+ if err != nil {
+ return nil, err
+ }
+ nativeMap.SetMapIndex(reflect.ValueOf(refKeyValue), reflect.ValueOf(refElemValue))
+ }
+ return nativeMap.Interface(), nil
+ case reflect.Struct:
+ nativeStructPtr := reflect.New(typeDesc)
+ nativeStruct := nativeStructPtr.Elem()
+ it := m.Iterator()
+ for it.HasNext() == True {
+ key := it.Next()
+ // Ensure the field name being referenced is exported.
+ // Only exported (public) field names can be set by reflection, where the name
+ // must be at least one character in length and start with an upper-case letter.
+ fieldName := key.ConvertToType(StringType)
+ if IsError(fieldName) {
+ return nil, fieldName.(*Err)
+ }
+ name := string(fieldName.(String))
+ name = strcase.UpperCamelCase(name)
+ fieldRef := nativeStruct.FieldByName(name)
+ if !fieldRef.IsValid() {
+ return nil, fmt.Errorf("type conversion error, no such field '%s' in type '%v'", name, typeDesc)
+ }
+ fieldValue, err := m.Get(key).ConvertToNative(fieldRef.Type())
+ if err != nil {
+ return nil, err
+ }
+ fieldRef.Set(reflect.ValueOf(fieldValue))
+ }
+ if isPtr {
+ return nativeStructPtr.Interface(), nil
+ }
+ return nativeStruct.Interface(), nil
+ }
+ return nil, fmt.Errorf("type conversion error from map to '%v'", typeDesc)
+}
+
+// ConvertToType implements the ref.Val interface method.
+func (m *baseMap) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case MapType:
+ return m
+ case TypeType:
+ return MapType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", MapType, typeVal)
+}
+
+// Equal implements the ref.Val interface method.
+func (m *baseMap) Equal(other ref.Val) ref.Val {
+ otherMap, ok := other.(traits.Mapper)
+ if !ok {
+ return False
+ }
+ if m.Size() != otherMap.Size() {
+ return False
+ }
+ it := m.Iterator()
+ for it.HasNext() == True {
+ key := it.Next()
+ thisVal, _ := m.Find(key)
+ otherVal, found := otherMap.Find(key)
+ if !found {
+ return False
+ }
+ valEq := Equal(thisVal, otherVal)
+ if valEq == False {
+ return False
+ }
+ }
+ return True
+}
+
+// Get implements the traits.Indexer interface method.
+func (m *baseMap) Get(key ref.Val) ref.Val {
+ v, found := m.Find(key)
+ if !found {
+ return ValOrErr(v, "no such key: %v", key)
+ }
+ return v
+}
+
+// IsZeroValue returns true if the map is empty.
+func (m *baseMap) IsZeroValue() bool {
+ return m.size == 0
+}
+
+// Size implements the traits.Sizer interface method.
+func (m *baseMap) Size() ref.Val {
+ return Int(m.size)
+}
+
+// String converts the map into a human-readable string.
+func (m *baseMap) String() string {
+ var sb strings.Builder
+ sb.WriteString("{")
+ it := m.Iterator()
+ i := 0
+ for it.HasNext() == True {
+ k := it.Next()
+ v, _ := m.Find(k)
+ sb.WriteString(fmt.Sprintf("%v: %v", k, v))
+ if i != m.size-1 {
+ sb.WriteString(", ")
+ }
+ i++
+ }
+ sb.WriteString("}")
+ return sb.String()
+}
+
+// Type implements the ref.Val interface method.
+func (m *baseMap) Type() ref.Type {
+ return MapType
+}
+
+// Value implements the ref.Val interface method.
+func (m *baseMap) Value() any {
+ return m.value
+}
+
+func newJSONStructAccessor(adapter Adapter, st map[string]*structpb.Value) mapAccessor {
+ return &jsonStructAccessor{
+ Adapter: adapter,
+ st: st,
+ }
+}
+
+type jsonStructAccessor struct {
+ Adapter
+ st map[string]*structpb.Value
+}
+
+// Find searches the json struct field map for the input key value and returns (value, true) if
+// found.
+//
+// If the key is not found the function returns (nil, false).
+func (a *jsonStructAccessor) Find(key ref.Val) (ref.Val, bool) {
+ strKey, ok := key.(String)
+ if !ok {
+ return nil, false
+ }
+ keyVal, found := a.st[string(strKey)]
+ if !found {
+ return nil, false
+ }
+ return a.NativeToValue(keyVal), true
+}
+
+// Iterator creates a new traits.Iterator from the set of JSON struct field names.
+func (a *jsonStructAccessor) Iterator() traits.Iterator {
+ // Copy the keys to make their order stable.
+ mapKeys := make([]string, len(a.st))
+ i := 0
+ for k := range a.st {
+ mapKeys[i] = k
+ i++
+ }
+ return &stringKeyIterator{
+ mapKeys: mapKeys,
+ len: len(mapKeys),
+ }
+}
+
+func newReflectMapAccessor(adapter Adapter, value reflect.Value) mapAccessor {
+ keyType := value.Type().Key()
+ return &reflectMapAccessor{
+ Adapter: adapter,
+ refValue: value,
+ keyType: keyType,
+ }
+}
+
+type reflectMapAccessor struct {
+ Adapter
+ refValue reflect.Value
+ keyType reflect.Type
+}
+
+// Find converts the input key to a native Golang type and then uses reflection to find the key,
+// returning (value, true) if present.
+//
+// If the key is not found the function returns (nil, false).
+func (m *reflectMapAccessor) Find(key ref.Val) (ref.Val, bool) {
+ if m.refValue.Len() == 0 {
+ return nil, false
+ }
+ if keyVal, found := m.findInternal(key); found {
+ return keyVal, true
+ }
+ switch k := key.(type) {
+ // Double is not a valid proto map key type, so check for the key as an int or uint.
+ case Double:
+ if ik, ok := doubleToInt64Lossless(float64(k)); ok {
+ if keyVal, found := m.findInternal(Int(ik)); found {
+ return keyVal, true
+ }
+ }
+ if uk, ok := doubleToUint64Lossless(float64(k)); ok {
+ return m.findInternal(Uint(uk))
+ }
+ // map keys of type double are not supported.
+ case Int:
+ if uk, ok := int64ToUint64Lossless(int64(k)); ok {
+ return m.findInternal(Uint(uk))
+ }
+ case Uint:
+ if ik, ok := uint64ToInt64Lossless(uint64(k)); ok {
+ return m.findInternal(Int(ik))
+ }
+ }
+ return nil, false
+}
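+
+// For example (a sketch, assuming DefaultTypeAdapter), numeric map keys are
+// matched across CEL numeric types whenever the conversion is lossless:
+//
+//    m := NewDynamicMap(DefaultTypeAdapter, map[int64]string{2: "two"})
+//    m.Get(Double(2.0)) // String("two")
+//    m.Get(Double(2.5)) // error: no such key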
+
+// findInternal attempts to convert the incoming key to the map's internal native type
+// and then returns the value, if found.
+func (m *reflectMapAccessor) findInternal(key ref.Val) (ref.Val, bool) {
+ k, err := key.ConvertToNative(m.keyType)
+ if err != nil {
+ return nil, false
+ }
+ refKey := reflect.ValueOf(k)
+ val := m.refValue.MapIndex(refKey)
+ if val.IsValid() {
+ return m.NativeToValue(val.Interface()), true
+ }
+ return nil, false
+}
+
+// Iterator creates a Golang reflection based traits.Iterator.
+func (m *reflectMapAccessor) Iterator() traits.Iterator {
+ return &mapIterator{
+ Adapter: m.Adapter,
+ mapKeys: m.refValue.MapRange(),
+ len: m.refValue.Len(),
+ }
+}
+
+func newRefValMapAccessor(mapVal map[ref.Val]ref.Val) mapAccessor {
+ return &refValMapAccessor{mapVal: mapVal}
+}
+
+type refValMapAccessor struct {
+ mapVal map[ref.Val]ref.Val
+}
+
+// Find uses native map accesses to find the key, returning (value, true) if present.
+//
+// If the key is not found the function returns (nil, false).
+func (a *refValMapAccessor) Find(key ref.Val) (ref.Val, bool) {
+ if len(a.mapVal) == 0 {
+ return nil, false
+ }
+ if keyVal, found := a.mapVal[key]; found {
+ return keyVal, true
+ }
+ switch k := key.(type) {
+ case Double:
+ if ik, ok := doubleToInt64Lossless(float64(k)); ok {
+ if keyVal, found := a.mapVal[Int(ik)]; found {
+ return keyVal, found
+ }
+ }
+ if uk, ok := doubleToUint64Lossless(float64(k)); ok {
+ keyVal, found := a.mapVal[Uint(uk)]
+ return keyVal, found
+ }
+ // map keys of type double are not supported.
+ case Int:
+ if uk, ok := int64ToUint64Lossless(int64(k)); ok {
+ keyVal, found := a.mapVal[Uint(uk)]
+ return keyVal, found
+ }
+ case Uint:
+ if ik, ok := uint64ToInt64Lossless(uint64(k)); ok {
+ keyVal, found := a.mapVal[Int(ik)]
+ return keyVal, found
+ }
+ }
+ return nil, false
+}
+
+// Iterator produces a new traits.Iterator which iterates over the map keys via Golang reflection.
+func (a *refValMapAccessor) Iterator() traits.Iterator {
+ return &mapIterator{
+ Adapter: DefaultTypeAdapter,
+ mapKeys: reflect.ValueOf(a.mapVal).MapRange(),
+ len: len(a.mapVal),
+ }
+}
+
+func newStringMapAccessor(strMap map[string]string) mapAccessor {
+ return &stringMapAccessor{mapVal: strMap}
+}
+
+type stringMapAccessor struct {
+ mapVal map[string]string
+}
+
+// Find uses native map accesses to find the key, returning (value, true) if present.
+//
+// If the key is not found the function returns (nil, false).
+func (a *stringMapAccessor) Find(key ref.Val) (ref.Val, bool) {
+ strKey, ok := key.(String)
+ if !ok {
+ return nil, false
+ }
+ keyVal, found := a.mapVal[string(strKey)]
+ if !found {
+ return nil, false
+ }
+ return String(keyVal), true
+}
+
+// Iterator creates a new traits.Iterator from the string key set of the map.
+func (a *stringMapAccessor) Iterator() traits.Iterator {
+ // Copy the keys to make their order stable.
+ mapKeys := make([]string, len(a.mapVal))
+ i := 0
+ for k := range a.mapVal {
+ mapKeys[i] = k
+ i++
+ }
+ return &stringKeyIterator{
+ mapKeys: mapKeys,
+ len: len(mapKeys),
+ }
+}
+
+func newStringIfaceMapAccessor(adapter Adapter, mapVal map[string]any) mapAccessor {
+ return &stringIfaceMapAccessor{
+ Adapter: adapter,
+ mapVal: mapVal,
+ }
+}
+
+type stringIfaceMapAccessor struct {
+ Adapter
+ mapVal map[string]any
+}
+
+// Find uses native map accesses to find the key, returning (value, true) if present.
+//
+// If the key is not found the function returns (nil, false).
+func (a *stringIfaceMapAccessor) Find(key ref.Val) (ref.Val, bool) {
+ strKey, ok := key.(String)
+ if !ok {
+ return nil, false
+ }
+ keyVal, found := a.mapVal[string(strKey)]
+ if !found {
+ return nil, false
+ }
+ return a.NativeToValue(keyVal), true
+}
+
+// Iterator creates a new traits.Iterator from the string key set of the map.
+func (a *stringIfaceMapAccessor) Iterator() traits.Iterator {
+ // Copy the keys to make their order stable.
+ mapKeys := make([]string, len(a.mapVal))
+ i := 0
+ for k := range a.mapVal {
+ mapKeys[i] = k
+ i++
+ }
+ return &stringKeyIterator{
+ mapKeys: mapKeys,
+ len: len(mapKeys),
+ }
+}
+
+// protoMap is a specialized, separate implementation of the traits.Mapper interfaces tailored to
+// accessing protoreflect.Map values.
+type protoMap struct {
+ Adapter
+ value *pb.Map
+}
+
+// Contains returns whether the map contains the given key.
+func (m *protoMap) Contains(key ref.Val) ref.Val {
+ _, found := m.Find(key)
+ return Bool(found)
+}
+
+// ConvertToNative implements the ref.Val interface method.
+//
+// Note, assignment to Golang struct types is not yet supported.
+func (m *protoMap) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ // If the map is already assignable to the desired type return it, e.g. interfaces and
+ // maps with the same key value types.
+ switch typeDesc {
+ case anyValueType:
+ json, err := m.ConvertToNative(jsonStructType)
+ if err != nil {
+ return nil, err
+ }
+ return anypb.New(json.(proto.Message))
+ case jsonValueType, jsonStructType:
+ jsonEntries, err :=
+ m.ConvertToNative(reflect.TypeOf(map[string]*structpb.Value{}))
+ if err != nil {
+ return nil, err
+ }
+ jsonMap := &structpb.Struct{
+ Fields: jsonEntries.(map[string]*structpb.Value)}
+ if typeDesc == jsonStructType {
+ return jsonMap, nil
+ }
+ return structpb.NewStructValue(jsonMap), nil
+ }
+ switch typeDesc.Kind() {
+ case reflect.Struct, reflect.Ptr:
+ if reflect.TypeOf(m.value).AssignableTo(typeDesc) {
+ return m.value, nil
+ }
+ if reflect.TypeOf(m).AssignableTo(typeDesc) {
+ return m, nil
+ }
+ }
+ if typeDesc.Kind() != reflect.Map {
+ return nil, fmt.Errorf("unsupported type conversion: %v to map", typeDesc)
+ }
+
+ keyType := m.value.KeyType.ReflectType()
+ valType := m.value.ValueType.ReflectType()
+ otherKeyType := typeDesc.Key()
+ otherValType := typeDesc.Elem()
+ mapVal := reflect.MakeMapWithSize(typeDesc, m.value.Len())
+ var err error
+ m.value.Range(func(key protoreflect.MapKey, val protoreflect.Value) bool {
+ ntvKey := key.Interface()
+ ntvVal := val.Interface()
+ switch pv := ntvVal.(type) {
+ case protoreflect.Message:
+ ntvVal = pv.Interface()
+ }
+ if keyType == otherKeyType && valType == otherValType {
+ mapVal.SetMapIndex(reflect.ValueOf(ntvKey), reflect.ValueOf(ntvVal))
+ return true
+ }
+ celKey := m.NativeToValue(ntvKey)
+ celVal := m.NativeToValue(ntvVal)
+ ntvKey, err = celKey.ConvertToNative(otherKeyType)
+ if err != nil {
+ // early terminate the range loop.
+ return false
+ }
+ ntvVal, err = celVal.ConvertToNative(otherValType)
+ if err != nil {
+ // early terminate the range loop.
+ return false
+ }
+ mapVal.SetMapIndex(reflect.ValueOf(ntvKey), reflect.ValueOf(ntvVal))
+ return true
+ })
+ if err != nil {
+ return nil, err
+ }
+ return mapVal.Interface(), nil
+}
+
+// ConvertToType implements the ref.Val interface method.
+func (m *protoMap) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case MapType:
+ return m
+ case TypeType:
+ return MapType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", MapType, typeVal)
+}
+
+// Equal implements the ref.Val interface method.
+func (m *protoMap) Equal(other ref.Val) ref.Val {
+ otherMap, ok := other.(traits.Mapper)
+ if !ok {
+ return False
+ }
+ if m.value.Map.Len() != int(otherMap.Size().(Int)) {
+ return False
+ }
+ var retVal ref.Val = True
+ m.value.Range(func(key protoreflect.MapKey, val protoreflect.Value) bool {
+ keyVal := m.NativeToValue(key.Interface())
+ valVal := m.NativeToValue(val)
+ otherVal, found := otherMap.Find(keyVal)
+ if !found {
+ retVal = False
+ return false
+ }
+ valEq := Equal(valVal, otherVal)
+ if valEq != True {
+ retVal = valEq
+ return false
+ }
+ return true
+ })
+ return retVal
+}
+
+// Find searches the protoreflect.Map for the input key, returning (value, true) if found.
+//
+// If the key is not found the function returns (nil, false).
+func (m *protoMap) Find(key ref.Val) (ref.Val, bool) {
+ if keyVal, found := m.findInternal(key); found {
+ return keyVal, true
+ }
+ switch k := key.(type) {
+ // Double is not a valid proto map key type, so check for the key as an int or uint.
+ case Double:
+ if ik, ok := doubleToInt64Lossless(float64(k)); ok {
+ if keyVal, found := m.findInternal(Int(ik)); found {
+ return keyVal, true
+ }
+ }
+ if uk, ok := doubleToUint64Lossless(float64(k)); ok {
+ return m.findInternal(Uint(uk))
+ }
+ // map keys of type double are not supported.
+ case Int:
+ if uk, ok := int64ToUint64Lossless(int64(k)); ok {
+ return m.findInternal(Uint(uk))
+ }
+ case Uint:
+ if ik, ok := uint64ToInt64Lossless(uint64(k)); ok {
+ return m.findInternal(Int(ik))
+ }
+ }
+ return nil, false
+}
+
+// findInternal attempts to convert the incoming key to the map's internal native type
+// and then returns the value, if found.
+func (m *protoMap) findInternal(key ref.Val) (ref.Val, bool) {
+ // Convert the input key to the expected protobuf key type.
+ ntvKey, err := key.ConvertToNative(m.value.KeyType.ReflectType())
+ if err != nil {
+ return nil, false
+ }
+ // Use protoreflection to get the key value.
+ val := m.value.Get(protoreflect.ValueOf(ntvKey).MapKey())
+ if !val.IsValid() {
+ return nil, false
+ }
+ // Perform nominal type unwrapping from the input value.
+ switch v := val.Interface().(type) {
+ case protoreflect.List, protoreflect.Map:
+ // Maps do not support list or map values
+ return nil, false
+ default:
+ return m.NativeToValue(v), true
+ }
+}
+
+// Get implements the traits.Indexer interface method.
+func (m *protoMap) Get(key ref.Val) ref.Val {
+ v, found := m.Find(key)
+ if !found {
+ return ValOrErr(v, "no such key: %v", key)
+ }
+ return v
+}
+
+// IsZeroValue returns true if the map is empty.
+func (m *protoMap) IsZeroValue() bool {
+ return m.value.Len() == 0
+}
+
+// Iterator implements the traits.Iterable interface method.
+func (m *protoMap) Iterator() traits.Iterator {
+ // Copy the keys to make their order stable.
+ mapKeys := make([]protoreflect.MapKey, 0, m.value.Len())
+ m.value.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
+ mapKeys = append(mapKeys, k)
+ return true
+ })
+ return &protoMapIterator{
+ Adapter: m.Adapter,
+ mapKeys: mapKeys,
+ len: m.value.Len(),
+ }
+}
+
+// Size returns the number of entries in the protoreflect.Map.
+func (m *protoMap) Size() ref.Val {
+ return Int(m.value.Len())
+}
+
+// Type implements the ref.Val interface method.
+func (m *protoMap) Type() ref.Type {
+ return MapType
+}
+
+// Value implements the ref.Val interface method.
+func (m *protoMap) Value() any {
+ return m.value
+}
+
+type mapIterator struct {
+ *baseIterator
+ Adapter
+ mapKeys *reflect.MapIter
+ cursor int
+ len int
+}
+
+// HasNext implements the traits.Iterator interface method.
+func (it *mapIterator) HasNext() ref.Val {
+ return Bool(it.cursor < it.len)
+}
+
+// Next implements the traits.Iterator interface method.
+func (it *mapIterator) Next() ref.Val {
+ if it.HasNext() == True && it.mapKeys.Next() {
+ it.cursor++
+ refKey := it.mapKeys.Key()
+ return it.NativeToValue(refKey.Interface())
+ }
+ return nil
+}
+
+type protoMapIterator struct {
+ *baseIterator
+ Adapter
+ mapKeys []protoreflect.MapKey
+ cursor int
+ len int
+}
+
+// HasNext implements the traits.Iterator interface method.
+func (it *protoMapIterator) HasNext() ref.Val {
+ return Bool(it.cursor < it.len)
+}
+
+// Next implements the traits.Iterator interface method.
+func (it *protoMapIterator) Next() ref.Val {
+ if it.HasNext() == True {
+ index := it.cursor
+ it.cursor++
+ refKey := it.mapKeys[index]
+ return it.NativeToValue(refKey.Interface())
+ }
+ return nil
+}
+
+type stringKeyIterator struct {
+ *baseIterator
+ mapKeys []string
+ cursor int
+ len int
+}
+
+// HasNext implements the traits.Iterator interface method.
+func (it *stringKeyIterator) HasNext() ref.Val {
+ return Bool(it.cursor < it.len)
+}
+
+// Next implements the traits.Iterator interface method.
+func (it *stringKeyIterator) Next() ref.Val {
+ if it.HasNext() == True {
+ index := it.cursor
+ it.cursor++
+ return String(it.mapKeys[index])
+ }
+ return nil
+}
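+
+// The iterators above all follow the HasNext/Next protocol used throughout
+// this package, e.g. (a sketch over any traits.Mapper m):
+//
+//    it := m.Iterator()
+//    for it.HasNext() == True {
+//        key := it.Next()
+//        val, _ := m.Find(key)
+//        _ = val
+//    }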
diff --git a/vendor/github.com/google/cel-go/common/types/null.go b/vendor/github.com/google/cel-go/common/types/null.go
new file mode 100644
index 0000000..926ca3d
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/null.go
@@ -0,0 +1,111 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+
+ "google.golang.org/protobuf/proto"
+
+ "github.com/google/cel-go/common/types/ref"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+// Null type implementation.
+type Null structpb.NullValue
+
+var (
+ // NullValue singleton.
+ NullValue = Null(structpb.NullValue_NULL_VALUE)
+
+ // golang reflect type for Null values.
+ nullReflectType = reflect.TypeOf(NullValue)
+)
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (n Null) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ switch typeDesc.Kind() {
+ case reflect.Int32:
+ switch typeDesc {
+ case jsonNullType:
+ return structpb.NullValue_NULL_VALUE, nil
+ case nullReflectType:
+ return n, nil
+ }
+ case reflect.Ptr:
+ switch typeDesc {
+ case anyValueType:
+ // Convert to a JSON-null before packing to an Any field since the enum value for JSON
+ // null cannot be packed directly.
+ pb, err := n.ConvertToNative(jsonValueType)
+ if err != nil {
+ return nil, err
+ }
+ return anypb.New(pb.(proto.Message))
+ case jsonValueType:
+ return structpb.NewNullValue(), nil
+ case boolWrapperType, byteWrapperType, doubleWrapperType, floatWrapperType,
+ int32WrapperType, int64WrapperType, stringWrapperType, uint32WrapperType,
+ uint64WrapperType:
+ return nil, nil
+ }
+ case reflect.Interface:
+ nv := n.Value()
+ if reflect.TypeOf(nv).Implements(typeDesc) {
+ return nv, nil
+ }
+ if reflect.TypeOf(n).Implements(typeDesc) {
+ return n, nil
+ }
+ }
+ // If the type conversion isn't supported return an error.
+ return nil, fmt.Errorf("type conversion error from '%v' to '%v'", NullType, typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (n Null) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case StringType:
+ return String("null")
+ case NullType:
+ return n
+ case TypeType:
+ return NullType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", NullType, typeVal)
+}
+
+// Equal implements ref.Val.Equal.
+func (n Null) Equal(other ref.Val) ref.Val {
+ return Bool(NullType == other.Type())
+}
+
+// IsZeroValue returns true as null always represents an absent value.
+func (n Null) IsZeroValue() bool {
+ return true
+}
+
+// Type implements ref.Val.Type.
+func (n Null) Type() ref.Type {
+ return NullType
+}
+
+// Value implements ref.Val.Value.
+func (n Null) Value() any {
+ return structpb.NullValue_NULL_VALUE
+}
diff --git a/vendor/github.com/google/cel-go/common/types/object.go b/vendor/github.com/google/cel-go/common/types/object.go
new file mode 100644
index 0000000..8ba0af9
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/object.go
@@ -0,0 +1,165 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/proto"
+
+ "github.com/google/cel-go/common/types/pb"
+ "github.com/google/cel-go/common/types/ref"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+type protoObj struct {
+ Adapter
+ value proto.Message
+ typeDesc *pb.TypeDescription
+ typeValue ref.Val
+}
+
+// NewObject returns an object based on a proto.Message value which handles
+// conversion between protobuf type values and expression type values.
+// Objects support indexing and iteration.
+//
+// Note: the type value is pulled from the list of registered types within the
+// type provider. If the proto type is not registered within the type provider,
+// then this will result in an error within the type adapter / provider.
+func NewObject(adapter Adapter,
+ typeDesc *pb.TypeDescription,
+ typeValue ref.Val,
+ value proto.Message) ref.Val {
+ return &protoObj{
+ Adapter: adapter,
+ value: value,
+ typeDesc: typeDesc,
+ typeValue: typeValue}
+}
+
+func (o *protoObj) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ srcPB := o.value
+ if reflect.TypeOf(srcPB).AssignableTo(typeDesc) {
+ return srcPB, nil
+ }
+ if reflect.TypeOf(o).AssignableTo(typeDesc) {
+ return o, nil
+ }
+ switch typeDesc {
+ case anyValueType:
+ _, isAny := srcPB.(*anypb.Any)
+ if isAny {
+ return srcPB, nil
+ }
+ return anypb.New(srcPB)
+ case jsonValueType:
+ // Marshal the proto to JSON first, and then rehydrate as protobuf.Value as there is no
+ // support for direct conversion from proto.Message to protobuf.Value.
+ bytes, err := protojson.Marshal(srcPB)
+ if err != nil {
+ return nil, err
+ }
+ json := &structpb.Value{}
+ err = protojson.Unmarshal(bytes, json)
+ if err != nil {
+ return nil, err
+ }
+ return json, nil
+ default:
+ if typeDesc == o.typeDesc.ReflectType() {
+ return o.value, nil
+ }
+ if typeDesc.Kind() == reflect.Ptr {
+ val := reflect.New(typeDesc.Elem()).Interface()
+ dstPB, ok := val.(proto.Message)
+ if ok {
+ err := pb.Merge(dstPB, srcPB)
+ if err != nil {
+ return nil, fmt.Errorf("type conversion error: %v", err)
+ }
+ return dstPB, nil
+ }
+ }
+ }
+ return nil, fmt.Errorf("type conversion error from '%T' to '%v'", o.value, typeDesc)
+}
+
+func (o *protoObj) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ default:
+ if o.Type().TypeName() == typeVal.TypeName() {
+ return o
+ }
+ case TypeType:
+ return o.typeValue
+ }
+ return NewErr("type conversion error from '%s' to '%s'", o.typeDesc.Name(), typeVal)
+}
+
+func (o *protoObj) Equal(other ref.Val) ref.Val {
+ otherPB, ok := other.Value().(proto.Message)
+ return Bool(ok && pb.Equal(o.value, otherPB))
+}
+
+// IsSet tests whether a defined field is set to a non-default value.
+func (o *protoObj) IsSet(field ref.Val) ref.Val {
+ protoFieldName, ok := field.(String)
+ if !ok {
+ return MaybeNoSuchOverloadErr(field)
+ }
+ protoFieldStr := string(protoFieldName)
+ fd, found := o.typeDesc.FieldByName(protoFieldStr)
+ if !found {
+ return NewErr("no such field '%s'", field)
+ }
+ if fd.IsSet(o.value) {
+ return True
+ }
+ return False
+}
+
+// IsZeroValue returns true if the protobuf object is empty.
+func (o *protoObj) IsZeroValue() bool {
+ return proto.Equal(o.value, o.typeDesc.Zero())
+}
+
+func (o *protoObj) Get(index ref.Val) ref.Val {
+ protoFieldName, ok := index.(String)
+ if !ok {
+ return MaybeNoSuchOverloadErr(index)
+ }
+ protoFieldStr := string(protoFieldName)
+ fd, found := o.typeDesc.FieldByName(protoFieldStr)
+ if !found {
+ return NewErr("no such field '%s'", index)
+ }
+ fv, err := fd.GetFrom(o.value)
+ if err != nil {
+ return NewErr(err.Error())
+ }
+ return o.NativeToValue(fv)
+}
+
+func (o *protoObj) Type() ref.Type {
+ return o.typeValue.(ref.Type)
+}
+
+func (o *protoObj) Value() any {
+ return o.value
+}
diff --git a/vendor/github.com/google/cel-go/common/types/optional.go b/vendor/github.com/google/cel-go/common/types/optional.go
new file mode 100644
index 0000000..a9f30ae
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/optional.go
@@ -0,0 +1,108 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+
+ "github.com/google/cel-go/common/types/ref"
+)
+
+var (
+ // OptionalType indicates the runtime type of an optional value.
+ OptionalType = NewOpaqueType("optional")
+
+ // OptionalNone is a sentinel value which is used to indicate an empty optional value.
+ OptionalNone = &Optional{}
+)
+
+// OptionalOf returns an optional value which wraps a concrete CEL value.
+func OptionalOf(value ref.Val) *Optional {
+ return &Optional{value: value}
+}
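+
+// Illustrative usage (a sketch):
+//
+//    OptionalOf(String("x")).GetValue() // String("x")
+//    OptionalNone.HasValue()            // false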
+
+// Optional value which points to a value if non-empty.
+type Optional struct {
+ value ref.Val
+}
+
+// HasValue returns true if the optional has a value.
+func (o *Optional) HasValue() bool {
+ return o.value != nil
+}
+
+// GetValue returns the wrapped value contained in the optional.
+func (o *Optional) GetValue() ref.Val {
+ if !o.HasValue() {
+ return NewErr("optional.none() dereference")
+ }
+ return o.value
+}
+
+// ConvertToNative implements the ref.Val interface method.
+func (o *Optional) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ if !o.HasValue() {
+ return nil, errors.New("optional.none() dereference")
+ }
+ return o.value.ConvertToNative(typeDesc)
+}
+
+// ConvertToType implements the ref.Val interface method.
+func (o *Optional) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case OptionalType:
+ return o
+ case TypeType:
+ return OptionalType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", OptionalType, typeVal)
+}
+
+// Equal determines whether the values contained by two optional values are equal.
+func (o *Optional) Equal(other ref.Val) ref.Val {
+ otherOpt, isOpt := other.(*Optional)
+ if !isOpt {
+ return False
+ }
+ if !o.HasValue() {
+ return Bool(!otherOpt.HasValue())
+ }
+ if !otherOpt.HasValue() {
+ return False
+ }
+ return o.value.Equal(otherOpt.value)
+}
+
+func (o *Optional) String() string {
+ if o.HasValue() {
+ return fmt.Sprintf("optional(%v)", o.GetValue())
+ }
+ return "optional.none()"
+}
+
+// Type implements the ref.Val interface method.
+func (o *Optional) Type() ref.Type {
+ return OptionalType
+}
+
+// Value returns the underlying 'Value()' of the wrapped value, if present.
+func (o *Optional) Value() any {
+ if o.value == nil {
+ return nil
+ }
+ return o.value.Value()
+}
diff --git a/vendor/github.com/google/cel-go/common/types/overflow.go b/vendor/github.com/google/cel-go/common/types/overflow.go
new file mode 100644
index 0000000..c68a921
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/overflow.go
@@ -0,0 +1,389 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "math"
+ "time"
+)
+
+var (
+ doubleTwoTo64 = math.Ldexp(1.0, 64)
+)
+
+// addInt64Checked performs addition with overflow detection of two int64 values.
+//
+// If the operation fails the error return value will be non-nil.
+func addInt64Checked(x, y int64) (int64, error) {
+ if (y > 0 && x > math.MaxInt64-y) || (y < 0 && x < math.MinInt64-y) {
+ return 0, errIntOverflow
+ }
+ return x + y, nil
+}
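+
+// For example (a sketch), the guards above reject sums that would wrap:
+//
+//    addInt64Checked(math.MaxInt64, 1)  // (0, errIntOverflow)
+//    addInt64Checked(math.MinInt64, -1) // (0, errIntOverflow)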
+
+// subtractInt64Checked performs subtraction with overflow detection of two int64 values.
+//
+// If the operation fails the error return value will be non-nil.
+func subtractInt64Checked(x, y int64) (int64, error) {
+ if (y < 0 && x > math.MaxInt64+y) || (y > 0 && x < math.MinInt64+y) {
+ return 0, errIntOverflow
+ }
+ return x - y, nil
+}
+
+// negateInt64Checked performs negation with overflow detection of an int64.
+//
+// If the operation fails the error return value will be non-nil.
+func negateInt64Checked(x int64) (int64, error) {
+ // In two's complement, negating MinInt64 would result in a value of MaxInt64+1.
+ if x == math.MinInt64 {
+ return 0, errIntOverflow
+ }
+ return -x, nil
+}
+
+// multiplyInt64Checked performs multiplication with overflow detection of two int64 values.
+//
+// If the operation fails the error return value will be non-nil.
+func multiplyInt64Checked(x, y int64) (int64, error) {
+ // Detecting multiplication overflow is more complicated than the others. The first two detect
+ // attempting to negate MinInt64, which would result in MaxInt64+1. The other four detect normal
+ // overflow conditions.
+ if (x == -1 && y == math.MinInt64) || (y == -1 && x == math.MinInt64) ||
+ // x is positive, y is positive
+ (x > 0 && y > 0 && x > math.MaxInt64/y) ||
+ // x is positive, y is negative
+ (x > 0 && y < 0 && y < math.MinInt64/x) ||
+ // x is negative, y is positive
+ (x < 0 && y > 0 && x < math.MinInt64/y) ||
+ // x is negative, y is negative
+ (x < 0 && y < 0 && y < math.MaxInt64/x) {
+ return 0, errIntOverflow
+ }
+ return x * y, nil
+}
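+
+// Worked example (a sketch) of the positive/positive guard: for x, y > 0 the
+// product overflows exactly when x > MaxInt64/y under integer division, e.g.
+//
+//    multiplyInt64Checked(2, math.MaxInt64)   // (0, errIntOverflow), since 2 > MaxInt64/MaxInt64 == 1
+//    multiplyInt64Checked(2, math.MaxInt64/2) // ok, since 2*(MaxInt64/2) == MaxInt64-1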
+
+// divideInt64Checked performs division with overflow detection of two int64 values,
+// as well as a division by zero check.
+//
+// If the operation fails the error return value will be non-nil.
+func divideInt64Checked(x, y int64) (int64, error) {
+ // Division by zero.
+ if y == 0 {
+ return 0, errDivideByZero
+ }
+ // In two's complement, negating MinInt64 would result in a value of MaxInt64+1.
+ if x == math.MinInt64 && y == -1 {
+ return 0, errIntOverflow
+ }
+ return x / y, nil
+}
+
+// moduloInt64Checked performs modulo with overflow detection of two int64 values
+// as well as a modulus by zero check.
+//
+// If the operation fails the error return value will be non-nil.
+func moduloInt64Checked(x, y int64) (int64, error) {
+ // Modulus by zero.
+ if y == 0 {
+ return 0, errModulusByZero
+ }
+ // In two's complement, negating MinInt64 would result in a value of MaxInt64+1.
+ if x == math.MinInt64 && y == -1 {
+ return 0, errIntOverflow
+ }
+ return x % y, nil
+}
+
+// addUint64Checked performs addition with overflow detection of two uint64 values.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func addUint64Checked(x, y uint64) (uint64, error) {
+ if y > 0 && x > math.MaxUint64-y {
+ return 0, errUintOverflow
+ }
+ return x + y, nil
+}
+
+// subtractUint64Checked performs subtraction with overflow detection of two uint64 values.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func subtractUint64Checked(x, y uint64) (uint64, error) {
+ if y > x {
+ return 0, errUintOverflow
+ }
+ return x - y, nil
+}
+
+// multiplyUint64Checked performs multiplication with overflow detection of two uint64 values.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func multiplyUint64Checked(x, y uint64) (uint64, error) {
+ if y != 0 && x > math.MaxUint64/y {
+ return 0, errUintOverflow
+ }
+ return x * y, nil
+}
+
+// divideUint64Checked performs division with a test for division by zero.
+//
+// If the operation fails the error return value will be non-nil.
+func divideUint64Checked(x, y uint64) (uint64, error) {
+ if y == 0 {
+ return 0, errDivideByZero
+ }
+ return x / y, nil
+}
+
+// moduloUint64Checked performs modulo with a test for modulus by zero.
+//
+// If the operation fails the error return value will be non-nil.
+func moduloUint64Checked(x, y uint64) (uint64, error) {
+ if y == 0 {
+ return 0, errModulusByZero
+ }
+ return x % y, nil
+}
+
+// addDurationChecked performs addition with overflow detection of two time.Durations.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func addDurationChecked(x, y time.Duration) (time.Duration, error) {
+ val, err := addInt64Checked(int64(x), int64(y))
+ if err != nil {
+ return time.Duration(0), err
+ }
+ return time.Duration(val), nil
+}
+
+// subtractDurationChecked performs subtraction with overflow detection of two time.Durations.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func subtractDurationChecked(x, y time.Duration) (time.Duration, error) {
+ val, err := subtractInt64Checked(int64(x), int64(y))
+ if err != nil {
+ return time.Duration(0), err
+ }
+ return time.Duration(val), nil
+}
+
+// negateDurationChecked performs negation with overflow detection of a time.Duration.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func negateDurationChecked(x time.Duration) (time.Duration, error) {
+ val, err := negateInt64Checked(int64(x))
+ if err != nil {
+ return time.Duration(0), err
+ }
+ return time.Duration(val), nil
+}
+
+// addTimeDurationChecked performs addition with overflow detection of a time.Time and time.Duration.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func addTimeDurationChecked(x time.Time, y time.Duration) (time.Time, error) {
+ // This is tricky. A time is represented as (int64, int32), where the first is seconds and the
+ // second is nanoseconds. A duration is an int64 representing nanoseconds. We cannot normalize time to int64
+ // as it could potentially overflow. The only way to proceed is to break time and duration into
+ // second and nanosecond components.
+
+ // First we break time into its components by truncating and subtracting.
+ sec1 := x.Truncate(time.Second).Unix() // Truncate to seconds.
+ nsec1 := x.Sub(x.Truncate(time.Second)).Nanoseconds() // Get nanoseconds by truncating and subtracting.
+
+ // Second we break duration into its components by dividing and modulo.
+ sec2 := int64(y) / int64(time.Second) // Truncate to seconds.
+ nsec2 := int64(y) % int64(time.Second) // Get remainder.
+
+ // Add seconds first, detecting any overflow.
+ sec, err := addInt64Checked(sec1, sec2)
+ if err != nil {
+ return time.Time{}, err
+ }
+ // Nanoseconds cannot overflow as time.Time normalizes them to [0, 999999999].
+ nsec := nsec1 + nsec2
+
+ // We need to normalize nanoseconds to be positive and carry extra nanoseconds to seconds.
+ // Adapted from time.Unix(int64, int64).
+ if nsec < 0 || nsec >= int64(time.Second) {
+ // Add seconds.
+ sec, err = addInt64Checked(sec, nsec/int64(time.Second))
+ if err != nil {
+ return time.Time{}, err
+ }
+
+ nsec -= (nsec / int64(time.Second)) * int64(time.Second)
+ if nsec < 0 {
+ // Subtract an extra second
+ sec, err = addInt64Checked(sec, -1)
+ if err != nil {
+ return time.Time{}, err
+ }
+ nsec += int64(time.Second)
+ }
+ }
+
+ // Check if the number of seconds from the Unix epoch is within our acceptable range.
+ if sec < minUnixTime || sec > maxUnixTime {
+ return time.Time{}, errTimestampOverflow
+ }
+
+ // Return resulting time and propagate time zone.
+ return time.Unix(sec, nsec).In(x.Location()), nil
+}
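+
+// Illustrative example (not part of the upstream source): adding a negative
+// duration whose nanosecond remainder drives nsec below zero exercises the
+// borrow-a-second branch above:
+//
+//	t := time.Unix(10, 100)                   // 10s + 100ns
+//	got, _ := addTimeDurationChecked(t, -200) // subtract 200ns
+//	// sec == 10, nsec == -100 before normalization; one second is borrowed,
+//	// so got == time.Unix(9, 999999900) in t's location.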
+
+// subtractTimeChecked performs subtraction with overflow detection of two time.Time.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func subtractTimeChecked(x, y time.Time) (time.Duration, error) {
+ // Similar to addTimeDurationChecked() above.
+
+ // First we break time into its components by truncating and subtracting.
+ sec1 := x.Truncate(time.Second).Unix() // Truncate to seconds.
+ nsec1 := x.Sub(x.Truncate(time.Second)).Nanoseconds() // Get nanoseconds by truncating and subtracting.
+
+ // Second we break the other time, y, into its components by truncating and subtracting.
+ sec2 := y.Truncate(time.Second).Unix() // Truncate to seconds.
+ nsec2 := y.Sub(y.Truncate(time.Second)).Nanoseconds() // Get nanoseconds by truncating and subtracting.
+
+ // Subtract seconds first, detecting any overflow.
+ sec, err := subtractInt64Checked(sec1, sec2)
+ if err != nil {
+ return time.Duration(0), err
+ }
+
+ // Nanoseconds cannot overflow as time.Time normalizes them to [0, 999999999].
+ nsec := nsec1 - nsec2
+
+ // Scale seconds to nanoseconds detecting overflow.
+ tsec, err := multiplyInt64Checked(sec, int64(time.Second))
+ if err != nil {
+ return time.Duration(0), err
+ }
+
+ // Lastly we need to add the two nanoseconds together.
+ val, err := addInt64Checked(tsec, nsec)
+ if err != nil {
+ return time.Duration(0), err
+ }
+
+ return time.Duration(val), nil
+}
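+
+// Illustrative example (not part of the upstream source): because the result
+// is scaled to nanoseconds, two timestamps more than roughly 292 years apart
+// (the int64 nanosecond range) overflow even though both times are valid:
+//
+//	x := time.Unix(0, 0)
+//	y := time.Unix(10000000000, 0)      // ~317 years after the epoch
+//	_, err := subtractTimeChecked(y, x) // err == errIntOverflow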
+
+// subtractTimeDurationChecked performs subtraction with overflow detection of a time.Time and
+// time.Duration.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func subtractTimeDurationChecked(x time.Time, y time.Duration) (time.Time, error) {
+ // The easiest way to implement this is to negate y and add them.
+ // x - y = x + -y
+ val, err := negateDurationChecked(y)
+ if err != nil {
+ return time.Time{}, err
+ }
+ return addTimeDurationChecked(x, val)
+}
+
+// doubleToInt64Checked converts a double to an int64 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func doubleToInt64Checked(v float64) (int64, error) {
+ if math.IsInf(v, 0) || math.IsNaN(v) || v <= float64(math.MinInt64) || v >= float64(math.MaxInt64) {
+ return 0, errIntOverflow
+ }
+ return int64(v), nil
+}
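+
+// Illustrative example (not part of the upstream source): the bounds are
+// deliberately exclusive because float64(math.MaxInt64) rounds up to exactly
+// 2^63, which is not representable as an int64:
+//
+//	_, err := doubleToInt64Checked(float64(math.MaxInt64)) // err == errIntOverflow
+//	v, _ := doubleToInt64Checked(-1.9)                     // v == -1 (truncates toward zero)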
+
+// doubleToUint64Checked converts a double to a uint64 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func doubleToUint64Checked(v float64) (uint64, error) {
+ if math.IsInf(v, 0) || math.IsNaN(v) || v < 0 || v >= doubleTwoTo64 {
+ return 0, errUintOverflow
+ }
+ return uint64(v), nil
+}
+
+// int64ToUint64Checked converts an int64 to a uint64 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func int64ToUint64Checked(v int64) (uint64, error) {
+ if v < 0 {
+ return 0, errUintOverflow
+ }
+ return uint64(v), nil
+}
+
+// int64ToInt32Checked converts an int64 to an int32 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func int64ToInt32Checked(v int64) (int32, error) {
+ if v < math.MinInt32 || v > math.MaxInt32 {
+ return 0, errIntOverflow
+ }
+ return int32(v), nil
+}
+
+// uint64ToUint32Checked converts a uint64 to a uint32 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func uint64ToUint32Checked(v uint64) (uint32, error) {
+ if v > math.MaxUint32 {
+ return 0, errUintOverflow
+ }
+ return uint32(v), nil
+}
+
+// uint64ToInt64Checked converts a uint64 to an int64 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func uint64ToInt64Checked(v uint64) (int64, error) {
+ if v > math.MaxInt64 {
+ return 0, errIntOverflow
+ }
+ return int64(v), nil
+}
+
+func doubleToUint64Lossless(v float64) (uint64, bool) {
+ u, err := doubleToUint64Checked(v)
+ if err != nil {
+ return 0, false
+ }
+ if float64(u) != v {
+ return 0, false
+ }
+ return u, true
+}
+
+func doubleToInt64Lossless(v float64) (int64, bool) {
+ i, err := doubleToInt64Checked(v)
+ if err != nil {
+ return 0, false
+ }
+ if float64(i) != v {
+ return 0, false
+ }
+ return i, true
+}
+
+func int64ToUint64Lossless(v int64) (uint64, bool) {
+ u, err := int64ToUint64Checked(v)
+ return u, err == nil
+}
+
+func uint64ToInt64Lossless(v uint64) (int64, bool) {
+ i, err := uint64ToInt64Checked(v)
+ return i, err == nil
+}
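+
+// Illustrative example (not part of the upstream source): the lossless
+// variants additionally require that the round-trip preserves the value:
+//
+//	_, ok := doubleToInt64Lossless(3.5) // ok == false, fractional part would be lost
+//	v, ok := doubleToInt64Lossless(3.0) // v == 3, ok == true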
diff --git a/vendor/github.com/google/cel-go/common/types/pb/BUILD.bazel b/vendor/github.com/google/cel-go/common/types/pb/BUILD.bazel
new file mode 100644
index 0000000..e2b9d37
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/pb/BUILD.bazel
@@ -0,0 +1,53 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "checked.go",
+ "enum.go",
+ "equal.go",
+ "file.go",
+ "pb.go",
+ "type.go",
+ ],
+ importpath = "github.com/google/cel-go/common/types/pb",
+ deps = [
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//encoding/protowire:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
+ "@org_golang_google_protobuf//reflect/protoregistry:go_default_library",
+ "@org_golang_google_protobuf//types/dynamicpb:go_default_library",
+ "@org_golang_google_protobuf//types/known/anypb:go_default_library",
+ "@org_golang_google_protobuf//types/known/durationpb:go_default_library",
+ "@org_golang_google_protobuf//types/known/emptypb:go_default_library",
+ "@org_golang_google_protobuf//types/known/structpb:go_default_library",
+ "@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
+ "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library",
+ ],
+)
+
+go_test(
+ name = "go_default_test",
+ size = "small",
+ srcs = [
+ "equal_test.go",
+ "file_test.go",
+ "pb_test.go",
+ "type_test.go",
+ ],
+ embed = [":go_default_library"],
+ deps = [
+ "//checker/decls:go_default_library",
+ "//test/proto2pb:test_all_types_go_proto",
+ "//test/proto3pb:test_all_types_go_proto",
+ "@org_golang_google_protobuf//reflect/protodesc:go_default_library",
+ "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
+ "@org_golang_google_protobuf//types/descriptorpb:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/common/types/pb/checked.go b/vendor/github.com/google/cel-go/common/types/pb/checked.go
new file mode 100644
index 0000000..312a6a0
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/pb/checked.go
@@ -0,0 +1,93 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pb
+
+import (
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+var (
+ // CheckedPrimitives map from proto field descriptor type to expr.Type.
+ CheckedPrimitives = map[protoreflect.Kind]*exprpb.Type{
+ protoreflect.BoolKind: checkedBool,
+ protoreflect.BytesKind: checkedBytes,
+ protoreflect.DoubleKind: checkedDouble,
+ protoreflect.FloatKind: checkedDouble,
+ protoreflect.Int32Kind: checkedInt,
+ protoreflect.Int64Kind: checkedInt,
+ protoreflect.Sint32Kind: checkedInt,
+ protoreflect.Sint64Kind: checkedInt,
+ protoreflect.Uint32Kind: checkedUint,
+ protoreflect.Uint64Kind: checkedUint,
+ protoreflect.Fixed32Kind: checkedUint,
+ protoreflect.Fixed64Kind: checkedUint,
+ protoreflect.Sfixed32Kind: checkedInt,
+ protoreflect.Sfixed64Kind: checkedInt,
+ protoreflect.StringKind: checkedString}
+
+ // CheckedWellKnowns map from qualified proto type name to expr.Type for
+ // well-known proto types.
+ CheckedWellKnowns = map[string]*exprpb.Type{
+ // Wrapper types.
+ "google.protobuf.BoolValue": checkedWrap(checkedBool),
+ "google.protobuf.BytesValue": checkedWrap(checkedBytes),
+ "google.protobuf.DoubleValue": checkedWrap(checkedDouble),
+ "google.protobuf.FloatValue": checkedWrap(checkedDouble),
+ "google.protobuf.Int64Value": checkedWrap(checkedInt),
+ "google.protobuf.Int32Value": checkedWrap(checkedInt),
+ "google.protobuf.UInt64Value": checkedWrap(checkedUint),
+ "google.protobuf.UInt32Value": checkedWrap(checkedUint),
+ "google.protobuf.StringValue": checkedWrap(checkedString),
+ // Well-known types.
+ "google.protobuf.Any": checkedAny,
+ "google.protobuf.Duration": checkedDuration,
+ "google.protobuf.Timestamp": checkedTimestamp,
+ // Json types.
+ "google.protobuf.ListValue": checkedListDyn,
+ "google.protobuf.NullValue": checkedNull,
+ "google.protobuf.Struct": checkedMapStringDyn,
+ "google.protobuf.Value": checkedDyn,
+ }
+
+ // common types
+ checkedDyn = &exprpb.Type{TypeKind: &exprpb.Type_Dyn{Dyn: &emptypb.Empty{}}}
+ // Wrapper and primitive types.
+ checkedBool = checkedPrimitive(exprpb.Type_BOOL)
+ checkedBytes = checkedPrimitive(exprpb.Type_BYTES)
+ checkedDouble = checkedPrimitive(exprpb.Type_DOUBLE)
+ checkedInt = checkedPrimitive(exprpb.Type_INT64)
+ checkedString = checkedPrimitive(exprpb.Type_STRING)
+ checkedUint = checkedPrimitive(exprpb.Type_UINT64)
+ // Well-known type equivalents.
+ checkedAny = checkedWellKnown(exprpb.Type_ANY)
+ checkedDuration = checkedWellKnown(exprpb.Type_DURATION)
+ checkedTimestamp = checkedWellKnown(exprpb.Type_TIMESTAMP)
+ // Json-based type equivalents.
+ checkedNull = &exprpb.Type{
+ TypeKind: &exprpb.Type_Null{
+ Null: structpb.NullValue_NULL_VALUE}}
+ checkedListDyn = &exprpb.Type{
+ TypeKind: &exprpb.Type_ListType_{
+ ListType: &exprpb.Type_ListType{ElemType: checkedDyn}}}
+ checkedMapStringDyn = &exprpb.Type{
+ TypeKind: &exprpb.Type_MapType_{
+ MapType: &exprpb.Type_MapType{
+ KeyType: checkedString,
+ ValueType: checkedDyn}}}
+)
diff --git a/vendor/github.com/google/cel-go/common/types/pb/enum.go b/vendor/github.com/google/cel-go/common/types/pb/enum.go
new file mode 100644
index 0000000..09a1546
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/pb/enum.go
@@ -0,0 +1,44 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pb
+
+import (
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// newEnumValueDescription produces an enum value description with the fully qualified enum value
+// name and the enum value descriptor.
+func newEnumValueDescription(name string, desc protoreflect.EnumValueDescriptor) *EnumValueDescription {
+ return &EnumValueDescription{
+ enumValueName: name,
+ desc: desc,
+ }
+}
+
+// EnumValueDescription maps a fully-qualified enum value name to its numeric value.
+type EnumValueDescription struct {
+ enumValueName string
+ desc protoreflect.EnumValueDescriptor
+}
+
+// Name returns the fully-qualified identifier name for the enum value.
+func (ed *EnumValueDescription) Name() string {
+ return ed.enumValueName
+}
+
+// Value returns the (numeric) value of the enum.
+func (ed *EnumValueDescription) Value() int32 {
+ return int32(ed.desc.Number())
+}
diff --git a/vendor/github.com/google/cel-go/common/types/pb/equal.go b/vendor/github.com/google/cel-go/common/types/pb/equal.go
new file mode 100644
index 0000000..76893d8
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/pb/equal.go
@@ -0,0 +1,206 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pb
+
+import (
+ "bytes"
+ "reflect"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+)
+
+// Equal returns whether two proto.Message instances are equal using the following criteria:
+//
+// - Messages must share the same instance of the type descriptor
+// - Known set fields are compared using semantic equality
+// - Bytes are compared using bytes.Equal
+// - Scalar values are compared with operator ==
+// - List and map types are equal if they have the same length and all elements are equal
+// - Messages are equal if they share the same descriptor and all set fields are equal
+// - Unknown fields are compared using byte equality
+// - NaN values are not equal to each other
+// - google.protobuf.Any values are unpacked before comparison
+// - If the type descriptor for a protobuf.Any cannot be found, byte equality is used rather than
+// semantic equality.
+//
+// This method of proto equality mirrors the behavior of the C++ protobuf MessageDifferencer
+// whereas the golang proto.Equal implementation mirrors the Java protobuf equals() method's
+// behavior, which treats NaN values as equal due to Java semantics.
+func Equal(x, y proto.Message) bool {
+ if x == nil || y == nil {
+ return x == nil && y == nil
+ }
+ xRef := x.ProtoReflect()
+ yRef := y.ProtoReflect()
+ return equalMessage(xRef, yRef)
+}
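+
+// Illustrative example (not part of the upstream source): per the NaN rule
+// above, this function diverges from proto.Equal for NaN-valued fields:
+//
+//	x := wrapperspb.Double(math.NaN())
+//	y := wrapperspb.Double(math.NaN())
+//	Equal(x, y) // false: NaN != NaN under operator ==
+//	// proto.Equal(x, y) would report true here (identical NaN bit patterns).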
+
+func equalMessage(mx, my protoreflect.Message) bool {
+ // Note, the original proto.Equal upon which this implementation is based does not specifically handle the
+ // case when both messages are invalid. It is assumed that the descriptors will be equal and that byte-wise
+ // comparison will be used, though the semantics of validity are neither clear nor promised within the
+ // proto.Equal implementation.
+ if mx.IsValid() != my.IsValid() || mx.Descriptor() != my.Descriptor() {
+ return false
+ }
+
+ // This is an innovation on the default proto.Equal: protobuf.Any values are unpacked before
+ // comparison, as otherwise the Any values would be compared by bytes rather than structurally.
+ if isAny(mx) && isAny(my) {
+ ax := mx.Interface().(*anypb.Any)
+ ay := my.Interface().(*anypb.Any)
+ // If the values are not the same type url, return false.
+ if ax.GetTypeUrl() != ay.GetTypeUrl() {
+ return false
+ }
+ // If the values are byte equal, then return true.
+ if bytes.Equal(ax.GetValue(), ay.GetValue()) {
+ return true
+ }
+ // Otherwise fall through to the semantic comparison of the any values.
+ x, err := ax.UnmarshalNew()
+ if err != nil {
+ return false
+ }
+ y, err := ay.UnmarshalNew()
+ if err != nil {
+ return false
+ }
+ // Recursively compare the unwrapped messages to ensure nested Any values are unwrapped accordingly.
+ return equalMessage(x.ProtoReflect(), y.ProtoReflect())
+ }
+
+ // Walk the set fields to determine field-wise equality
+ nx := 0
+ equal := true
+ mx.Range(func(fd protoreflect.FieldDescriptor, vx protoreflect.Value) bool {
+ nx++
+ equal = my.Has(fd) && equalField(fd, vx, my.Get(fd))
+ return equal
+ })
+ if !equal {
+ return false
+ }
+ // Establish the count of set fields on message y
+ ny := 0
+ my.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool {
+ ny++
+ return true
+ })
+ // If the number of set fields is not equal return false.
+ if nx != ny {
+ return false
+ }
+
+ return equalUnknown(mx.GetUnknown(), my.GetUnknown())
+}
+
+func equalField(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool {
+ switch {
+ case fd.IsMap():
+ return equalMap(fd, x.Map(), y.Map())
+ case fd.IsList():
+ return equalList(fd, x.List(), y.List())
+ default:
+ return equalValue(fd, x, y)
+ }
+}
+
+func equalMap(fd protoreflect.FieldDescriptor, x, y protoreflect.Map) bool {
+ if x.Len() != y.Len() {
+ return false
+ }
+ equal := true
+ x.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool {
+ vy := y.Get(k)
+ equal = y.Has(k) && equalValue(fd.MapValue(), vx, vy)
+ return equal
+ })
+ return equal
+}
+
+func equalList(fd protoreflect.FieldDescriptor, x, y protoreflect.List) bool {
+ if x.Len() != y.Len() {
+ return false
+ }
+ for i := x.Len() - 1; i >= 0; i-- {
+ if !equalValue(fd, x.Get(i), y.Get(i)) {
+ return false
+ }
+ }
+ return true
+}
+
+func equalValue(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool {
+ switch fd.Kind() {
+ case protoreflect.BoolKind:
+ return x.Bool() == y.Bool()
+ case protoreflect.EnumKind:
+ return x.Enum() == y.Enum()
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind,
+ protoreflect.Int64Kind, protoreflect.Sint64Kind,
+ protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind:
+ return x.Int() == y.Int()
+ case protoreflect.Uint32Kind, protoreflect.Uint64Kind,
+ protoreflect.Fixed32Kind, protoreflect.Fixed64Kind:
+ return x.Uint() == y.Uint()
+ case protoreflect.FloatKind, protoreflect.DoubleKind:
+ return x.Float() == y.Float()
+ case protoreflect.StringKind:
+ return x.String() == y.String()
+ case protoreflect.BytesKind:
+ return bytes.Equal(x.Bytes(), y.Bytes())
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ return equalMessage(x.Message(), y.Message())
+ default:
+ return x.Interface() == y.Interface()
+ }
+}
+
+func equalUnknown(x, y protoreflect.RawFields) bool {
+ lenX := len(x)
+ lenY := len(y)
+ if lenX != lenY {
+ return false
+ }
+ if lenX == 0 {
+ return true
+ }
+ if bytes.Equal([]byte(x), []byte(y)) {
+ return true
+ }
+
+ mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
+ my := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
+ for len(x) > 0 {
+ fnum, _, n := protowire.ConsumeField(x)
+ mx[fnum] = append(mx[fnum], x[:n]...)
+ x = x[n:]
+ }
+ for len(y) > 0 {
+ fnum, _, n := protowire.ConsumeField(y)
+ my[fnum] = append(my[fnum], y[:n]...)
+ y = y[n:]
+ }
+ return reflect.DeepEqual(mx, my)
+}
+
+func isAny(m protoreflect.Message) bool {
+ return string(m.Descriptor().FullName()) == "google.protobuf.Any"
+}
diff --git a/vendor/github.com/google/cel-go/common/types/pb/file.go b/vendor/github.com/google/cel-go/common/types/pb/file.go
new file mode 100644
index 0000000..e323afb
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/pb/file.go
@@ -0,0 +1,202 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pb
+
+import (
+ "fmt"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ dynamicpb "google.golang.org/protobuf/types/dynamicpb"
+)
+
+// newFileDescription returns a FileDescription instance with a complete listing of all the message
+// types and enum values, as well as a map of extensions declared within any scope in the file.
+func newFileDescription(fileDesc protoreflect.FileDescriptor, pbdb *Db) (*FileDescription, extensionMap) {
+ metadata := collectFileMetadata(fileDesc)
+ enums := make(map[string]*EnumValueDescription)
+ for name, enumVal := range metadata.enumValues {
+ enums[name] = newEnumValueDescription(name, enumVal)
+ }
+ types := make(map[string]*TypeDescription)
+ for name, msgType := range metadata.msgTypes {
+ types[name] = newTypeDescription(name, msgType, pbdb.extensions)
+ }
+ fileExtMap := make(extensionMap)
+ for typeName, extensions := range metadata.msgExtensionMap {
+ messageExtMap, found := fileExtMap[typeName]
+ if !found {
+ messageExtMap = make(map[string]*FieldDescription)
+ }
+ for _, ext := range extensions {
+ extDesc := dynamicpb.NewExtensionType(ext).TypeDescriptor()
+ messageExtMap[string(ext.FullName())] = newFieldDescription(extDesc)
+ }
+ fileExtMap[typeName] = messageExtMap
+ }
+ return &FileDescription{
+ name: fileDesc.Path(),
+ types: types,
+ enums: enums,
+ }, fileExtMap
+}
+
+// FileDescription holds a map of all types and enum values declared within a proto file.
+type FileDescription struct {
+ name string
+ types map[string]*TypeDescription
+ enums map[string]*EnumValueDescription
+}
+
+// Copy creates a copy of the FileDescription with updated Db references within its types.
+func (fd *FileDescription) Copy(pbdb *Db) *FileDescription {
+ typesCopy := make(map[string]*TypeDescription, len(fd.types))
+ for k, v := range fd.types {
+ typesCopy[k] = v.Copy(pbdb)
+ }
+ return &FileDescription{
+ name: fd.name,
+ types: typesCopy,
+ enums: fd.enums,
+ }
+}
+
+// GetName returns the fully qualified file path for the file.
+func (fd *FileDescription) GetName() string {
+ return fd.name
+}
+
+// GetEnumDescription returns an EnumValueDescription for a qualified enum value
+// name declared within the .proto file.
+func (fd *FileDescription) GetEnumDescription(enumName string) (*EnumValueDescription, bool) {
+ ed, found := fd.enums[sanitizeProtoName(enumName)]
+ return ed, found
+}
+
+// GetEnumNames returns the string names of all enum values in the file.
+func (fd *FileDescription) GetEnumNames() []string {
+ enumNames := make([]string, len(fd.enums))
+ i := 0
+ for _, e := range fd.enums {
+ enumNames[i] = e.Name()
+ i++
+ }
+ return enumNames
+}
+
+// GetTypeDescription returns a TypeDescription for a qualified protobuf message type name
+// declared within the .proto file.
+func (fd *FileDescription) GetTypeDescription(typeName string) (*TypeDescription, bool) {
+ td, found := fd.types[sanitizeProtoName(typeName)]
+ return td, found
+}
+
+// GetTypeNames returns the list of all type names contained within the file.
+func (fd *FileDescription) GetTypeNames() []string {
+ typeNames := make([]string, len(fd.types))
+ i := 0
+ for _, t := range fd.types {
+ typeNames[i] = t.Name()
+ i++
+ }
+ return typeNames
+}
+
+// sanitizeProtoName strips the leading '.' from the proto message name.
+func sanitizeProtoName(name string) string {
+ if name != "" && name[0] == '.' {
+ return name[1:]
+ }
+ return name
+}
+
+// fileMetadata is a flattened view of message types and enum values within a file descriptor.
+type fileMetadata struct {
+ // msgTypes maps from fully-qualified message name to descriptor.
+ msgTypes map[string]protoreflect.MessageDescriptor
+ // enumValues maps from fully-qualified enum value to enum value descriptor.
+ enumValues map[string]protoreflect.EnumValueDescriptor
+ // msgExtensionMap maps from the protobuf message name being extended to a set of extensions
+ // for the type.
+ msgExtensionMap map[string][]protoreflect.ExtensionDescriptor
+
+ // TODO: support enum type definitions for use in future type-check enhancements.
+}
+
+// collectFileMetadata traverses the proto file object graph to collect message types and enum
+// values and index them by their fully qualified names.
+func collectFileMetadata(fileDesc protoreflect.FileDescriptor) *fileMetadata {
+ msgTypes := make(map[string]protoreflect.MessageDescriptor)
+ enumValues := make(map[string]protoreflect.EnumValueDescriptor)
+ msgExtensionMap := make(map[string][]protoreflect.ExtensionDescriptor)
+ collectMsgTypes(fileDesc.Messages(), msgTypes, enumValues, msgExtensionMap)
+ collectEnumValues(fileDesc.Enums(), enumValues)
+ collectExtensions(fileDesc.Extensions(), msgExtensionMap)
+ return &fileMetadata{
+ msgTypes: msgTypes,
+ enumValues: enumValues,
+ msgExtensionMap: msgExtensionMap,
+ }
+}
+
+// collectMsgTypes recursively collects messages, nested messages, and nested enums into a map of
+// fully qualified protobuf names to descriptors.
+func collectMsgTypes(msgTypes protoreflect.MessageDescriptors,
+ msgTypeMap map[string]protoreflect.MessageDescriptor,
+ enumValueMap map[string]protoreflect.EnumValueDescriptor,
+ msgExtensionMap map[string][]protoreflect.ExtensionDescriptor) {
+ for i := 0; i < msgTypes.Len(); i++ {
+ msgType := msgTypes.Get(i)
+ msgTypeMap[string(msgType.FullName())] = msgType
+ nestedMsgTypes := msgType.Messages()
+ if nestedMsgTypes.Len() != 0 {
+ collectMsgTypes(nestedMsgTypes, msgTypeMap, enumValueMap, msgExtensionMap)
+ }
+ nestedEnumTypes := msgType.Enums()
+ if nestedEnumTypes.Len() != 0 {
+ collectEnumValues(nestedEnumTypes, enumValueMap)
+ }
+ nestedExtensions := msgType.Extensions()
+ if nestedExtensions.Len() != 0 {
+ collectExtensions(nestedExtensions, msgExtensionMap)
+ }
+ }
+}
+
+// collectEnumValues accumulates the enum values within an enum declaration.
+func collectEnumValues(enumTypes protoreflect.EnumDescriptors, enumValueMap map[string]protoreflect.EnumValueDescriptor) {
+ for i := 0; i < enumTypes.Len(); i++ {
+ enumType := enumTypes.Get(i)
+ enumTypeValues := enumType.Values()
+ for j := 0; j < enumTypeValues.Len(); j++ {
+ enumValue := enumTypeValues.Get(j)
+ enumValueName := fmt.Sprintf("%s.%s", string(enumType.FullName()), string(enumValue.Name()))
+ enumValueMap[enumValueName] = enumValue
+ }
+ }
+}
+
+func collectExtensions(extensions protoreflect.ExtensionDescriptors, msgExtensionMap map[string][]protoreflect.ExtensionDescriptor) {
+ for i := 0; i < extensions.Len(); i++ {
+ ext := extensions.Get(i)
+ extendsMsg := string(ext.ContainingMessage().FullName())
+ msgExts, found := msgExtensionMap[extendsMsg]
+ if !found {
+ msgExts = []protoreflect.ExtensionDescriptor{}
+ }
+ msgExts = append(msgExts, ext)
+ msgExtensionMap[extendsMsg] = msgExts
+ }
+}
diff --git a/vendor/github.com/google/cel-go/common/types/pb/pb.go b/vendor/github.com/google/cel-go/common/types/pb/pb.go
new file mode 100644
index 0000000..eadebcb
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/pb/pb.go
@@ -0,0 +1,258 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pb reflects over protocol buffer descriptors to generate objects
+// that simplify type, enum, and field lookup.
+package pb
+
+import (
+ "fmt"
+
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ durpb "google.golang.org/protobuf/types/known/durationpb"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ tspb "google.golang.org/protobuf/types/known/timestamppb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// Db maps from file / message / enum name to file description.
+//
+// Each Db is isolated from the others, and while information about protobuf descriptors may be
+// fetched from the global protobuf registry, no descriptors are added to this registry, else
+// the isolation guarantees of the Db object would be violated.
+type Db struct {
+ revFileDescriptorMap map[string]*FileDescription
+ // files contains the deduped set of FileDescriptions whose types are contained in the pb.Db.
+ files []*FileDescription
+ // extensions contains the mapping between a given type name, extension name and its FieldDescription
+ extensions map[string]map[string]*FieldDescription
+}
+
+// extensionMap is a type alias for map[typeName]map[extensionName]*FieldDescription.
+type extensionMap = map[string]map[string]*FieldDescription
+
+var (
+ // DefaultDb is used at evaluation time unless overridden at check time.
+ DefaultDb = &Db{
+ revFileDescriptorMap: make(map[string]*FileDescription),
+ files: []*FileDescription{},
+ extensions: make(extensionMap),
+ }
+)
+
+// Merge will copy the source proto message into the destination, or error if the merge cannot be completed.
+//
+// Unlike proto.Merge, this method will fall back to proto.Marshal/Unmarshal if the two proto messages do not
+// share the same instance of their type descriptor.
+func Merge(dstPB, srcPB proto.Message) error {
+ src, dst := srcPB.ProtoReflect(), dstPB.ProtoReflect()
+ if src.Descriptor() == dst.Descriptor() {
+ proto.Merge(dstPB, srcPB)
+ return nil
+ }
+ if src.Descriptor().FullName() != dst.Descriptor().FullName() {
+ return fmt.Errorf("pb.Merge() arguments must be the same type. got: %v, %v",
+ dst.Descriptor().FullName(), src.Descriptor().FullName())
+ }
+ bytes, err := proto.Marshal(srcPB)
+ if err != nil {
+ return fmt.Errorf("pb.Merge(dstPB, srcPB) failed to marshal source proto: %v", err)
+ }
+ err = proto.Unmarshal(bytes, dstPB)
+ if err != nil {
+ return fmt.Errorf("pb.Merge(dstPB, srcPB) failed to unmarshal to dest proto: %v", err)
+ }
+ return nil
+}
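+
+// Illustrative example (not part of the upstream source): when both arguments
+// share a descriptor instance the fast proto.Merge path is taken; the
+// marshal/unmarshal fallback only triggers for independently constructed
+// descriptors (e.g. built via protodesc from a FileDescriptorProto):
+//
+//	dst := &durpb.Duration{Seconds: 1}
+//	err := Merge(dst, &durpb.Duration{Nanos: 500})
+//	// err == nil; dst is now {Seconds: 1, Nanos: 500}.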
+
+// NewDb creates a new `pb.Db` with an empty type name to file description map.
+func NewDb() *Db {
+ pbdb := &Db{
+ revFileDescriptorMap: make(map[string]*FileDescription),
+ files: []*FileDescription{},
+ extensions: make(extensionMap),
+ }
+ // The FileDescription objects in the default db contain lazily initialized TypeDescription
+ // values which may point to the state contained in the DefaultDb irrespective of this shallow
+ // copy; however, the type graph for a field is idempotently computed, and is guaranteed to
+ // only be initialized once thanks to atomic values within the TypeDescription objects, so it
+ // is safe to share these values across instances.
+ for k, v := range DefaultDb.revFileDescriptorMap {
+ pbdb.revFileDescriptorMap[k] = v
+ }
+ pbdb.files = append(pbdb.files, DefaultDb.files...)
+ return pbdb
+}
+
+// Copy creates a copy of the current database with its own internal descriptor mapping.
+func (pbdb *Db) Copy() *Db {
+ copy := NewDb()
+ for _, fd := range pbdb.files {
+ hasFile := false
+ for _, fd2 := range copy.files {
+ if fd2 == fd {
+ hasFile = true
+ }
+ }
+ if !hasFile {
+ fd = fd.Copy(copy)
+ copy.files = append(copy.files, fd)
+ }
+ for _, enumValName := range fd.GetEnumNames() {
+ copy.revFileDescriptorMap[enumValName] = fd
+ }
+ for _, msgTypeName := range fd.GetTypeNames() {
+ copy.revFileDescriptorMap[msgTypeName] = fd
+ }
+ copy.revFileDescriptorMap[fd.GetName()] = fd
+ }
+ for typeName, extFieldMap := range pbdb.extensions {
+ copyExtFieldMap, found := copy.extensions[typeName]
+ if !found {
+ copyExtFieldMap = make(map[string]*FieldDescription, len(extFieldMap))
+ }
+ for extFieldName, fd := range extFieldMap {
+ copyExtFieldMap[extFieldName] = fd
+ }
+ copy.extensions[typeName] = copyExtFieldMap
+ }
+ return copy
+}
+
+// FileDescriptions returns the set of file descriptions associated with this db.
+func (pbdb *Db) FileDescriptions() []*FileDescription {
+ return pbdb.files
+}
+
+// RegisterDescriptor produces a `FileDescription` from a `FileDescriptor` and registers the
+// message and enum types into the `pb.Db`.
+func (pbdb *Db) RegisterDescriptor(fileDesc protoreflect.FileDescriptor) (*FileDescription, error) {
+ fd, found := pbdb.revFileDescriptorMap[fileDesc.Path()]
+ if found {
+ return fd, nil
+ }
+ // Make sure to search the global registry to see if a protoreflect.FileDescriptor for
+ // the file specified has been linked into the binary. If so, use the copy of the descriptor
+ // from the global cache.
+ //
+ // Note: Proto reflection relies on descriptor values being object-equal rather than merely
+ // object-equivalent. This choice means that a FieldDescriptor generated from a FileDescriptorProto
+ // will be incompatible with the FieldDescriptor in the global registry and any message created
+ // from that global registry.
+ globalFD, err := protoregistry.GlobalFiles.FindFileByPath(fileDesc.Path())
+ if err == nil {
+ fileDesc = globalFD
+ }
+ var fileExtMap extensionMap
+ fd, fileExtMap = newFileDescription(fileDesc, pbdb)
+ for _, enumValName := range fd.GetEnumNames() {
+ pbdb.revFileDescriptorMap[enumValName] = fd
+ }
+ for _, msgTypeName := range fd.GetTypeNames() {
+ pbdb.revFileDescriptorMap[msgTypeName] = fd
+ }
+ pbdb.revFileDescriptorMap[fd.GetName()] = fd
+
+ // Return the specific file descriptor registered.
+ pbdb.files = append(pbdb.files, fd)
+
+ // Index the protobuf message extensions from the file into the pbdb
+ for typeName, extMap := range fileExtMap {
+ typeExtMap, found := pbdb.extensions[typeName]
+ if !found {
+ pbdb.extensions[typeName] = extMap
+ continue
+ }
+ for extName, field := range extMap {
+ typeExtMap[extName] = field
+ }
+ }
+ return fd, nil
+}
+
+// RegisterMessage produces a `FileDescription` from a `message` and registers the message and all
+// other definitions within the message file into the `pb.Db`.
+func (pbdb *Db) RegisterMessage(message proto.Message) (*FileDescription, error) {
+ msgDesc := message.ProtoReflect().Descriptor()
+ msgName := msgDesc.FullName()
+ typeName := sanitizeProtoName(string(msgName))
+ if fd, found := pbdb.revFileDescriptorMap[typeName]; found {
+ return fd, nil
+ }
+ return pbdb.RegisterDescriptor(msgDesc.ParentFile())
+}
+
+// DescribeEnum takes a qualified enum name and returns an `EnumValueDescription` if it exists in the
+// `pb.Db`.
+func (pbdb *Db) DescribeEnum(enumName string) (*EnumValueDescription, bool) {
+ enumName = sanitizeProtoName(enumName)
+ if fd, found := pbdb.revFileDescriptorMap[enumName]; found {
+ return fd.GetEnumDescription(enumName)
+ }
+ return nil, false
+}
+
+// DescribeType returns a `TypeDescription` for the `typeName` if it exists in the `pb.Db`.
+func (pbdb *Db) DescribeType(typeName string) (*TypeDescription, bool) {
+ typeName = sanitizeProtoName(typeName)
+ if fd, found := pbdb.revFileDescriptorMap[typeName]; found {
+ return fd.GetTypeDescription(typeName)
+ }
+ return nil, false
+}
+
+// CollectFileDescriptorSet builds a file descriptor set associated with the file where the input
+// message is declared.
+func CollectFileDescriptorSet(message proto.Message) map[string]protoreflect.FileDescriptor {
+ fdMap := map[string]protoreflect.FileDescriptor{}
+ parentFile := message.ProtoReflect().Descriptor().ParentFile()
+ fdMap[parentFile.Path()] = parentFile
+ // Initialize list of dependencies
+ deps := make([]protoreflect.FileImport, parentFile.Imports().Len())
+ for i := 0; i < parentFile.Imports().Len(); i++ {
+ deps[i] = parentFile.Imports().Get(i)
+ }
+ // Expand list for new dependencies
+ for i := 0; i < len(deps); i++ {
+ dep := deps[i]
+ if _, found := fdMap[dep.Path()]; found {
+ continue
+ }
+ fdMap[dep.Path()] = dep.FileDescriptor
+ for j := 0; j < dep.FileDescriptor.Imports().Len(); j++ {
+ deps = append(deps, dep.FileDescriptor.Imports().Get(j))
+ }
+ }
+ return fdMap
+}
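+
+// Illustrative example (not part of the upstream source): the returned map is
+// keyed by file path and includes all transitive imports:
+//
+//	fds := CollectFileDescriptorSet(&anypb.Any{})
+//	// fds["google/protobuf/any.proto"] is the file declaring Any, along with
+//	// any files it transitively imports.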
+
+func init() {
+ // Describe well-known types to ensure they can always be resolved by the check and interpret
+ // execution phases.
+ //
+ // The following subset of message types is enough to ensure that all well-known types can
+ // resolved in the runtime, since describing the value results in describing the whole file
+ // where the message is declared.
+ DefaultDb.RegisterMessage(&anypb.Any{})
+ DefaultDb.RegisterMessage(&durpb.Duration{})
+ DefaultDb.RegisterMessage(&emptypb.Empty{})
+ DefaultDb.RegisterMessage(&tspb.Timestamp{})
+ DefaultDb.RegisterMessage(&structpb.Value{})
+ DefaultDb.RegisterMessage(&wrapperspb.BoolValue{})
+}
diff --git a/vendor/github.com/google/cel-go/common/types/pb/type.go b/vendor/github.com/google/cel-go/common/types/pb/type.go
new file mode 100644
index 0000000..6cc95c2
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/pb/type.go
@@ -0,0 +1,587 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pb
+
+import (
+ "fmt"
+ "reflect"
+
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ dynamicpb "google.golang.org/protobuf/types/dynamicpb"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ dpb "google.golang.org/protobuf/types/known/durationpb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ tpb "google.golang.org/protobuf/types/known/timestamppb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// description is a private interface used to make it convenient to perform type unwrapping at
+// the TypeDescription or FieldDescription level.
+type description interface {
+ // Zero returns an empty immutable protobuf message when the description is a protobuf message
+ // type.
+ Zero() proto.Message
+}
+
+// newTypeDescription produces a TypeDescription value for the fully-qualified proto type name
+// with a given descriptor.
+func newTypeDescription(typeName string, desc protoreflect.MessageDescriptor, extensions extensionMap) *TypeDescription {
+ msgType := dynamicpb.NewMessageType(desc)
+ msgZero := dynamicpb.NewMessage(desc)
+ fieldMap := map[string]*FieldDescription{}
+ fields := desc.Fields()
+ for i := 0; i < fields.Len(); i++ {
+ f := fields.Get(i)
+ fieldMap[string(f.Name())] = newFieldDescription(f)
+ }
+ return &TypeDescription{
+ typeName: typeName,
+ desc: desc,
+ msgType: msgType,
+ fieldMap: fieldMap,
+ extensions: extensions,
+ reflectType: reflectTypeOf(msgZero),
+ zeroMsg: zeroValueOf(msgZero),
+ }
+}
+
+// TypeDescription is a collection of type metadata relevant to expression
+// checking and evaluation.
+type TypeDescription struct {
+ typeName string
+ desc protoreflect.MessageDescriptor
+ msgType protoreflect.MessageType
+ fieldMap map[string]*FieldDescription
+ extensions extensionMap
+ reflectType reflect.Type
+ zeroMsg proto.Message
+}
+
+// Copy copies the type description with updated references to the Db.
+func (td *TypeDescription) Copy(pbdb *Db) *TypeDescription {
+ return &TypeDescription{
+ typeName: td.typeName,
+ desc: td.desc,
+ msgType: td.msgType,
+ fieldMap: td.fieldMap,
+ extensions: pbdb.extensions,
+ reflectType: td.reflectType,
+ zeroMsg: td.zeroMsg,
+ }
+}
+
+// FieldMap returns a string field name to FieldDescription map.
+func (td *TypeDescription) FieldMap() map[string]*FieldDescription {
+ return td.fieldMap
+}
+
+// FieldByName returns (FieldDescription, true) if the field name is declared within the type.
+func (td *TypeDescription) FieldByName(name string) (*FieldDescription, bool) {
+ fd, found := td.fieldMap[name]
+ if found {
+ return fd, true
+ }
+ extFieldMap, found := td.extensions[td.typeName]
+ if !found {
+ return nil, false
+ }
+ fd, found = extFieldMap[name]
+ return fd, found
+}
+
+// MaybeUnwrap accepts a proto message as input and unwraps it to a primitive CEL type if possible.
+//
+// This method returns the unwrapped value and 'true' if unwrapped, else the original value and 'false'.
+func (td *TypeDescription) MaybeUnwrap(msg proto.Message) (any, bool, error) {
+ return unwrap(td, msg)
+}
+
+// Name returns the fully-qualified name of the type.
+func (td *TypeDescription) Name() string {
+ return string(td.desc.FullName())
+}
+
+// New returns a mutable proto message.
+func (td *TypeDescription) New() protoreflect.Message {
+ return td.msgType.New()
+}
+
+// ReflectType returns the Golang reflect.Type for this type.
+func (td *TypeDescription) ReflectType() reflect.Type {
+ return td.reflectType
+}
+
+// Zero returns the zero proto.Message value for this type.
+func (td *TypeDescription) Zero() proto.Message {
+ return td.zeroMsg
+}
+
+// newFieldDescription creates a new field description from a protoreflect.FieldDescriptor.
+func newFieldDescription(fieldDesc protoreflect.FieldDescriptor) *FieldDescription {
+ var reflectType reflect.Type
+ var zeroMsg proto.Message
+ switch fieldDesc.Kind() {
+ case protoreflect.EnumKind:
+ reflectType = reflectTypeOf(protoreflect.EnumNumber(0))
+ case protoreflect.GroupKind, protoreflect.MessageKind:
+ zeroMsg = dynamicpb.NewMessage(fieldDesc.Message())
+ reflectType = reflectTypeOf(zeroMsg)
+ default:
+ reflectType = reflectTypeOf(fieldDesc.Default().Interface())
+ if fieldDesc.IsList() {
+ var elemValue protoreflect.Value
+ if fieldDesc.IsExtension() {
+ et := dynamicpb.NewExtensionType(fieldDesc)
+ elemValue = et.New().List().NewElement()
+ } else {
+ parentMsgType := fieldDesc.ContainingMessage()
+ parentMsg := dynamicpb.NewMessage(parentMsgType)
+ listField := parentMsg.NewField(fieldDesc).List()
+ elemValue = listField.NewElement()
+ }
+ elem := elemValue.Interface()
+ switch elemType := elem.(type) {
+ case protoreflect.Message:
+ elem = elemType.Interface()
+ }
+ reflectType = reflectTypeOf(elem)
+ }
+ }
+ // Ensure the list type is appropriately reflected as a Go-native list.
+ if fieldDesc.IsList() {
+ reflectType = reflect.SliceOf(reflectType)
+ }
+ var keyType, valType *FieldDescription
+ if fieldDesc.IsMap() {
+ keyType = newFieldDescription(fieldDesc.MapKey())
+ valType = newFieldDescription(fieldDesc.MapValue())
+ }
+ return &FieldDescription{
+ desc: fieldDesc,
+ KeyType: keyType,
+ ValueType: valType,
+ reflectType: reflectType,
+ zeroMsg: zeroValueOf(zeroMsg),
+ }
+}
+
+// FieldDescription holds metadata related to fields declared within a type.
+type FieldDescription struct {
+ // KeyType holds the key FieldDescription for map fields.
+ KeyType *FieldDescription
+ // ValueType holds the value FieldDescription for map fields.
+ ValueType *FieldDescription
+
+ desc protoreflect.FieldDescriptor
+ reflectType reflect.Type
+ zeroMsg proto.Message
+}
+
+// CheckedType returns the type-definition used at type-check time.
+func (fd *FieldDescription) CheckedType() *exprpb.Type {
+ if fd.desc.IsMap() {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_MapType_{
+ MapType: &exprpb.Type_MapType{
+ KeyType: fd.KeyType.typeDefToType(),
+ ValueType: fd.ValueType.typeDefToType(),
+ },
+ },
+ }
+ }
+ if fd.desc.IsList() {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_ListType_{
+ ListType: &exprpb.Type_ListType{
+ ElemType: fd.typeDefToType()}}}
+ }
+ return fd.typeDefToType()
+}
+
+// Descriptor returns the protoreflect.FieldDescriptor for this type.
+func (fd *FieldDescription) Descriptor() protoreflect.FieldDescriptor {
+ return fd.desc
+}
+
+// IsSet returns whether the field is set on the target value, per the proto presence conventions
+// of proto2 or proto3 accordingly.
+//
+// This function implements the FieldType.IsSet function contract which can be used to operate on
+// more than just protobuf field accesses; however, the target here must be a protobuf.Message.
+func (fd *FieldDescription) IsSet(target any) bool {
+ switch v := target.(type) {
+ case proto.Message:
+ pbRef := v.ProtoReflect()
+ pbDesc := pbRef.Descriptor()
+ if pbDesc == fd.desc.ContainingMessage() {
+ // When the target protobuf shares the same message descriptor instance as the field
+ // descriptor, use the cached field descriptor value.
+ return pbRef.Has(fd.desc)
+ }
+ // Otherwise, fall back to a dynamic lookup of the field descriptor from the target
+ // instance, as an attempt to use the cached field descriptor would result in a panic.
+ return pbRef.Has(pbDesc.Fields().ByName(protoreflect.Name(fd.Name())))
+ default:
+ return false
+ }
+}
+
+// GetFrom returns the accessor method associated with the field on the proto generated struct.
+//
+// If the field is not set, the proto default value is returned instead.
+//
+// This function implements the FieldType.GetFrom function contract which can be used to operate
+// on more than just protobuf field accesses; however, the target here must be a protobuf.Message.
+func (fd *FieldDescription) GetFrom(target any) (any, error) {
+ v, ok := target.(proto.Message)
+ if !ok {
+ return nil, fmt.Errorf("unsupported field selection target: (%T)%v", target, target)
+ }
+ pbRef := v.ProtoReflect()
+ pbDesc := pbRef.Descriptor()
+ var fieldVal any
+ if pbDesc == fd.desc.ContainingMessage() {
+ // When the target protobuf shares the same message descriptor instance as the field
+ // descriptor, use the cached field descriptor value.
+ fieldVal = pbRef.Get(fd.desc).Interface()
+ } else {
+ // Otherwise, fall back to a dynamic lookup of the field descriptor from the target
+ // instance, as an attempt to use the cached field descriptor would result in a panic.
+ fieldVal = pbRef.Get(pbDesc.Fields().ByName(protoreflect.Name(fd.Name()))).Interface()
+ }
+ switch fv := fieldVal.(type) {
+ // Fast-path return for primitive types.
+ case bool, []byte, float32, float64, int32, int64, string, uint32, uint64, protoreflect.List:
+ return fv, nil
+ case protoreflect.EnumNumber:
+ return int64(fv), nil
+ case protoreflect.Map:
+ // Return a wrapper around the protobuf-reflected Map types which carries additional
+ // information about the key and value definitions of the map.
+ return &Map{Map: fv, KeyType: fd.KeyType, ValueType: fd.ValueType}, nil
+ case protoreflect.Message:
+ // Make sure to unwrap well-known protobuf types before returning.
+ unwrapped, _, err := fd.MaybeUnwrapDynamic(fv)
+ return unwrapped, err
+ default:
+ return fv, nil
+ }
+}
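+
+// Illustrative sketch (not part of the upstream source; the field name is an
+// assumption): scalar fields return their Go value directly, with the proto
+// default standing in for unset fields:
+//
+//	fd, _ := td.FieldByName("name") // hypothetical singular string field
+//	v, err := fd.GetFrom(msg)       // v is the string value, "" if unset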
+
+// IsEnum returns true if the field type refers to an enum value.
+func (fd *FieldDescription) IsEnum() bool {
+ return fd.ProtoKind() == protoreflect.EnumKind
+}
+
+// IsMap returns true if the field is of map type.
+func (fd *FieldDescription) IsMap() bool {
+ return fd.desc.IsMap()
+}
+
+// IsMessage returns true if the field is of message type.
+func (fd *FieldDescription) IsMessage() bool {
+ kind := fd.ProtoKind()
+ return kind == protoreflect.MessageKind || kind == protoreflect.GroupKind
+}
+
+// IsOneof returns true if the field is declared within a oneof block.
+func (fd *FieldDescription) IsOneof() bool {
+ return fd.desc.ContainingOneof() != nil
+}
+
+// IsList returns true if the field is a repeated value.
+//
+// This method will also return true for map values, so check whether the
+// field is also a map.
+func (fd *FieldDescription) IsList() bool {
+ return fd.desc.IsList()
+}
+
+// MaybeUnwrapDynamic takes the reflected protoreflect.Message and determines whether the
+// value can be unwrapped to a more primitive CEL type.
+//
+// This function returns the unwrapped value and 'true' on success, or the original value
+// and 'false' otherwise.
+func (fd *FieldDescription) MaybeUnwrapDynamic(msg protoreflect.Message) (any, bool, error) {
+ return unwrapDynamic(fd, msg)
+}
+
+// Name returns the name of the field as declared within the proto message.
+func (fd *FieldDescription) Name() string {
+ return string(fd.desc.Name())
+}
+
+// ProtoKind returns the protobuf reflected kind of the field.
+func (fd *FieldDescription) ProtoKind() protoreflect.Kind {
+ return fd.desc.Kind()
+}
+
+// ReflectType returns the Golang reflect.Type for this field.
+func (fd *FieldDescription) ReflectType() reflect.Type {
+ return fd.reflectType
+}
+
+// String returns the fully qualified name of the field within its type as well as whether the
+// field occurs within a oneof.
+func (fd *FieldDescription) String() string {
+ return fmt.Sprintf("%v.%s `oneof=%t`", fd.desc.ContainingMessage().FullName(), fd.Name(), fd.IsOneof())
+}
+
+// Zero returns the zero value for the protobuf message represented by this field.
+//
+// If the field is not a proto.Message type, the zero value is nil.
+func (fd *FieldDescription) Zero() proto.Message {
+ return fd.zeroMsg
+}
+
+func (fd *FieldDescription) typeDefToType() *exprpb.Type {
+ if fd.IsMessage() {
+ msgType := string(fd.desc.Message().FullName())
+ if wk, found := CheckedWellKnowns[msgType]; found {
+ return wk
+ }
+ return checkedMessageType(msgType)
+ }
+ if fd.IsEnum() {
+ return checkedInt
+ }
+ return CheckedPrimitives[fd.ProtoKind()]
+}
+
+// Map wraps the protoreflect.Map object with a key and value FieldDescription for use in
+// retrieving individual elements within CEL value data types.
+type Map struct {
+ protoreflect.Map
+ KeyType *FieldDescription
+ ValueType *FieldDescription
+}
+
+func checkedMessageType(name string) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_MessageType{MessageType: name}}
+}
+
+func checkedPrimitive(primitive exprpb.Type_PrimitiveType) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_Primitive{Primitive: primitive}}
+}
+
+func checkedWellKnown(wellKnown exprpb.Type_WellKnownType) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_WellKnown{WellKnown: wellKnown}}
+}
+
+func checkedWrap(t *exprpb.Type) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_Wrapper{Wrapper: t.GetPrimitive()}}
+}
+
+// unwrap unwraps the provided proto.Message value, potentially based on the description if the
+// input message is a *dynamicpb.Message which obscures the typing information from Go.
+//
+// Returns the unwrapped value and 'true' if unwrapped, otherwise the input value and 'false'.
+func unwrap(desc description, msg proto.Message) (any, bool, error) {
+ switch v := msg.(type) {
+ case *anypb.Any:
+ dynMsg, err := v.UnmarshalNew()
+ if err != nil {
+ return v, false, err
+ }
+ return unwrapDynamic(desc, dynMsg.ProtoReflect())
+ case *dynamicpb.Message:
+ return unwrapDynamic(desc, v)
+ case *dpb.Duration:
+ return v.AsDuration(), true, nil
+ case *tpb.Timestamp:
+ return v.AsTime(), true, nil
+ case *structpb.Value:
+ switch v.GetKind().(type) {
+ case *structpb.Value_BoolValue:
+ return v.GetBoolValue(), true, nil
+ case *structpb.Value_ListValue:
+ return v.GetListValue(), true, nil
+ case *structpb.Value_NullValue:
+ return structpb.NullValue_NULL_VALUE, true, nil
+ case *structpb.Value_NumberValue:
+ return v.GetNumberValue(), true, nil
+ case *structpb.Value_StringValue:
+ return v.GetStringValue(), true, nil
+ case *structpb.Value_StructValue:
+ return v.GetStructValue(), true, nil
+ default:
+ return structpb.NullValue_NULL_VALUE, true, nil
+ }
+ case *wrapperspb.BoolValue:
+ return v.GetValue(), true, nil
+ case *wrapperspb.BytesValue:
+ return v.GetValue(), true, nil
+ case *wrapperspb.DoubleValue:
+ return v.GetValue(), true, nil
+ case *wrapperspb.FloatValue:
+ return float64(v.GetValue()), true, nil
+ case *wrapperspb.Int32Value:
+ return int64(v.GetValue()), true, nil
+ case *wrapperspb.Int64Value:
+ return v.GetValue(), true, nil
+ case *wrapperspb.StringValue:
+ return v.GetValue(), true, nil
+ case *wrapperspb.UInt32Value:
+ return uint64(v.GetValue()), true, nil
+ case *wrapperspb.UInt64Value:
+ return v.GetValue(), true, nil
+ }
+ return msg, false, nil
+}
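+
+// Illustrative example (not part of the upstream source): wrapper and
+// well-known values unwrap to Go-native, CEL-friendly types. The description
+// argument is not consulted for these wrapper cases:
+//
+//	v, ok, _ := unwrap(fd, wrapperspb.Int32(42)) // v == int64(42), ok == true
+//	d, ok, _ := unwrap(fd, dpb.New(time.Second)) // d == time.Duration(time.Second)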
+
+// unwrapDynamic unwraps a reflected protobuf Message value.
+//
+// Returns the unwrapped value and 'true' if unwrapped, otherwise the input value and 'false'.
+func unwrapDynamic(desc description, refMsg protoreflect.Message) (any, bool, error) {
+ msg := refMsg.Interface()
+ if !refMsg.IsValid() {
+ msg = desc.Zero()
+ }
+ // In order to ensure that these wrapped types match the expectations of the CEL type system,
+ // the dynamicpb.Message must be merged with a protobuf instance of the well-known type value.
+ typeName := string(refMsg.Descriptor().FullName())
+ switch typeName {
+ case "google.protobuf.Any":
+ // Note, Any values require further unwrapping; however, this unwrapping may or may not
+ // be to a well-known type. If the unwrapped value is a well-known type it will be further
+ // unwrapped before being returned to the caller. Otherwise, the dynamic protobuf object
+ // represented by the Any will be returned.
+ unwrappedAny := &anypb.Any{}
+ err := Merge(unwrappedAny, msg)
+ if err != nil {
+ return nil, false, fmt.Errorf("unwrap dynamic field failed: %v", err)
+ }
+ dynMsg, err := unwrappedAny.UnmarshalNew()
+ if err != nil {
+ // Allow the error to move further up the stack as it should result in a type
+ // conversion error if the caller does not recover it somehow.
+ return nil, false, fmt.Errorf("unmarshal dynamic any failed: %v", err)
+ }
+ // Attempt to unwrap the dynamic type, otherwise return the dynamic message.
+ unwrapped, nested, err := unwrapDynamic(desc, dynMsg.ProtoReflect())
+ if err == nil && nested {
+ return unwrapped, true, nil
+ }
+ return dynMsg, true, err
+ case "google.protobuf.BoolValue",
+ "google.protobuf.BytesValue",
+ "google.protobuf.DoubleValue",
+ "google.protobuf.FloatValue",
+ "google.protobuf.Int32Value",
+ "google.protobuf.Int64Value",
+ "google.protobuf.StringValue",
+ "google.protobuf.UInt32Value",
+ "google.protobuf.UInt64Value":
+ // The msg value is ignored when dealing with wrapper types as they have a null or value
+ // behavior, rather than the standard zero value behavior of other proto message types.
+ if !refMsg.IsValid() {
+ return structpb.NullValue_NULL_VALUE, true, nil
+ }
+ valueField := refMsg.Descriptor().Fields().ByName("value")
+ return refMsg.Get(valueField).Interface(), true, nil
+ case "google.protobuf.Duration":
+ unwrapped := &dpb.Duration{}
+ err := Merge(unwrapped, msg)
+ if err != nil {
+ return nil, false, err
+ }
+ return unwrapped.AsDuration(), true, nil
+ case "google.protobuf.ListValue":
+ unwrapped := &structpb.ListValue{}
+ err := Merge(unwrapped, msg)
+ if err != nil {
+ return nil, false, err
+ }
+ return unwrapped, true, nil
+ case "google.protobuf.NullValue":
+ return structpb.NullValue_NULL_VALUE, true, nil
+ case "google.protobuf.Struct":
+ unwrapped := &structpb.Struct{}
+ err := Merge(unwrapped, msg)
+ if err != nil {
+ return nil, false, err
+ }
+ return unwrapped, true, nil
+ case "google.protobuf.Timestamp":
+ unwrapped := &tpb.Timestamp{}
+ err := Merge(unwrapped, msg)
+ if err != nil {
+ return nil, false, err
+ }
+ return unwrapped.AsTime(), true, nil
+ case "google.protobuf.Value":
+ unwrapped := &structpb.Value{}
+ err := Merge(unwrapped, msg)
+ if err != nil {
+ return nil, false, err
+ }
+ return unwrap(desc, unwrapped)
+ }
+ return msg, false, nil
+}
+
+// reflectTypeOf intercepts the reflect.Type call to ensure that dynamicpb.Message types preserve
+// well-known protobuf reflected types expected by the CEL type system.
+func reflectTypeOf(val any) reflect.Type {
+ switch v := val.(type) {
+ case proto.Message:
+ return reflect.TypeOf(zeroValueOf(v))
+ default:
+ return reflect.TypeOf(v)
+ }
+}
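+
+// For example, a dynamicpb.Message whose descriptor is google.protobuf.Duration
+// reports reflect.TypeOf(&dpb.Duration{}) here via zeroValueOf, which is the
+// reflected type the CEL type system expects for durations.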
+
+// zeroValueOf returns the most specific proto.Message representing the default
+// protobuf message value of the input msg type.
+func zeroValueOf(msg proto.Message) proto.Message {
+ if msg == nil {
+ return nil
+ }
+ typeName := string(msg.ProtoReflect().Descriptor().FullName())
+ zeroVal, found := zeroValueMap[typeName]
+ if found {
+ return zeroVal
+ }
+ return msg
+}
+
+var (
+ jsonValueTypeURL = "types.googleapis.com/google.protobuf.Value"
+
+ zeroValueMap = map[string]proto.Message{
+ "google.protobuf.Any": &anypb.Any{TypeUrl: jsonValueTypeURL},
+ "google.protobuf.Duration": &dpb.Duration{},
+ "google.protobuf.ListValue": &structpb.ListValue{},
+ "google.protobuf.Struct": &structpb.Struct{},
+ "google.protobuf.Timestamp": &tpb.Timestamp{},
+ "google.protobuf.Value": &structpb.Value{},
+ "google.protobuf.BoolValue": wrapperspb.Bool(false),
+ "google.protobuf.BytesValue": wrapperspb.Bytes([]byte{}),
+ "google.protobuf.DoubleValue": wrapperspb.Double(0.0),
+ "google.protobuf.FloatValue": wrapperspb.Float(0.0),
+ "google.protobuf.Int32Value": wrapperspb.Int32(0),
+ "google.protobuf.Int64Value": wrapperspb.Int64(0),
+ "google.protobuf.StringValue": wrapperspb.String(""),
+ "google.protobuf.UInt32Value": wrapperspb.UInt32(0),
+ "google.protobuf.UInt64Value": wrapperspb.UInt64(0),
+ }
+)
diff --git a/vendor/github.com/google/cel-go/common/types/provider.go b/vendor/github.com/google/cel-go/common/types/provider.go
new file mode 100644
index 0000000..e80b462
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/provider.go
@@ -0,0 +1,713 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "time"
+
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ "github.com/google/cel-go/common/types/pb"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ dpb "google.golang.org/protobuf/types/known/durationpb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ tpb "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+// Adapter converts native Go values of varying type and complexity to equivalent CEL values.
+type Adapter = ref.TypeAdapter
+
+// Provider specifies functions for creating new object instances and for resolving
+// enum values by name.
+type Provider interface {
+ // EnumValue returns the numeric value of the given enum value name.
+ EnumValue(enumName string) ref.Val
+
+ // FindIdent takes a qualified identifier name and returns a ref.Val if one exists.
+ FindIdent(identName string) (ref.Val, bool)
+
+ // FindStructType returns the Type given a qualified type name.
+ //
+ // For historical reasons, only struct types are expected to be returned through this
+ // method, and the type values are expected to be wrapped in a TypeType instance using
+ // TypeTypeWithParam().
+ //
+ // Returns false if not found.
+ FindStructType(structType string) (*Type, bool)
+
+ // FindStructFieldType returns the field type for a checked type value. Returns
+ // false if the field could not be found.
+ FindStructFieldType(structType, fieldName string) (*FieldType, bool)
+
+ // NewValue creates a new type value from a qualified name and map of field
+ // name to value.
+ //
+ // Note, for each value, the Val.ConvertToNative function will be invoked
+ // to convert the Val to the field's native type. If an error occurs during
+ // conversion, the NewValue will be a types.Err.
+ NewValue(structType string, fields map[string]ref.Val) ref.Val
+}
+
+// FieldType represents a field's type value and whether that field supports presence detection.
+type FieldType struct {
+ // Type of the field as a CEL native type value.
+ Type *Type
+
+ // IsSet indicates whether the field is set on an input object.
+ IsSet ref.FieldTester
+
+ // GetFrom retrieves the field value on the input object, if set.
+ GetFrom ref.FieldGetter
+}
+
+// Registry provides type information for a set of registered types.
+type Registry struct {
+ revTypeMap map[string]*Type
+ pbdb *pb.Db
+}
+
+// NewRegistry accepts a list of proto message instances and returns a type
+// provider which can create new instances of the provided message or any
+// message that proto depends upon in its FileDescriptor.
+func NewRegistry(types ...proto.Message) (*Registry, error) {
+ p := &Registry{
+ revTypeMap: make(map[string]*Type),
+ pbdb: pb.NewDb(),
+ }
+ err := p.RegisterType(
+ BoolType,
+ BytesType,
+ DoubleType,
+ DurationType,
+ IntType,
+ ListType,
+ MapType,
+ NullType,
+ StringType,
+ TimestampType,
+ TypeType,
+ UintType)
+ if err != nil {
+ return nil, err
+ }
+ // This block ensures that the well-known protobuf types are registered by default.
+ for _, fd := range p.pbdb.FileDescriptions() {
+ err = p.registerAllTypes(fd)
+ if err != nil {
+ return nil, err
+ }
+ }
+ for _, msgType := range types {
+ err = p.RegisterMessage(msgType)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return p, nil
+}
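+
+// Illustrative usage, where mypb.MyMessage stands in for any generated proto type:
+//
+//	reg, err := NewRegistry(&mypb.MyMessage{})
+//	if err != nil {
+//		// handle registration failure
+//	}
+//	val := reg.NativeToValue(&mypb.MyMessage{}) // a CEL object value for the message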
+
+// NewEmptyRegistry returns a registry which is completely unconfigured.
+func NewEmptyRegistry() *Registry {
+ return &Registry{
+ revTypeMap: make(map[string]*Type),
+ pbdb: pb.NewDb(),
+ }
+}
+
+// Copy copies the current state of the registry into its own memory space.
+func (p *Registry) Copy() *Registry {
+ copy := &Registry{
+ revTypeMap: make(map[string]*Type),
+ pbdb: p.pbdb.Copy(),
+ }
+ for k, v := range p.revTypeMap {
+ copy.revTypeMap[k] = v
+ }
+ return copy
+}
+
+// EnumValue returns the numeric value of the given enum value name.
+func (p *Registry) EnumValue(enumName string) ref.Val {
+ enumVal, found := p.pbdb.DescribeEnum(enumName)
+ if !found {
+ return NewErr("unknown enum name '%s'", enumName)
+ }
+ return Int(enumVal.Value())
+}
+
+// FindFieldType returns the field type for a checked type value. Returns false if
+// the field could not be found.
+//
+// Deprecated: use FindStructFieldType
+func (p *Registry) FindFieldType(structType, fieldName string) (*ref.FieldType, bool) {
+ msgType, found := p.pbdb.DescribeType(structType)
+ if !found {
+ return nil, false
+ }
+ field, found := msgType.FieldByName(fieldName)
+ if !found {
+ return nil, false
+ }
+ return &ref.FieldType{
+ Type: field.CheckedType(),
+ IsSet: field.IsSet,
+ GetFrom: field.GetFrom}, true
+}
+
+// FindStructFieldType returns the field type for a checked type value. Returns
+// false if the field could not be found.
+func (p *Registry) FindStructFieldType(structType, fieldName string) (*FieldType, bool) {
+ msgType, found := p.pbdb.DescribeType(structType)
+ if !found {
+ return nil, false
+ }
+ field, found := msgType.FieldByName(fieldName)
+ if !found {
+ return nil, false
+ }
+ return &FieldType{
+ Type: fieldDescToCELType(field),
+ IsSet: field.IsSet,
+ GetFrom: field.GetFrom}, true
+}
+
+// FindIdent takes a qualified identifier name and returns a ref.Val if one exists.
+func (p *Registry) FindIdent(identName string) (ref.Val, bool) {
+ if t, found := p.revTypeMap[identName]; found {
+ return t, true
+ }
+ if enumVal, found := p.pbdb.DescribeEnum(identName); found {
+ return Int(enumVal.Value()), true
+ }
+ return nil, false
+}
+
+// FindType looks up the Type given a qualified typeName. Returns false if not found.
+//
+// Deprecated: use FindStructType
+func (p *Registry) FindType(structType string) (*exprpb.Type, bool) {
+ if _, found := p.pbdb.DescribeType(structType); !found {
+ return nil, false
+ }
+ if structType != "" && structType[0] == '.' {
+ structType = structType[1:]
+ }
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_Type{
+ Type: &exprpb.Type{
+ TypeKind: &exprpb.Type_MessageType{
+ MessageType: structType}}}}, true
+}
+
+// FindStructType returns the Type given a qualified type name.
+//
+// For historical reasons, only struct types are expected to be returned through this
+// method, and the type values are expected to be wrapped in a TypeType instance using
+// TypeTypeWithParam().
+//
+// Returns false if not found.
+func (p *Registry) FindStructType(structType string) (*Type, bool) {
+ if _, found := p.pbdb.DescribeType(structType); !found {
+ return nil, false
+ }
+ if structType != "" && structType[0] == '.' {
+ structType = structType[1:]
+ }
+ return NewTypeTypeWithParam(NewObjectType(structType)), true
+}
+
+// NewValue creates a new type value from a qualified name and map of field
+// name to value.
+//
+// Note, for each value, the Val.ConvertToNative function will be invoked
+// to convert the Val to the field's native type. If an error occurs during
+// conversion, the NewValue will be a types.Err.
+func (p *Registry) NewValue(structType string, fields map[string]ref.Val) ref.Val {
+ td, found := p.pbdb.DescribeType(structType)
+ if !found {
+ return NewErr("unknown type '%s'", structType)
+ }
+ msg := td.New()
+ fieldMap := td.FieldMap()
+ for name, value := range fields {
+ field, found := fieldMap[name]
+ if !found {
+ return NewErr("no such field: %s", name)
+ }
+ err := msgSetField(msg, field, value)
+ if err != nil {
+ return &Err{err}
+ }
+ }
+ return p.NativeToValue(msg.Interface())
+}
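+
+// Illustrative sketch (reg is a *Registry as returned by NewRegistry): the
+// well-known types are registered by default, so a Duration can be built
+// field-by-field and is returned as a CEL duration value:
+//
+//	v := reg.NewValue("google.protobuf.Duration", map[string]ref.Val{
+//		"seconds": Int(5),
+//	})
+//	// v is the CEL duration 5s; an unknown field or bad value yields a types.Err.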
+
+// RegisterDescriptor registers the contents of a protocol buffer `FileDescriptor`.
+func (p *Registry) RegisterDescriptor(fileDesc protoreflect.FileDescriptor) error {
+ fd, err := p.pbdb.RegisterDescriptor(fileDesc)
+ if err != nil {
+ return err
+ }
+ return p.registerAllTypes(fd)
+}
+
+// RegisterMessage registers a protocol buffer message and its dependencies.
+func (p *Registry) RegisterMessage(message proto.Message) error {
+ fd, err := p.pbdb.RegisterMessage(message)
+ if err != nil {
+ return err
+ }
+ return p.registerAllTypes(fd)
+}
+
+// RegisterType registers a type value with the provider which ensures the provider is aware of how to
+// map the type to an identifier.
+//
+// If the `ref.Type` value is a `*types.Type` it will be registered directly by its runtime type name.
+// If the `ref.Type` value is not a `*types.Type` instance, a `*types.Type` instance which reflects
+// the traits present on the input and the runtime type name is generated. By default this foreign
+// type will be treated as a types.StructKind. To avoid potential issues where the `ref.Type` value
+// does not match the generated `*types.Type` instance, consider always using `*types.Type` to
+// represent type extensions to CEL, even when they're not based on protobuf types.
+func (p *Registry) RegisterType(types ...ref.Type) error {
+ for _, t := range types {
+ celType := maybeForeignType(t)
+ existing, found := p.revTypeMap[t.TypeName()]
+ if !found {
+ p.revTypeMap[t.TypeName()] = celType
+ continue
+ }
+ if !existing.IsEquivalentType(celType) {
+ return fmt.Errorf("type registration conflict. found: %v, input: %v", existing, celType)
+ }
+ if existing.traitMask != celType.traitMask {
+ return fmt.Errorf(
+ "type registered with conflicting traits: %v with traits %v, input: %v",
+ existing.TypeName(), existing.traitMask, celType.traitMask)
+ }
+ }
+ return nil
+}
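+
+// Illustrative sketch of the conflict handling: re-registering an equivalent
+// type is a no-op, while a different definition under the same name errors
+// (assuming NewOpaqueType produces a non-equivalent "bool" definition):
+//
+//	_ = reg.RegisterType(BoolType)                 // ok: matches the existing entry
+//	err := reg.RegisterType(NewOpaqueType("bool")) // error: type registration conflict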
+
+// NativeToValue converts various "native" types to ref.Val with this specific implementation
+// providing support for custom proto-based types.
+//
+// This method should be the inverse of ref.Val.ConvertToNative.
+func (p *Registry) NativeToValue(value any) ref.Val {
+ if val, found := nativeToValue(p, value); found {
+ return val
+ }
+ switch v := value.(type) {
+ case proto.Message:
+ typeName := string(v.ProtoReflect().Descriptor().FullName())
+ td, found := p.pbdb.DescribeType(typeName)
+ if !found {
+ return NewErr("unknown type: '%s'", typeName)
+ }
+ unwrapped, isUnwrapped, err := td.MaybeUnwrap(v)
+ if err != nil {
+ return UnsupportedRefValConversionErr(v)
+ }
+ if isUnwrapped {
+ return p.NativeToValue(unwrapped)
+ }
+ typeVal, found := p.FindIdent(typeName)
+ if !found {
+ return NewErr("unknown type: '%s'", typeName)
+ }
+ return NewObject(p, td, typeVal, v)
+ case *pb.Map:
+ return NewProtoMap(p, v)
+ case protoreflect.List:
+ return NewProtoList(p, v)
+ case protoreflect.Message:
+ return p.NativeToValue(v.Interface())
+ case protoreflect.Value:
+ return p.NativeToValue(v.Interface())
+ }
+ return UnsupportedRefValConversionErr(value)
+}
+
+func (p *Registry) registerAllTypes(fd *pb.FileDescription) error {
+ for _, typeName := range fd.GetTypeNames() {
+ // skip well-known type names since they're automatically sanitized
+ // during NewObjectType() calls.
+ if _, found := checkedWellKnowns[typeName]; found {
+ continue
+ }
+ err := p.RegisterType(NewObjectTypeValue(typeName))
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func fieldDescToCELType(field *pb.FieldDescription) *Type {
+ if field.IsMap() {
+ return NewMapType(
+ singularFieldDescToCELType(field.KeyType),
+ singularFieldDescToCELType(field.ValueType))
+ }
+ if field.IsList() {
+ return NewListType(singularFieldDescToCELType(field))
+ }
+ return singularFieldDescToCELType(field)
+}
+
+func singularFieldDescToCELType(field *pb.FieldDescription) *Type {
+ if field.IsMessage() {
+ return NewObjectType(string(field.Descriptor().Message().FullName()))
+ }
+ if field.IsEnum() {
+ return IntType
+ }
+ return ProtoCELPrimitives[field.ProtoKind()]
+}
+
+// defaultTypeAdapter converts go native types to CEL values.
+type defaultTypeAdapter struct{}
+
+var (
+ // DefaultTypeAdapter adapts canonical CEL types from their equivalent Go values.
+ DefaultTypeAdapter = &defaultTypeAdapter{}
+)
+
+// NativeToValue implements the ref.TypeAdapter interface.
+func (a *defaultTypeAdapter) NativeToValue(value any) ref.Val {
+ if val, found := nativeToValue(a, value); found {
+ return val
+ }
+ return UnsupportedRefValConversionErr(value)
+}
+
+// nativeToValue returns the converted (ref.Val, true) if a conversion is found,
+// otherwise (nil, false).
+func nativeToValue(a Adapter, value any) (ref.Val, bool) {
+ switch v := value.(type) {
+ case nil:
+ return NullValue, true
+ case *Bool:
+ if v != nil {
+ return *v, true
+ }
+ case *Bytes:
+ if v != nil {
+ return *v, true
+ }
+ case *Double:
+ if v != nil {
+ return *v, true
+ }
+ case *Int:
+ if v != nil {
+ return *v, true
+ }
+ case *String:
+ if v != nil {
+ return *v, true
+ }
+ case *Uint:
+ if v != nil {
+ return *v, true
+ }
+ case bool:
+ return Bool(v), true
+ case int:
+ return Int(v), true
+ case int32:
+ return Int(v), true
+ case int64:
+ return Int(v), true
+ case uint:
+ return Uint(v), true
+ case uint32:
+ return Uint(v), true
+ case uint64:
+ return Uint(v), true
+ case float32:
+ return Double(v), true
+ case float64:
+ return Double(v), true
+ case string:
+ return String(v), true
+ case *dpb.Duration:
+ return Duration{Duration: v.AsDuration()}, true
+ case time.Duration:
+ return Duration{Duration: v}, true
+ case *tpb.Timestamp:
+ return Timestamp{Time: v.AsTime()}, true
+ case time.Time:
+ return Timestamp{Time: v}, true
+ case *bool:
+ if v != nil {
+ return Bool(*v), true
+ }
+ case *float32:
+ if v != nil {
+ return Double(*v), true
+ }
+ case *float64:
+ if v != nil {
+ return Double(*v), true
+ }
+ case *int:
+ if v != nil {
+ return Int(*v), true
+ }
+ case *int32:
+ if v != nil {
+ return Int(*v), true
+ }
+ case *int64:
+ if v != nil {
+ return Int(*v), true
+ }
+ case *string:
+ if v != nil {
+ return String(*v), true
+ }
+ case *uint:
+ if v != nil {
+ return Uint(*v), true
+ }
+ case *uint32:
+ if v != nil {
+ return Uint(*v), true
+ }
+ case *uint64:
+ if v != nil {
+ return Uint(*v), true
+ }
+ case []byte:
+ return Bytes(v), true
+ // specializations for common list types.
+ case []string:
+ return NewStringList(a, v), true
+ case []ref.Val:
+ return NewRefValList(a, v), true
+ // specializations for common map types.
+ case map[string]string:
+ return NewStringStringMap(a, v), true
+ case map[string]any:
+ return NewStringInterfaceMap(a, v), true
+ case map[ref.Val]ref.Val:
+ return NewRefValMap(a, v), true
+ // additional specializations may be added upon request / need.
+ case *anypb.Any:
+ if v == nil {
+ return UnsupportedRefValConversionErr(v), true
+ }
+ unpackedAny, err := v.UnmarshalNew()
+ if err != nil {
+ return NewErr("anypb.UnmarshalNew() failed for type %q: %v", v.GetTypeUrl(), err), true
+ }
+ return a.NativeToValue(unpackedAny), true
+ case *structpb.NullValue, structpb.NullValue:
+ return NullValue, true
+ case *structpb.ListValue:
+ return NewJSONList(a, v), true
+ case *structpb.Struct:
+ return NewJSONStruct(a, v), true
+ case ref.Val:
+ return v, true
+ case protoreflect.EnumNumber:
+ return Int(v), true
+ case proto.Message:
+ if v == nil {
+ return UnsupportedRefValConversionErr(v), true
+ }
+ typeName := string(v.ProtoReflect().Descriptor().FullName())
+ td, found := pb.DefaultDb.DescribeType(typeName)
+ if !found {
+ return nil, false
+ }
+ val, unwrapped, err := td.MaybeUnwrap(v)
+ if err != nil {
+ return UnsupportedRefValConversionErr(v), true
+ }
+ if !unwrapped {
+ return nil, false
+ }
+ return a.NativeToValue(val), true
+ // Note: dynamicpb.Message implements the proto.Message _and_ protoreflect.Message interfaces
+ // which means that this case must appear after handling a proto.Message type.
+ case protoreflect.Message:
+ return a.NativeToValue(v.Interface()), true
+ default:
+ refValue := reflect.ValueOf(v)
+ if refValue.Kind() == reflect.Ptr {
+ if refValue.IsNil() {
+ return UnsupportedRefValConversionErr(v), true
+ }
+ refValue = refValue.Elem()
+ }
+ refKind := refValue.Kind()
+ switch refKind {
+ case reflect.Array, reflect.Slice:
+ return NewDynamicList(a, v), true
+ case reflect.Map:
+ return NewDynamicMap(a, v), true
+ // type aliases of primitive types cannot be asserted as that type, but rather need
+ // to be downcast to int32 before being converted to a CEL representation.
+ case reflect.Int32:
+ intType := reflect.TypeOf(int32(0))
+ return Int(refValue.Convert(intType).Interface().(int32)), true
+ case reflect.Int64:
+ intType := reflect.TypeOf(int64(0))
+ return Int(refValue.Convert(intType).Interface().(int64)), true
+ case reflect.Uint32:
+ uintType := reflect.TypeOf(uint32(0))
+ return Uint(refValue.Convert(uintType).Interface().(uint32)), true
+ case reflect.Uint64:
+ uintType := reflect.TypeOf(uint64(0))
+ return Uint(refValue.Convert(uintType).Interface().(uint64)), true
+ case reflect.Float32:
+ doubleType := reflect.TypeOf(float32(0))
+ return Double(refValue.Convert(doubleType).Interface().(float32)), true
+ case reflect.Float64:
+ doubleType := reflect.TypeOf(float64(0))
+ return Double(refValue.Convert(doubleType).Interface().(float64)), true
+ }
+ }
+ return nil, false
+}
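+
+// Illustrative sketch of the reflection fallback above: a named alias of a
+// primitive does not match the exact-type cases, so it is converted via reflect:
+//
+//	type myInt int32
+//	v := DefaultTypeAdapter.NativeToValue(myInt(7))      // Int(7)
+//	l := DefaultTypeAdapter.NativeToValue([]myInt{1, 2}) // list value via NewDynamicList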
+
+func msgSetField(target protoreflect.Message, field *pb.FieldDescription, val ref.Val) error {
+ if field.IsList() {
+ lv := target.NewField(field.Descriptor())
+ list, ok := val.(traits.Lister)
+ if !ok {
+ return unsupportedTypeConversionError(field, val)
+ }
+ err := msgSetListField(lv.List(), field, list)
+ if err != nil {
+ return err
+ }
+ target.Set(field.Descriptor(), lv)
+ return nil
+ }
+ if field.IsMap() {
+ mv := target.NewField(field.Descriptor())
+ mp, ok := val.(traits.Mapper)
+ if !ok {
+ return unsupportedTypeConversionError(field, val)
+ }
+ err := msgSetMapField(mv.Map(), field, mp)
+ if err != nil {
+ return err
+ }
+ target.Set(field.Descriptor(), mv)
+ return nil
+ }
+ v, err := val.ConvertToNative(field.ReflectType())
+ if err != nil {
+ return fieldTypeConversionError(field, err)
+ }
+ if v == nil {
+ return nil
+ }
+ switch pv := v.(type) {
+ case proto.Message:
+ v = pv.ProtoReflect()
+ }
+ target.Set(field.Descriptor(), protoreflect.ValueOf(v))
+ return nil
+}
+
+func msgSetListField(target protoreflect.List, listField *pb.FieldDescription, listVal traits.Lister) error {
+ elemReflectType := listField.ReflectType().Elem()
+ for i := Int(0); i < listVal.Size().(Int); i++ {
+ elem := listVal.Get(i)
+ elemVal, err := elem.ConvertToNative(elemReflectType)
+ if err != nil {
+ return fieldTypeConversionError(listField, err)
+ }
+ if elemVal == nil {
+ continue
+ }
+ switch ev := elemVal.(type) {
+ case proto.Message:
+ elemVal = ev.ProtoReflect()
+ }
+ target.Append(protoreflect.ValueOf(elemVal))
+ }
+ return nil
+}
+
+func msgSetMapField(target protoreflect.Map, mapField *pb.FieldDescription, mapVal traits.Mapper) error {
+ targetKeyType := mapField.KeyType.ReflectType()
+ targetValType := mapField.ValueType.ReflectType()
+ it := mapVal.Iterator()
+ for it.HasNext() == True {
+ key := it.Next()
+ val := mapVal.Get(key)
+ k, err := key.ConvertToNative(targetKeyType)
+ if err != nil {
+ return fieldTypeConversionError(mapField, err)
+ }
+ v, err := val.ConvertToNative(targetValType)
+ if err != nil {
+ return fieldTypeConversionError(mapField, err)
+ }
+ if v == nil {
+ continue
+ }
+ switch pv := v.(type) {
+ case proto.Message:
+ v = pv.ProtoReflect()
+ }
+ target.Set(protoreflect.ValueOf(k).MapKey(), protoreflect.ValueOf(v))
+ }
+ return nil
+}
+
+func unsupportedTypeConversionError(field *pb.FieldDescription, val ref.Val) error {
+ msgName := field.Descriptor().ContainingMessage().FullName()
+ return fmt.Errorf("unsupported field type for %v.%v: %v", msgName, field.Name(), val.Type())
+}
+
+func fieldTypeConversionError(field *pb.FieldDescription, err error) error {
+ msgName := field.Descriptor().ContainingMessage().FullName()
+ return fmt.Errorf("field type conversion error for %v.%v value type: %v", msgName, field.Name(), err)
+}
+
+var (
+ // ProtoCELPrimitives provides a map from the protoreflect Kind to the equivalent CEL type.
+ ProtoCELPrimitives = map[protoreflect.Kind]*Type{
+ protoreflect.BoolKind: BoolType,
+ protoreflect.BytesKind: BytesType,
+ protoreflect.DoubleKind: DoubleType,
+ protoreflect.FloatKind: DoubleType,
+ protoreflect.Int32Kind: IntType,
+ protoreflect.Int64Kind: IntType,
+ protoreflect.Sint32Kind: IntType,
+ protoreflect.Sint64Kind: IntType,
+ protoreflect.Uint32Kind: UintType,
+ protoreflect.Uint64Kind: UintType,
+ protoreflect.Fixed32Kind: UintType,
+ protoreflect.Fixed64Kind: UintType,
+ protoreflect.Sfixed32Kind: IntType,
+ protoreflect.Sfixed64Kind: IntType,
+ protoreflect.StringKind: StringType,
+ }
+)
diff --git a/vendor/github.com/google/cel-go/common/types/ref/BUILD.bazel b/vendor/github.com/google/cel-go/common/types/ref/BUILD.bazel
new file mode 100644
index 0000000..79330c3
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/ref/BUILD.bazel
@@ -0,0 +1,20 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "provider.go",
+ "reference.go",
+ ],
+ importpath = "github.com/google/cel-go/common/types/ref",
+ deps = [
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/common/types/ref/provider.go b/vendor/github.com/google/cel-go/common/types/ref/provider.go
new file mode 100644
index 0000000..b982002
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/ref/provider.go
@@ -0,0 +1,102 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ref
+
+import (
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// TypeProvider specifies functions for creating new object instances and for
+// resolving enum values by name.
+//
+// Deprecated: use types.Provider
+type TypeProvider interface {
+ // EnumValue returns the numeric value of the given enum value name.
+ EnumValue(enumName string) Val
+
+ // FindIdent takes a qualified identifier name and returns a Value if one exists.
+ FindIdent(identName string) (Val, bool)
+
+ // FindType looks up the Type given a qualified typeName. Returns false if not found.
+ FindType(typeName string) (*exprpb.Type, bool)
+
+ // FieldFieldType returns the field type for a checked type value. Returns false if
+ // the field could not be found.
+ FindFieldType(messageType, fieldName string) (*FieldType, bool)
+
+ // NewValue creates a new type value from a qualified name and map of field name
+ // to value.
+ //
+ // Note, for each value, the Val.ConvertToNative function will be invoked to convert
+ // the Val to the field's native type. If an error occurs during conversion, the
+ // NewValue will be a types.Err.
+ NewValue(typeName string, fields map[string]Val) Val
+}
+
+// TypeAdapter converts native Go values of varying type and complexity to equivalent CEL values.
+//
+// Deprecated: use types.Adapter
+type TypeAdapter interface {
+ // NativeToValue converts the input `value` to a CEL `ref.Val`.
+ NativeToValue(value any) Val
+}
+
+// TypeRegistry allows third-parties to add custom types to CEL. Not all `TypeProvider`
+// implementations support type-customization, so these features are optional. However, a
+// `TypeRegistry` should be a `TypeProvider` and a `TypeAdapter` to ensure that types
+// which are registered can be converted to CEL representations.
+//
+// Deprecated: use types.Registry
+type TypeRegistry interface {
+ TypeAdapter
+ TypeProvider
+
+ // RegisterDescriptor registers the contents of a protocol buffer `FileDescriptor`.
+ RegisterDescriptor(fileDesc protoreflect.FileDescriptor) error
+
+ // RegisterMessage registers a protocol buffer message and its dependencies.
+ RegisterMessage(message proto.Message) error
+
+ // RegisterType registers a type value with the provider which ensures the
+ // provider is aware of how to map the type to an identifier.
+ //
+ // If a type is provided more than once with an alternative definition, the
+ // call will result in an error.
+ RegisterType(types ...Type) error
+}
+
+// FieldType represents a field's type value and whether that field supports
+// presence detection.
+//
+// Deprecated: use types.FieldType
+type FieldType struct {
+ // Type of the field as a protobuf type value.
+ Type *exprpb.Type
+
+ // IsSet indicates whether the field is set on an input object.
+ IsSet FieldTester
+
+ // GetFrom retrieves the field value on the input object, if set.
+ GetFrom FieldGetter
+}
+
+// FieldTester is used to test field presence on an input object.
+type FieldTester func(target any) bool
+
+// FieldGetter is used to get the field value from an input object, if set.
+type FieldGetter func(target any) (any, error)
diff --git a/vendor/github.com/google/cel-go/common/types/ref/reference.go b/vendor/github.com/google/cel-go/common/types/ref/reference.go
new file mode 100644
index 0000000..e0d5814
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/ref/reference.go
@@ -0,0 +1,63 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package ref contains the reference interfaces used throughout the types components.
+package ref
+
+import (
+ "reflect"
+)
+
+// Type interface indicates the name of a given type.
+type Type interface {
+ // HasTrait returns whether the type has a given trait associated with it.
+ //
+ // See common/types/traits/traits.go for a list of supported traits.
+ HasTrait(trait int) bool
+
+ // TypeName returns the qualified type name of the type.
+ //
+ // The type name is also used as the type's identifier name at type-check and interpretation time.
+ TypeName() string
+}
+
+// Val interface defines the functions supported by all expression values.
+// Val implementations may specialize the behavior of the value through the addition of traits.
+type Val interface {
+ // ConvertToNative converts the Value to a native Go struct according to the
+ // reflected type description, or error if the conversion is not feasible.
+ //
+ // The ConvertToNative method is intended to be used to support conversion between CEL types
+ // and native types during object creation expressions or by clients who need to adapt the
+ // returned CEL value into an equivalent Go value instance.
+ //
+ // When implementing or using ConvertToNative, the following guidelines apply:
+ // - Use ConvertToNative when marshalling CEL evaluation results to native types.
+ // - Do not use ConvertToNative within CEL extension functions.
+ // - Document whether your implementation supports non-CEL field types, such as Go or Protobuf.
+ ConvertToNative(typeDesc reflect.Type) (any, error)
+
+ // ConvertToType supports type conversions between CEL value types supported by the expression language.
+ ConvertToType(typeValue Type) Val
+
+ // Equal returns true if the `other` value has the same type and content as the implementing struct.
+ Equal(other Val) Val
+
+ // Type returns the TypeValue of the value.
+ Type() Type
+
+ // Value returns the raw value of the instance which may not be directly compatible with the expression
+ // language types.
+ Value() any
+}
diff --git a/vendor/github.com/google/cel-go/common/types/string.go b/vendor/github.com/google/cel-go/common/types/string.go
new file mode 100644
index 0000000..028e682
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/string.go
@@ -0,0 +1,229 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types/ref"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// String type implementation which supports addition, comparison, matching,
+// and size functions.
+type String string
+
+var (
+ stringOneArgOverloads = map[string]func(ref.Val, ref.Val) ref.Val{
+ overloads.Contains: StringContains,
+ overloads.EndsWith: StringEndsWith,
+ overloads.StartsWith: StringStartsWith,
+ }
+
+ stringWrapperType = reflect.TypeOf(&wrapperspb.StringValue{})
+)
+
+// Add implements traits.Adder.Add.
+func (s String) Add(other ref.Val) ref.Val {
+ otherString, ok := other.(String)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ return s + otherString
+}
+
+// Compare implements traits.Comparer.Compare.
+func (s String) Compare(other ref.Val) ref.Val {
+ otherString, ok := other.(String)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ return Int(strings.Compare(s.Value().(string), otherString.Value().(string)))
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (s String) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ switch typeDesc.Kind() {
+ case reflect.String:
+ if reflect.TypeOf(s).AssignableTo(typeDesc) {
+ return s, nil
+ }
+ return s.Value(), nil
+ case reflect.Ptr:
+ switch typeDesc {
+ case anyValueType:
+ // Primitives must be wrapped before being set on an Any field.
+ return anypb.New(wrapperspb.String(string(s)))
+ case jsonValueType:
+ // Convert to a protobuf representation of a JSON String.
+ return structpb.NewStringValue(string(s)), nil
+ case stringWrapperType:
+ // Convert to a wrapperspb.StringValue.
+ return wrapperspb.String(string(s)), nil
+ }
+ if typeDesc.Elem().Kind() == reflect.String {
+ p := s.Value().(string)
+ return &p, nil
+ }
+ case reflect.Interface:
+ sv := s.Value()
+ if reflect.TypeOf(sv).Implements(typeDesc) {
+ return sv, nil
+ }
+ if reflect.TypeOf(s).Implements(typeDesc) {
+ return s, nil
+ }
+ }
+ return nil, fmt.Errorf(
+ "unsupported native conversion from string to '%v'", typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (s String) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case IntType:
+ if n, err := strconv.ParseInt(s.Value().(string), 10, 64); err == nil {
+ return Int(n)
+ }
+ case UintType:
+ if n, err := strconv.ParseUint(s.Value().(string), 10, 64); err == nil {
+ return Uint(n)
+ }
+ case DoubleType:
+ if n, err := strconv.ParseFloat(s.Value().(string), 64); err == nil {
+ return Double(n)
+ }
+ case BoolType:
+ if b, err := strconv.ParseBool(s.Value().(string)); err == nil {
+ return Bool(b)
+ }
+ case BytesType:
+ return Bytes(s)
+ case DurationType:
+ if d, err := time.ParseDuration(s.Value().(string)); err == nil {
+ return durationOf(d)
+ }
+ case TimestampType:
+ if t, err := time.Parse(time.RFC3339, s.Value().(string)); err == nil {
+ if t.Unix() < minUnixTime || t.Unix() > maxUnixTime {
+ return celErrTimestampOverflow
+ }
+ return timestampOf(t)
+ }
+ case StringType:
+ return s
+ case TypeType:
+ return StringType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", StringType, typeVal)
+}
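+
+// Illustrative conversions:
+//
+//	String("42").ConvertToType(IntType)         // Int(42)
+//	String("1h30m").ConvertToType(DurationType) // CEL duration of 1h30m
+//	String("abc").ConvertToType(IntType)        // error: type conversion error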
+
+// Equal implements ref.Val.Equal.
+func (s String) Equal(other ref.Val) ref.Val {
+ otherString, ok := other.(String)
+ return Bool(ok && s == otherString)
+}
+
+// IsZeroValue returns true if the string is empty.
+func (s String) IsZeroValue() bool {
+ return len(s) == 0
+}
+
+// Match implements traits.Matcher.Match.
+func (s String) Match(pattern ref.Val) ref.Val {
+ pat, ok := pattern.(String)
+ if !ok {
+ return MaybeNoSuchOverloadErr(pattern)
+ }
+ matched, err := regexp.MatchString(pat.Value().(string), s.Value().(string))
+ if err != nil {
+ return &Err{err}
+ }
+ return Bool(matched)
+}
+
+// Receive implements traits.Receiver.Receive.
+func (s String) Receive(function string, overload string, args []ref.Val) ref.Val {
+ switch len(args) {
+ case 1:
+ if f, found := stringOneArgOverloads[function]; found {
+ return f(s, args[0])
+ }
+ }
+ return NoSuchOverloadErr()
+}
+
+// Size implements traits.Sizer.Size.
+func (s String) Size() ref.Val {
+ return Int(len([]rune(s.Value().(string))))
+}
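+
+// For example, Size counts runes rather than bytes:
+//
+//	String("héllo").Size() // Int(5), even though the UTF-8 encoding is 6 bytes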
+
+// Type implements ref.Val.Type.
+func (s String) Type() ref.Type {
+ return StringType
+}
+
+// Value implements ref.Val.Value.
+func (s String) Value() any {
+ return string(s)
+}
+
+// StringContains returns whether the string contains a substring.
+func StringContains(s, sub ref.Val) ref.Val {
+ str, ok := s.(String)
+ if !ok {
+ return MaybeNoSuchOverloadErr(s)
+ }
+ subStr, ok := sub.(String)
+ if !ok {
+ return MaybeNoSuchOverloadErr(sub)
+ }
+ return Bool(strings.Contains(string(str), string(subStr)))
+}
+
+// StringEndsWith returns whether the target string contains the input suffix.
+func StringEndsWith(s, suf ref.Val) ref.Val {
+ str, ok := s.(String)
+ if !ok {
+ return MaybeNoSuchOverloadErr(s)
+ }
+ sufStr, ok := suf.(String)
+ if !ok {
+ return MaybeNoSuchOverloadErr(suf)
+ }
+ return Bool(strings.HasSuffix(string(str), string(sufStr)))
+}
+
+// StringStartsWith returns whether the target string contains the input prefix.
+func StringStartsWith(s, pre ref.Val) ref.Val {
+ str, ok := s.(String)
+ if !ok {
+ return MaybeNoSuchOverloadErr(s)
+ }
+ preStr, ok := pre.(String)
+ if !ok {
+ return MaybeNoSuchOverloadErr(pre)
+ }
+ return Bool(strings.HasPrefix(string(str), string(preStr)))
+}
diff --git a/vendor/github.com/google/cel-go/common/types/timestamp.go b/vendor/github.com/google/cel-go/common/types/timestamp.go
new file mode 100644
index 0000000..33acdea
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/timestamp.go
@@ -0,0 +1,311 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types/ref"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ tpb "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+// Timestamp type implementation which supports add, compare, and subtract
+// operations. Timestamps are also capable of participating in dynamic
+// function dispatch to instance methods.
+type Timestamp struct {
+ time.Time
+}
+
+func timestampOf(t time.Time) Timestamp {
+ // Note that this function does not validate that time.Time is in our supported range.
+ return Timestamp{Time: t}
+}
+
+const (
+ // The number of seconds between year 1 and year 1970. This is borrowed from
+ // https://golang.org/src/time/time.go.
+ unixToInternal int64 = (1969*365 + 1969/4 - 1969/100 + 1969/400) * (60 * 60 * 24)
+
+ // Number of seconds between `0001-01-01T00:00:00Z` and the Unix epoch.
+ minUnixTime int64 = -62135596800
+ // Number of seconds between `9999-12-31T23:59:59.999999999Z` and the Unix epoch.
+ maxUnixTime int64 = 253402300799
+)
+
+// Add implements traits.Adder.Add.
+func (t Timestamp) Add(other ref.Val) ref.Val {
+ switch other.Type() {
+ case DurationType:
+ return other.(Duration).Add(t)
+ }
+ return MaybeNoSuchOverloadErr(other)
+}
+
+// Compare implements traits.Comparer.Compare.
+func (t Timestamp) Compare(other ref.Val) ref.Val {
+ if TimestampType != other.Type() {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ ts1 := t.Time
+ ts2 := other.(Timestamp).Time
+ switch {
+ case ts1.Before(ts2):
+ return IntNegOne
+ case ts1.After(ts2):
+ return IntOne
+ default:
+ return IntZero
+ }
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (t Timestamp) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ // If the timestamp is already assignable to the desired type, return it.
+ if reflect.TypeOf(t.Time).AssignableTo(typeDesc) {
+ return t.Time, nil
+ }
+ if reflect.TypeOf(t).AssignableTo(typeDesc) {
+ return t, nil
+ }
+ switch typeDesc {
+ case anyValueType:
+ // Pack the underlying time as a tpb.Timestamp into an Any value.
+ return anypb.New(tpb.New(t.Time))
+ case jsonValueType:
+ // CEL follows the proto3 to JSON conversion which formats as an RFC 3339 encoded JSON
+ // string.
+ v := t.ConvertToType(StringType)
+ if IsError(v) {
+ return nil, v.(*Err)
+ }
+ return structpb.NewStringValue(string(v.(String))), nil
+ case timestampValueType:
+ // Unwrap the underlying tpb.Timestamp.
+ return tpb.New(t.Time), nil
+ }
+ return nil, fmt.Errorf("type conversion error from 'Timestamp' to '%v'", typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (t Timestamp) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case StringType:
+ return String(t.Format(time.RFC3339Nano))
+ case IntType:
+ // Return the Unix time in seconds since 1970
+ return Int(t.Unix())
+ case TimestampType:
+ return t
+ case TypeType:
+ return TimestampType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", TimestampType, typeVal)
+}
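+
+// Illustrative conversions:
+//
+//	ts := timestampOf(time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC))
+//	ts.ConvertToType(StringType) // String("2020-01-01T00:00:00Z")
+//	ts.ConvertToType(IntType)    // Int(1577836800), seconds since the Unix epoch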
+
+// Equal implements ref.Val.Equal.
+func (t Timestamp) Equal(other ref.Val) ref.Val {
+ otherTime, ok := other.(Timestamp)
+ return Bool(ok && t.Time.Equal(otherTime.Time))
+}
+
+// IsZeroValue returns true if the timestamp is the zero time.Time value
+// (January 1, year 1), not the Unix epoch.
+ return t.IsZero()
+}
+
+// Receive implements traits.Receiver.Receive.
+func (t Timestamp) Receive(function string, overload string, args []ref.Val) ref.Val {
+ switch len(args) {
+ case 0:
+ if f, found := timestampZeroArgOverloads[function]; found {
+ return f(t.Time)
+ }
+ case 1:
+ if f, found := timestampOneArgOverloads[function]; found {
+ return f(t.Time, args[0])
+ }
+ }
+ return NoSuchOverloadErr()
+}
+
+// Subtract implements traits.Subtractor.Subtract.
+func (t Timestamp) Subtract(subtrahend ref.Val) ref.Val {
+ switch subtrahend.Type() {
+ case DurationType:
+ dur := subtrahend.(Duration)
+ val, err := subtractTimeDurationChecked(t.Time, dur.Duration)
+ if err != nil {
+ return WrapErr(err)
+ }
+ return timestampOf(val)
+ case TimestampType:
+ t2 := subtrahend.(Timestamp).Time
+ val, err := subtractTimeChecked(t.Time, t2)
+ if err != nil {
+ return WrapErr(err)
+ }
+ return durationOf(val)
+ }
+ return MaybeNoSuchOverloadErr(subtrahend)
+}
+
+// Type implements ref.Val.Type.
+func (t Timestamp) Type() ref.Type {
+ return TimestampType
+}
+
+// Value implements ref.Val.Value.
+func (t Timestamp) Value() any {
+ return t.Time
+}
+
+var (
+ timestampValueType = reflect.TypeOf(&tpb.Timestamp{})
+
+ timestampZeroArgOverloads = map[string]func(time.Time) ref.Val{
+ overloads.TimeGetFullYear: timestampGetFullYear,
+ overloads.TimeGetMonth: timestampGetMonth,
+ overloads.TimeGetDayOfYear: timestampGetDayOfYear,
+ overloads.TimeGetDate: timestampGetDayOfMonthOneBased,
+ overloads.TimeGetDayOfMonth: timestampGetDayOfMonthZeroBased,
+ overloads.TimeGetDayOfWeek: timestampGetDayOfWeek,
+ overloads.TimeGetHours: timestampGetHours,
+ overloads.TimeGetMinutes: timestampGetMinutes,
+ overloads.TimeGetSeconds: timestampGetSeconds,
+ overloads.TimeGetMilliseconds: timestampGetMilliseconds}
+
+ timestampOneArgOverloads = map[string]func(time.Time, ref.Val) ref.Val{
+ overloads.TimeGetFullYear: timestampGetFullYearWithTz,
+ overloads.TimeGetMonth: timestampGetMonthWithTz,
+ overloads.TimeGetDayOfYear: timestampGetDayOfYearWithTz,
+ overloads.TimeGetDate: timestampGetDayOfMonthOneBasedWithTz,
+ overloads.TimeGetDayOfMonth: timestampGetDayOfMonthZeroBasedWithTz,
+ overloads.TimeGetDayOfWeek: timestampGetDayOfWeekWithTz,
+ overloads.TimeGetHours: timestampGetHoursWithTz,
+ overloads.TimeGetMinutes: timestampGetMinutesWithTz,
+ overloads.TimeGetSeconds: timestampGetSecondsWithTz,
+ overloads.TimeGetMilliseconds: timestampGetMillisecondsWithTz}
+)
+
+type timestampVisitor func(time.Time) ref.Val
+
+func timestampGetFullYear(t time.Time) ref.Val {
+ return Int(t.Year())
+}
+func timestampGetMonth(t time.Time) ref.Val {
+ // CEL spec indicates that the month should be 0-based, but the Time value
+ // for Month() is 1-based.
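+ // For example, a January timestamp yields Int(0) and a December timestamp yields Int(11).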
+ return Int(t.Month() - 1)
+}
+func timestampGetDayOfYear(t time.Time) ref.Val {
+ return Int(t.YearDay() - 1)
+}
+func timestampGetDayOfMonthZeroBased(t time.Time) ref.Val {
+ return Int(t.Day() - 1)
+}
+func timestampGetDayOfMonthOneBased(t time.Time) ref.Val {
+ return Int(t.Day())
+}
+func timestampGetDayOfWeek(t time.Time) ref.Val {
+ return Int(t.Weekday())
+}
+func timestampGetHours(t time.Time) ref.Val {
+ return Int(t.Hour())
+}
+func timestampGetMinutes(t time.Time) ref.Val {
+ return Int(t.Minute())
+}
+func timestampGetSeconds(t time.Time) ref.Val {
+ return Int(t.Second())
+}
+func timestampGetMilliseconds(t time.Time) ref.Val {
+ return Int(t.Nanosecond() / 1000000)
+}
+
+func timestampGetFullYearWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetFullYear)(t)
+}
+func timestampGetMonthWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetMonth)(t)
+}
+func timestampGetDayOfYearWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetDayOfYear)(t)
+}
+func timestampGetDayOfMonthZeroBasedWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetDayOfMonthZeroBased)(t)
+}
+func timestampGetDayOfMonthOneBasedWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetDayOfMonthOneBased)(t)
+}
+func timestampGetDayOfWeekWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetDayOfWeek)(t)
+}
+func timestampGetHoursWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetHours)(t)
+}
+func timestampGetMinutesWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetMinutes)(t)
+}
+func timestampGetSecondsWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetSeconds)(t)
+}
+func timestampGetMillisecondsWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetMilliseconds)(t)
+}
+
+func timeZone(tz ref.Val, visitor timestampVisitor) timestampVisitor {
+ return func(t time.Time) ref.Val {
+ if StringType != tz.Type() {
+ return MaybeNoSuchOverloadErr(tz)
+ }
+ val := string(tz.(String))
+ ind := strings.Index(val, ":")
+ if ind == -1 {
+ loc, err := time.LoadLocation(val)
+ if err != nil {
+ return WrapErr(err)
+ }
+ return visitor(t.In(loc))
+ }
+
+ // If the input is not the name of a timezone (for example, 'US/Central'), it should be a numerical offset from UTC
+ // in the format ^(+|-)(0[0-9]|1[0-4]):[0-5][0-9]$. The numerical input is parsed in terms of hours and minutes.
+ hr, err := strconv.Atoi(string(val[0:ind]))
+ if err != nil {
+ return WrapErr(err)
+ }
+ min, err := strconv.Atoi(string(val[ind+1:]))
+ if err != nil {
+ return WrapErr(err)
+ }
+ var offset int
+ if string(val[0]) == "-" {
+ offset = hr*60 - min
+ } else {
+ offset = hr*60 + min
+ }
+ secondsEastOfUTC := int((time.Duration(offset) * time.Minute).Seconds())
+ timezone := time.FixedZone("", secondsEastOfUTC)
+ return visitor(t.In(timezone))
+ }
+}
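+
+// Illustrative sketch, assuming ts is a Timestamp value:
+//
+//	ts.Receive(overloads.TimeGetHours, "", []ref.Val{String("US/Central")}) // IANA zone name
+//	ts.Receive(overloads.TimeGetHours, "", []ref.Val{String("-07:00")})     // fixed UTC offset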
diff --git a/vendor/github.com/google/cel-go/common/types/traits/BUILD.bazel b/vendor/github.com/google/cel-go/common/types/traits/BUILD.bazel
new file mode 100644
index 0000000..b19eb83
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/BUILD.bazel
@@ -0,0 +1,29 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "comparer.go",
+ "container.go",
+ "field_tester.go",
+ "indexer.go",
+ "iterator.go",
+ "lister.go",
+ "mapper.go",
+ "matcher.go",
+ "math.go",
+ "receiver.go",
+ "sizer.go",
+ "traits.go",
+ "zeroer.go",
+ ],
+ importpath = "github.com/google/cel-go/common/types/traits",
+ deps = [
+ "//common/types/ref:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/common/types/traits/comparer.go b/vendor/github.com/google/cel-go/common/types/traits/comparer.go
new file mode 100644
index 0000000..b531d9a
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/comparer.go
@@ -0,0 +1,33 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import (
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// Comparer interface for ordering comparisons between values in order to
+// support '<', '<=', '>=', '>' overloads.
+type Comparer interface {
+ // Compare this value to the input other value, returning an Int:
+ //
+ // this < other -> Int(-1)
+ // this == other -> Int(0)
+ // this > other -> Int(1)
+ //
+ // If the comparison cannot be made or is not supported, an error should
+ // be returned.
+ Compare(other ref.Val) ref.Val
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/container.go b/vendor/github.com/google/cel-go/common/types/traits/container.go
new file mode 100644
index 0000000..cf5c621
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/container.go
@@ -0,0 +1,23 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Container interface which permits containment tests such as 'a in b'.
+type Container interface {
+ // Contains returns true if the value exists within the object.
+ Contains(value ref.Val) ref.Val
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/field_tester.go b/vendor/github.com/google/cel-go/common/types/traits/field_tester.go
new file mode 100644
index 0000000..816a956
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/field_tester.go
@@ -0,0 +1,30 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import (
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// FieldTester indicates if a defined field on an object type is set to a
+// non-default value.
+//
+// For use with the `has()` macro.
+type FieldTester interface {
+ // IsSet returns true if the field is defined and set to a non-default
+ // value. The method will return false if defined and not set, and an error
+ // if the field is not defined.
+ IsSet(field ref.Val) ref.Val
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/indexer.go b/vendor/github.com/google/cel-go/common/types/traits/indexer.go
new file mode 100644
index 0000000..662c683
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/indexer.go
@@ -0,0 +1,25 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import (
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// Indexer permits random access of elements by index 'a[b()]'.
+type Indexer interface {
+ // Get the value at the specified index or error.
+ Get(index ref.Val) ref.Val
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/iterator.go b/vendor/github.com/google/cel-go/common/types/traits/iterator.go
new file mode 100644
index 0000000..42dd371
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/iterator.go
@@ -0,0 +1,36 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import (
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// Iterable aggregate types permit traversal over their elements.
+type Iterable interface {
+ // Iterator returns a new iterator view of the struct.
+ Iterator() Iterator
+}
+
+// Iterator permits safe traversal over the contents of an aggregate type.
+type Iterator interface {
+ ref.Val
+
+ // HasNext returns true if there are unvisited elements in the Iterator.
+ HasNext() ref.Val
+
+ // Next returns the next element.
+ Next() ref.Val
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/lister.go b/vendor/github.com/google/cel-go/common/types/traits/lister.go
new file mode 100644
index 0000000..5cf2593
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/lister.go
@@ -0,0 +1,33 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Lister interface which aggregates the traits of a list.
+type Lister interface {
+ ref.Val
+ Adder
+ Container
+ Indexer
+ Iterable
+ Sizer
+}
+
+// MutableLister interface which emits an immutable result after an intermediate computation.
+type MutableLister interface {
+ Lister
+ ToImmutableList() Lister
+}
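
// A minimal sketch of the trait aggregation above: because Lister embeds
// Adder, Indexer, and Sizer, one list value supports concatenation, indexing,
// and size queries. Add returns a ref.Val, hence the assertion back to
// traits.Lister.
package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/traits"
)

func main() {
	a := types.NewStringList(types.DefaultTypeAdapter, []string{"a"})
	b := types.NewStringList(types.DefaultTypeAdapter, []string{"b", "c"})
	ab := a.Add(b).(traits.Lister)
	fmt.Println(ab.Size())            // 3
	fmt.Println(ab.Get(types.Int(2))) // c
}
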
diff --git a/vendor/github.com/google/cel-go/common/types/traits/mapper.go b/vendor/github.com/google/cel-go/common/types/traits/mapper.go
new file mode 100644
index 0000000..2f7c919
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/mapper.go
@@ -0,0 +1,33 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Mapper interface which aggregates the traits of a map.
+type Mapper interface {
+ ref.Val
+ Container
+ Indexer
+ Iterable
+ Sizer
+
+ // Find returns a value, if one exists, for the input key.
+ //
+ // If the key is not found the function returns (nil, false).
+ // If the input key is not valid for the map, or is Err or Unknown the function returns
+ // (Unknown|Err, false).
+ Find(key ref.Val) (ref.Val, bool)
+}
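
// A minimal sketch of the Find contract: a simple miss yields (nil, false)
// rather than a CEL error value. types.NewStringStringMap is assumed from the
// vendored module's map implementations.
package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
)

func main() {
	m := types.NewStringStringMap(types.DefaultTypeAdapter, map[string]string{"k": "v"})
	if v, found := m.Find(types.String("k")); found {
		fmt.Println(v) // v
	}
	_, found := m.Find(types.String("absent"))
	fmt.Println(found) // false
}
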
diff --git a/vendor/github.com/google/cel-go/common/types/traits/matcher.go b/vendor/github.com/google/cel-go/common/types/traits/matcher.go
new file mode 100644
index 0000000..085dc94
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/matcher.go
@@ -0,0 +1,23 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Matcher interface for supporting 'matches()' overloads.
+type Matcher interface {
+ // Match returns true if the pattern matches the current value.
+ Match(pattern ref.Val) ref.Val
+}
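
// A minimal sketch of Matcher: types.String implements Match with RE2
// semantics, so a 'matches()' overload reduces to this call. An invalid
// pattern is expected to come back as an error value rather than a panic.
package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
)

func main() {
	s := types.String("hello world")
	fmt.Println(s.Match(types.String("^hello"))) // true
	fmt.Println(s.Match(types.String("[")))      // an error value (bad pattern)
}
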
diff --git a/vendor/github.com/google/cel-go/common/types/traits/math.go b/vendor/github.com/google/cel-go/common/types/traits/math.go
new file mode 100644
index 0000000..86d5b91
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/math.go
@@ -0,0 +1,62 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Adder interface to support '+' operator overloads.
+type Adder interface {
+ // Add returns a combination of the current value and other value.
+ //
+ // If the other value is an unsupported type, an error is returned.
+ Add(other ref.Val) ref.Val
+}
+
+// Divider interface to support '/' operator overloads.
+type Divider interface {
+ // Divide returns the result of dividing the current value by the input
+ // denominator.
+ //
+ // A denominator value of zero results in an error.
+ Divide(denominator ref.Val) ref.Val
+}
+
+// Modder interface to support '%' operator overloads.
+type Modder interface {
+ // Modulo returns the result of taking the modulus of the current value
+ // by the denominator.
+ //
+ // A denominator value of zero results in an error.
+ Modulo(denominator ref.Val) ref.Val
+}
+
+// Multiplier interface to support '*' operator overloads.
+type Multiplier interface {
+ // Multiply returns the result of multiplying the current and input value.
+ Multiply(other ref.Val) ref.Val
+}
+
+// Negater interface to support unary '-' and '!' operator overloads.
+type Negater interface {
+ // Negate returns the complement of the current value.
+ Negate() ref.Val
+}
+
+// Subtractor interface to support binary '-' operator overloads.
+type Subtractor interface {
+ // Subtract returns the result of subtracting the input from the current
+ // value.
+ Subtract(subtrahend ref.Val) ref.Val
+}
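
// A minimal sketch of the checked arithmetic behind these interfaces:
// overflow and division by zero surface as CEL error values instead of
// wrapping silently. types.IsError is assumed from the vendored module.
package main

import (
	"fmt"
	"math"

	"github.com/google/cel-go/common/types"
)

func main() {
	fmt.Println(types.Int(40).Add(types.Int(2))) // 42

	sum := types.Int(math.MaxInt64).Add(types.Int(1))
	fmt.Println(types.IsError(sum)) // true: checked overflow

	fmt.Println(types.Int(1).Divide(types.Int(0))) // an error value (division by zero)
}
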
diff --git a/vendor/github.com/google/cel-go/common/types/traits/receiver.go b/vendor/github.com/google/cel-go/common/types/traits/receiver.go
new file mode 100644
index 0000000..8f41db4
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/receiver.go
@@ -0,0 +1,24 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Receiver interface for routing instance method calls within a value.
+type Receiver interface {
+ // Receive accepts a function name, overload id, and arguments and returns
+ // a value.
+ Receive(function string, overload string, args []ref.Val) ref.Val
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/sizer.go b/vendor/github.com/google/cel-go/common/types/traits/sizer.go
new file mode 100644
index 0000000..b80d251
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/sizer.go
@@ -0,0 +1,25 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import (
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// Sizer interface for supporting 'size()' overloads.
+type Sizer interface {
+ // Size returns the number of elements or length of the value.
+ Size() ref.Val
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/traits.go b/vendor/github.com/google/cel-go/common/types/traits/traits.go
new file mode 100644
index 0000000..6da3e6a
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/traits.go
@@ -0,0 +1,64 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package traits defines interfaces that a type may implement to participate
+// in operator overloads and function dispatch.
+package traits
+
+const (
+ // AdderType types provide a '+' operator overload.
+ AdderType = 1 << iota
+
+ // ComparerType types support ordering comparisons '<', '<=', '>', '>='.
+ ComparerType
+
+ // ContainerType types support 'in' operations.
+ ContainerType
+
+ // DividerType types support '/' operations.
+ DividerType
+
+ // FieldTesterType types support the detection of field value presence.
+ FieldTesterType
+
+ // IndexerType types support index access with dynamic values.
+ IndexerType
+
+ // IterableType types can be iterated over in comprehensions.
+ IterableType
+
+ // IteratorType types support iterator semantics.
+ IteratorType
+
+ // MatcherType types support pattern matching via 'matches' method.
+ MatcherType
+
+ // ModderType types support modulus operations '%'.
+ ModderType
+
+ // MultiplierType types support '*' operations.
+ MultiplierType
+
+ // NegatorType types support either negation via '!' or '-'.
+ NegatorType
+
+ // ReceiverType types support dynamic dispatch to instance methods.
+ ReceiverType
+
+ // SizerType types support the size() method.
+ SizerType
+
+ // SubtractorType types support '-' operations.
+ SubtractorType
+)
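
// A minimal sketch of how the constants above are used: each is a distinct
// bit, so a type's capabilities compose with bitwise OR and are queried via
// ref.Type.HasTrait (implemented on types.Type later in this diff).
package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/traits"
)

func main() {
	mask := traits.AdderType | traits.SizerType
	fmt.Println(mask&traits.SizerType == traits.SizerType) // true

	fmt.Println(types.StringType.HasTrait(traits.MatcherType)) // true
	fmt.Println(types.BoolType.HasTrait(traits.AdderType))     // false
}
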
diff --git a/vendor/github.com/google/cel-go/common/types/traits/zeroer.go b/vendor/github.com/google/cel-go/common/types/traits/zeroer.go
new file mode 100644
index 0000000..0b7c830
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/zeroer.go
@@ -0,0 +1,21 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+// Zeroer interface for testing whether a CEL value is a zero value for its type.
+type Zeroer interface {
+ // IsZeroValue indicates whether the object is the zero value for the type.
+ IsZeroValue() bool
+}
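
// A minimal sketch of Zeroer: the runtime can test default-ness without
// knowing the concrete type. The Uint implementation of IsZeroValue appears
// later in this diff.
package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/traits"
)

func main() {
	var z traits.Zeroer = types.Uint(0)
	fmt.Println(z.IsZeroValue())             // true
	fmt.Println(types.Uint(7).IsZeroValue()) // false
}
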
diff --git a/vendor/github.com/google/cel-go/common/types/types.go b/vendor/github.com/google/cel-go/common/types/types.go
new file mode 100644
index 0000000..76624ee
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/types.go
@@ -0,0 +1,806 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ chkdecls "github.com/google/cel-go/checker/decls"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Kind indicates a CEL type's kind which is used to differentiate quickly between simple
+// and complex types.
+type Kind uint
+
+const (
+ // UnspecifiedKind is returned when the type is nil or its kind is not specified.
+ UnspecifiedKind Kind = iota
+
+ // DynKind represents a dynamic type. This kind only exists at type-check time.
+ DynKind
+
+ // AnyKind represents a google.protobuf.Any type. This kind only exists at type-check time.
+ // Prefer DynKind to AnyKind as AnyKind has a specific meaning which is based on protobuf
+ // well-known types.
+ AnyKind
+
+ // BoolKind represents a boolean type.
+ BoolKind
+
+ // BytesKind represents a bytes type.
+ BytesKind
+
+ // DoubleKind represents a double type.
+ DoubleKind
+
+ // DurationKind represents a CEL duration type.
+ DurationKind
+
+ // ErrorKind represents a CEL error type.
+ ErrorKind
+
+ // IntKind represents an integer type.
+ IntKind
+
+ // ListKind represents a list type.
+ ListKind
+
+ // MapKind represents a map type.
+ MapKind
+
+ // NullTypeKind represents a null type.
+ NullTypeKind
+
+ // OpaqueKind represents an abstract type which has no accessible fields.
+ OpaqueKind
+
+ // StringKind represents a string type.
+ StringKind
+
+ // StructKind represents a structured object with typed fields.
+ StructKind
+
+ // TimestampKind represents a CEL time type.
+ TimestampKind
+
+ // TypeKind represents the CEL type.
+ TypeKind
+
+ // TypeParamKind represents a parameterized type whose type name will be resolved at type-check time, if possible.
+ TypeParamKind
+
+ // UintKind represents a uint type.
+ UintKind
+
+ // UnknownKind represents an unknown value type.
+ UnknownKind
+)
+
+var (
+ // AnyType represents the google.protobuf.Any type.
+ AnyType = &Type{
+ kind: AnyKind,
+ runtimeTypeName: "google.protobuf.Any",
+ traitMask: traits.FieldTesterType |
+ traits.IndexerType,
+ }
+ // BoolType represents the bool type.
+ BoolType = &Type{
+ kind: BoolKind,
+ runtimeTypeName: "bool",
+ traitMask: traits.ComparerType |
+ traits.NegatorType,
+ }
+ // BytesType represents the bytes type.
+ BytesType = &Type{
+ kind: BytesKind,
+ runtimeTypeName: "bytes",
+ traitMask: traits.AdderType |
+ traits.ComparerType |
+ traits.SizerType,
+ }
+ // DoubleType represents the double type.
+ DoubleType = &Type{
+ kind: DoubleKind,
+ runtimeTypeName: "double",
+ traitMask: traits.AdderType |
+ traits.ComparerType |
+ traits.DividerType |
+ traits.MultiplierType |
+ traits.NegatorType |
+ traits.SubtractorType,
+ }
+ // DurationType represents the CEL duration type.
+ DurationType = &Type{
+ kind: DurationKind,
+ runtimeTypeName: "google.protobuf.Duration",
+ traitMask: traits.AdderType |
+ traits.ComparerType |
+ traits.NegatorType |
+ traits.ReceiverType |
+ traits.SubtractorType,
+ }
+ // DynType represents a dynamic CEL type whose type will be determined at runtime from context.
+ DynType = &Type{
+ kind: DynKind,
+ runtimeTypeName: "dyn",
+ }
+ // ErrorType represents a CEL error value.
+ ErrorType = &Type{
+ kind: ErrorKind,
+ runtimeTypeName: "error",
+ }
+ // IntType represents the int type.
+ IntType = &Type{
+ kind: IntKind,
+ runtimeTypeName: "int",
+ traitMask: traits.AdderType |
+ traits.ComparerType |
+ traits.DividerType |
+ traits.ModderType |
+ traits.MultiplierType |
+ traits.NegatorType |
+ traits.SubtractorType,
+ }
+ // ListType represents the runtime list type.
+ ListType = NewListType(nil)
+ // MapType represents the runtime map type.
+ MapType = NewMapType(nil, nil)
+ // NullType represents the type of a null value.
+ NullType = &Type{
+ kind: NullTypeKind,
+ runtimeTypeName: "null_type",
+ }
+ // StringType represents the string type.
+ StringType = &Type{
+ kind: StringKind,
+ runtimeTypeName: "string",
+ traitMask: traits.AdderType |
+ traits.ComparerType |
+ traits.MatcherType |
+ traits.ReceiverType |
+ traits.SizerType,
+ }
+ // TimestampType represents the time type.
+ TimestampType = &Type{
+ kind: TimestampKind,
+ runtimeTypeName: "google.protobuf.Timestamp",
+ traitMask: traits.AdderType |
+ traits.ComparerType |
+ traits.ReceiverType |
+ traits.SubtractorType,
+ }
+ // TypeType represents a CEL type.
+ TypeType = &Type{
+ kind: TypeKind,
+ runtimeTypeName: "type",
+ }
+ // UintType represents a uint type.
+ UintType = &Type{
+ kind: UintKind,
+ runtimeTypeName: "uint",
+ traitMask: traits.AdderType |
+ traits.ComparerType |
+ traits.DividerType |
+ traits.ModderType |
+ traits.MultiplierType |
+ traits.SubtractorType,
+ }
+ // UnknownType represents an unknown value type.
+ UnknownType = &Type{
+ kind: UnknownKind,
+ runtimeTypeName: "unknown",
+ }
+)
+
+var _ ref.Type = &Type{}
+var _ ref.Val = &Type{}
+
+// Type holds a reference to a runtime type with an optional type-checked set of type parameters.
+type Type struct {
+ // kind indicates the general category of the type.
+ kind Kind
+
+ // parameters holds the optional type-checked set of type Parameters that are used during static analysis.
+ parameters []*Type
+
+ // runtimeTypeName indicates the runtime type name of the type.
+ runtimeTypeName string
+
+ // isAssignableType function determines whether one type is assignable to this type.
+ // A nil value for the isAssignableType function falls back to equality of kind, runtimeType, and parameters.
+ isAssignableType func(other *Type) bool
+
+ // isAssignableRuntimeType function determines whether the runtime type (with erasure) is assignable to this type.
+ // A nil value for the isAssignableRuntimeType function falls back to the equality of the type or type name.
+ isAssignableRuntimeType func(other ref.Val) bool
+
+ // traitMask is a mask of flags which indicate the capabilities of the type.
+ traitMask int
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (t *Type) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ return nil, fmt.Errorf("type conversion not supported for 'type'")
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (t *Type) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case TypeType:
+ return TypeType
+ case StringType:
+ return String(t.TypeName())
+ }
+ return NewErr("type conversion error from '%s' to '%s'", TypeType, typeVal)
+}
+
+// Equal indicates whether two types have the same runtime type name.
+//
+// The name Equal is a bit of a misnomer, but for historical reasons, this is the
+// runtime behavior. For a more accurate definition see IsType().
+func (t *Type) Equal(other ref.Val) ref.Val {
+ otherType, ok := other.(ref.Type)
+ return Bool(ok && t.TypeName() == otherType.TypeName())
+}
+
+// HasTrait implements the ref.Type interface method.
+func (t *Type) HasTrait(trait int) bool {
+ return trait&t.traitMask == trait
+}
+
+// IsExactType indicates whether the two types are exactly the same. This check also verifies type parameter type names.
+func (t *Type) IsExactType(other *Type) bool {
+ return t.isTypeInternal(other, true)
+}
+
+// IsEquivalentType indicates whether two types are equivalent. This check ignores type parameter type names.
+func (t *Type) IsEquivalentType(other *Type) bool {
+ return t.isTypeInternal(other, false)
+}
+
+// Kind indicates the general category of the type.
+func (t *Type) Kind() Kind {
+ if t == nil {
+ return UnspecifiedKind
+ }
+ return t.kind
+}
+
+// isTypeInternal checks whether the two types are equivalent or exactly the same based on the checkTypeParamName flag.
+func (t *Type) isTypeInternal(other *Type, checkTypeParamName bool) bool {
+ if t == nil {
+ return false
+ }
+ if t == other {
+ return true
+ }
+ if t.Kind() != other.Kind() || len(t.Parameters()) != len(other.Parameters()) {
+ return false
+ }
+ if (checkTypeParamName || t.Kind() != TypeParamKind) && t.TypeName() != other.TypeName() {
+ return false
+ }
+ for i, p := range t.Parameters() {
+ if !p.isTypeInternal(other.Parameters()[i], checkTypeParamName) {
+ return false
+ }
+ }
+ return true
+}
+
+// IsAssignableType determines whether the current type is type-check assignable from the input fromType.
+func (t *Type) IsAssignableType(fromType *Type) bool {
+ if t == nil {
+ return false
+ }
+ if t.isAssignableType != nil {
+ return t.isAssignableType(fromType)
+ }
+ return t.defaultIsAssignableType(fromType)
+}
+
+// IsAssignableRuntimeType determines whether the current type is runtime assignable from the input runtimeType.
+//
+// At runtime, parameterized types are erased and so a function which type-checks to support a map(string, string)
+// will have a runtime assignable type of a map.
+func (t *Type) IsAssignableRuntimeType(val ref.Val) bool {
+ if t == nil {
+ return false
+ }
+ if t.isAssignableRuntimeType != nil {
+ return t.isAssignableRuntimeType(val)
+ }
+ return t.defaultIsAssignableRuntimeType(val)
+}
+
+// Parameters returns the list of type parameters if set.
+//
+// For ListKind, Parameters()[0] represents the list element type
+// For MapKind, Parameters()[0] represents the map key type, and Parameters()[1] represents the map
+// value type.
+func (t *Type) Parameters() []*Type {
+ if t == nil {
+ return emptyParams
+ }
+ return t.parameters
+}
+
+// DeclaredTypeName indicates the fully qualified and parameterized type-check type name.
+func (t *Type) DeclaredTypeName() string {
+ // If the type itself is neither null nor dyn, but is assignable to null, then it's a wrapper type.
+ if t.Kind() != NullTypeKind && !t.isDyn() && t.IsAssignableType(NullType) {
+ return fmt.Sprintf("wrapper(%s)", t.TypeName())
+ }
+ return t.TypeName()
+}
+
+// Type implements the ref.Val interface method.
+func (t *Type) Type() ref.Type {
+ return TypeType
+}
+
+// Value implements the ref.Val interface method.
+func (t *Type) Value() any {
+ return t.TypeName()
+}
+
+// TypeName returns the type-erased fully qualified runtime type name.
+//
+// TypeName implements the ref.Type interface method.
+func (t *Type) TypeName() string {
+ if t == nil {
+ return ""
+ }
+ return t.runtimeTypeName
+}
+
+// String returns a human-readable definition of the type name.
+func (t *Type) String() string {
+ if len(t.Parameters()) == 0 {
+ return t.DeclaredTypeName()
+ }
+ params := make([]string, len(t.Parameters()))
+ for i, p := range t.Parameters() {
+ params[i] = p.String()
+ }
+ return fmt.Sprintf("%s(%s)", t.DeclaredTypeName(), strings.Join(params, ", "))
+}
+
+// isDyn indicates whether the type is dynamic in any way.
+func (t *Type) isDyn() bool {
+ k := t.Kind()
+ return k == DynKind || k == AnyKind || k == TypeParamKind
+}
+
+// defaultIsAssignableType provides the standard definition of what it means for one type to be assignable to another
+// where any of the following may return a true result:
+// - The from types are the same instance
+// - The target type is dynamic
+// - The fromType has the same kind and type name as the target type, and all
+//   parameters of the target type are IsAssignableType() from the parameters
+//   of the fromType.
+func (t *Type) defaultIsAssignableType(fromType *Type) bool {
+ if t == fromType || t.isDyn() {
+ return true
+ }
+ if t.Kind() != fromType.Kind() ||
+ t.TypeName() != fromType.TypeName() ||
+ len(t.Parameters()) != len(fromType.Parameters()) {
+ return false
+ }
+ for i, tp := range t.Parameters() {
+ fp := fromType.Parameters()[i]
+ if !tp.IsAssignableType(fp) {
+ return false
+ }
+ }
+ return true
+}
+
+// defaultIsAssignableRuntimeType inspects the type and in the case of list and map elements, the key and element types
+// to determine whether a ref.Val is assignable to the declared type for a function signature.
+func (t *Type) defaultIsAssignableRuntimeType(val ref.Val) bool {
+ valType := val.Type()
+ // If the current type and value type don't agree, then return false.
+ if !(t.isDyn() || t.TypeName() == valType.TypeName()) {
+ return false
+ }
+ switch t.Kind() {
+ case ListKind:
+ elemType := t.Parameters()[0]
+ l := val.(traits.Lister)
+ if l.Size() == IntZero {
+ return true
+ }
+ it := l.Iterator()
+ elemVal := it.Next()
+ return elemType.IsAssignableRuntimeType(elemVal)
+ case MapKind:
+ keyType := t.Parameters()[0]
+ elemType := t.Parameters()[1]
+ m := val.(traits.Mapper)
+ if m.Size() == IntZero {
+ return true
+ }
+ it := m.Iterator()
+ keyVal := it.Next()
+ elemVal := m.Get(keyVal)
+ return keyType.IsAssignableRuntimeType(keyVal) && elemType.IsAssignableRuntimeType(elemVal)
+ }
+ return true
+}
+
+// NewListType creates an instance of a list type value with the provided element type.
+func NewListType(elemType *Type) *Type {
+ return &Type{
+ kind: ListKind,
+ parameters: []*Type{elemType},
+ runtimeTypeName: "list",
+ traitMask: traits.AdderType |
+ traits.ContainerType |
+ traits.IndexerType |
+ traits.IterableType |
+ traits.SizerType,
+ }
+}
+
+// NewMapType creates an instance of a map type value with the provided key and value types.
+func NewMapType(keyType, valueType *Type) *Type {
+ return &Type{
+ kind: MapKind,
+ parameters: []*Type{keyType, valueType},
+ runtimeTypeName: "map",
+ traitMask: traits.ContainerType |
+ traits.IndexerType |
+ traits.IterableType |
+ traits.SizerType,
+ }
+}
+
+// NewNullableType creates an instance of a nullable type with the provided wrapped type.
+//
+// Note: only primitive types are supported as wrapped types.
+func NewNullableType(wrapped *Type) *Type {
+ return &Type{
+ kind: wrapped.Kind(),
+ parameters: wrapped.Parameters(),
+ runtimeTypeName: wrapped.TypeName(),
+ traitMask: wrapped.traitMask,
+ isAssignableType: func(other *Type) bool {
+ return NullType.IsAssignableType(other) || wrapped.IsAssignableType(other)
+ },
+ isAssignableRuntimeType: func(other ref.Val) bool {
+ return NullType.IsAssignableRuntimeType(other) || wrapped.IsAssignableRuntimeType(other)
+ },
+ }
+}
+
+// NewOptionalType creates an abstract parameterized type instance corresponding to CEL's notion of optional.
+func NewOptionalType(param *Type) *Type {
+ return NewOpaqueType("optional", param)
+}
+
+// NewOpaqueType creates an abstract parameterized type with a given name.
+func NewOpaqueType(name string, params ...*Type) *Type {
+ return &Type{
+ kind: OpaqueKind,
+ parameters: params,
+ runtimeTypeName: name,
+ }
+}
+
+// NewObjectType creates a type reference to an externally defined type, e.g. a protobuf message type.
+//
+// An object type is assumed to support field presence testing and field indexing. Additionally, the
+// type may also indicate additional traits through the use of the optional traits vararg argument.
+func NewObjectType(typeName string, traits ...int) *Type {
+ // Function sanitizes object types on the fly
+ if wkt, found := checkedWellKnowns[typeName]; found {
+ return wkt
+ }
+ traitMask := 0
+ for _, trait := range traits {
+ traitMask |= trait
+ }
+ return &Type{
+ kind: StructKind,
+ parameters: emptyParams,
+ runtimeTypeName: typeName,
+ traitMask: structTypeTraitMask | traitMask,
+ }
+}
+
+// NewObjectTypeValue creates a type reference to an externally defined type.
+//
+// Deprecated: use cel.ObjectType(typeName)
+func NewObjectTypeValue(typeName string) *Type {
+ return NewObjectType(typeName)
+}
+
+// NewTypeValue creates an opaque type which has a set of optional type traits as defined in
+// the common/types/traits package.
+//
+// Deprecated: use cel.ObjectType(typeName, traits)
+func NewTypeValue(typeName string, traits ...int) *Type {
+ traitMask := 0
+ for _, trait := range traits {
+ traitMask |= trait
+ }
+ return &Type{
+ kind: StructKind,
+ parameters: emptyParams,
+ runtimeTypeName: typeName,
+ traitMask: traitMask,
+ }
+}
+
+// NewTypeParamType creates a parameterized type instance.
+func NewTypeParamType(paramName string) *Type {
+ return &Type{
+ kind: TypeParamKind,
+ runtimeTypeName: paramName,
+ }
+}
+
+// NewTypeTypeWithParam creates a type with a type parameter.
+// Used for type-checking purposes, but equivalent to TypeType otherwise.
+func NewTypeTypeWithParam(param *Type) *Type {
+ return &Type{
+ kind: TypeKind,
+ runtimeTypeName: "type",
+ parameters: []*Type{param},
+ }
+}
+
+// TypeToExprType converts a CEL-native type representation to a protobuf CEL Type representation.
+func TypeToExprType(t *Type) (*exprpb.Type, error) {
+ switch t.Kind() {
+ case AnyKind:
+ return chkdecls.Any, nil
+ case BoolKind:
+ return maybeWrapper(t, chkdecls.Bool), nil
+ case BytesKind:
+ return maybeWrapper(t, chkdecls.Bytes), nil
+ case DoubleKind:
+ return maybeWrapper(t, chkdecls.Double), nil
+ case DurationKind:
+ return chkdecls.Duration, nil
+ case DynKind:
+ return chkdecls.Dyn, nil
+ case ErrorKind:
+ return chkdecls.Error, nil
+ case IntKind:
+ return maybeWrapper(t, chkdecls.Int), nil
+ case ListKind:
+ if len(t.Parameters()) != 1 {
+ return nil, fmt.Errorf("invalid list, got %d parameters, wanted one", len(t.Parameters()))
+ }
+ et, err := TypeToExprType(t.Parameters()[0])
+ if err != nil {
+ return nil, err
+ }
+ return chkdecls.NewListType(et), nil
+ case MapKind:
+ if len(t.Parameters()) != 2 {
+ return nil, fmt.Errorf("invalid map, got %d parameters, wanted two", len(t.Parameters()))
+ }
+ kt, err := TypeToExprType(t.Parameters()[0])
+ if err != nil {
+ return nil, err
+ }
+ vt, err := TypeToExprType(t.Parameters()[1])
+ if err != nil {
+ return nil, err
+ }
+ return chkdecls.NewMapType(kt, vt), nil
+ case NullTypeKind:
+ return chkdecls.Null, nil
+ case OpaqueKind:
+ params := make([]*exprpb.Type, len(t.Parameters()))
+ for i, p := range t.Parameters() {
+ pt, err := TypeToExprType(p)
+ if err != nil {
+ return nil, err
+ }
+ params[i] = pt
+ }
+ return chkdecls.NewAbstractType(t.TypeName(), params...), nil
+ case StringKind:
+ return maybeWrapper(t, chkdecls.String), nil
+ case StructKind:
+ return chkdecls.NewObjectType(t.TypeName()), nil
+ case TimestampKind:
+ return chkdecls.Timestamp, nil
+ case TypeParamKind:
+ return chkdecls.NewTypeParamType(t.TypeName()), nil
+ case TypeKind:
+ if len(t.Parameters()) == 1 {
+ p, err := TypeToExprType(t.Parameters()[0])
+ if err != nil {
+ return nil, err
+ }
+ return chkdecls.NewTypeType(p), nil
+ }
+ return chkdecls.NewTypeType(nil), nil
+ case UintKind:
+ return maybeWrapper(t, chkdecls.Uint), nil
+ }
+ return nil, fmt.Errorf("missing type conversion to proto: %v", t)
+}
+
+// ExprTypeToType converts a protobuf CEL type representation to a CEL-native type representation.
+func ExprTypeToType(t *exprpb.Type) (*Type, error) {
+ switch t.GetTypeKind().(type) {
+ case *exprpb.Type_Dyn:
+ return DynType, nil
+ case *exprpb.Type_AbstractType_:
+ paramTypes := make([]*Type, len(t.GetAbstractType().GetParameterTypes()))
+ for i, p := range t.GetAbstractType().GetParameterTypes() {
+ pt, err := ExprTypeToType(p)
+ if err != nil {
+ return nil, err
+ }
+ paramTypes[i] = pt
+ }
+ return NewOpaqueType(t.GetAbstractType().GetName(), paramTypes...), nil
+ case *exprpb.Type_ListType_:
+ et, err := ExprTypeToType(t.GetListType().GetElemType())
+ if err != nil {
+ return nil, err
+ }
+ return NewListType(et), nil
+ case *exprpb.Type_MapType_:
+ kt, err := ExprTypeToType(t.GetMapType().GetKeyType())
+ if err != nil {
+ return nil, err
+ }
+ vt, err := ExprTypeToType(t.GetMapType().GetValueType())
+ if err != nil {
+ return nil, err
+ }
+ return NewMapType(kt, vt), nil
+ case *exprpb.Type_MessageType:
+ return NewObjectType(t.GetMessageType()), nil
+ case *exprpb.Type_Null:
+ return NullType, nil
+ case *exprpb.Type_Primitive:
+ switch t.GetPrimitive() {
+ case exprpb.Type_BOOL:
+ return BoolType, nil
+ case exprpb.Type_BYTES:
+ return BytesType, nil
+ case exprpb.Type_DOUBLE:
+ return DoubleType, nil
+ case exprpb.Type_INT64:
+ return IntType, nil
+ case exprpb.Type_STRING:
+ return StringType, nil
+ case exprpb.Type_UINT64:
+ return UintType, nil
+ default:
+ return nil, fmt.Errorf("unsupported primitive type: %v", t)
+ }
+ case *exprpb.Type_TypeParam:
+ return NewTypeParamType(t.GetTypeParam()), nil
+ case *exprpb.Type_Type:
+ if t.GetType().GetTypeKind() != nil {
+ p, err := ExprTypeToType(t.GetType())
+ if err != nil {
+ return nil, err
+ }
+ return NewTypeTypeWithParam(p), nil
+ }
+ return TypeType, nil
+ case *exprpb.Type_WellKnown:
+ switch t.GetWellKnown() {
+ case exprpb.Type_ANY:
+ return AnyType, nil
+ case exprpb.Type_DURATION:
+ return DurationType, nil
+ case exprpb.Type_TIMESTAMP:
+ return TimestampType, nil
+ default:
+ return nil, fmt.Errorf("unsupported well-known type: %v", t)
+ }
+ case *exprpb.Type_Wrapper:
+ t, err := ExprTypeToType(&exprpb.Type{TypeKind: &exprpb.Type_Primitive{Primitive: t.GetWrapper()}})
+ if err != nil {
+ return nil, err
+ }
+ return NewNullableType(t), nil
+ case *exprpb.Type_Error:
+ return ErrorType, nil
+ default:
+ return nil, fmt.Errorf("unsupported type: %v", t)
+ }
+}
+
+func maybeWrapper(t *Type, pbType *exprpb.Type) *exprpb.Type {
+ if t.IsAssignableType(NullType) {
+ return chkdecls.NewWrapperType(pbType)
+ }
+ return pbType
+}
+
+func maybeForeignType(t ref.Type) *Type {
+ if celType, ok := t.(*Type); ok {
+ return celType
+ }
+ // Inspect the incoming type to determine its traits. The assumption will be that the incoming
+ // type does not have any field values; however, if the trait mask indicates that field testing
+ // and indexing are supported, the foreign type is marked as a struct.
+ traitMask := 0
+ for _, trait := range allTraits {
+ if t.HasTrait(trait) {
+ traitMask |= trait
+ }
+ }
+ // Treat the value like a struct. If it has no fields, this is harmless to denote the type
+ // as such since it basically becomes an opaque type by convention.
+ return NewObjectType(t.TypeName(), traitMask)
+}
+
+var (
+ checkedWellKnowns = map[string]*Type{
+ // Wrapper types.
+ "google.protobuf.BoolValue": NewNullableType(BoolType),
+ "google.protobuf.BytesValue": NewNullableType(BytesType),
+ "google.protobuf.DoubleValue": NewNullableType(DoubleType),
+ "google.protobuf.FloatValue": NewNullableType(DoubleType),
+ "google.protobuf.Int64Value": NewNullableType(IntType),
+ "google.protobuf.Int32Value": NewNullableType(IntType),
+ "google.protobuf.UInt64Value": NewNullableType(UintType),
+ "google.protobuf.UInt32Value": NewNullableType(UintType),
+ "google.protobuf.StringValue": NewNullableType(StringType),
+ // Well-known types.
+ "google.protobuf.Any": AnyType,
+ "google.protobuf.Duration": DurationType,
+ "google.protobuf.Timestamp": TimestampType,
+ // Json types.
+ "google.protobuf.ListValue": NewListType(DynType),
+ "google.protobuf.NullValue": NullType,
+ "google.protobuf.Struct": NewMapType(StringType, DynType),
+ "google.protobuf.Value": DynType,
+ }
+
+ emptyParams = []*Type{}
+
+ allTraits = []int{
+ traits.AdderType,
+ traits.ComparerType,
+ traits.ContainerType,
+ traits.DividerType,
+ traits.FieldTesterType,
+ traits.IndexerType,
+ traits.IterableType,
+ traits.IteratorType,
+ traits.MatcherType,
+ traits.ModderType,
+ traits.MultiplierType,
+ traits.NegatorType,
+ traits.ReceiverType,
+ traits.SizerType,
+ traits.SubtractorType,
+ }
+
+ structTypeTraitMask = traits.FieldTesterType | traits.IndexerType
+)
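
// A minimal sketch of the two assignability paths defined above: recursive
// parameter checks at type-check time, erased parameters plus element
// sampling at runtime. types.NewStringStringMap and types.DefaultTypeAdapter
// are assumed from the vendored module.
package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
)

func main() {
	headers := types.NewMapType(types.StringType, types.StringType)
	fmt.Println(headers) // map(string, string)

	// Type-check-time assignability compares parameters recursively.
	fmt.Println(headers.IsAssignableType(types.NewMapType(types.StringType, types.IntType))) // false
	fmt.Println(types.DynType.IsAssignableType(headers))                                     // true

	// Runtime assignability erases parameters and samples one entry.
	m := types.NewStringStringMap(types.DefaultTypeAdapter, map[string]string{"a": "b"})
	fmt.Println(headers.IsAssignableRuntimeType(m)) // true
}
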
diff --git a/vendor/github.com/google/cel-go/common/types/uint.go b/vendor/github.com/google/cel-go/common/types/uint.go
new file mode 100644
index 0000000..3257f9a
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/uint.go
@@ -0,0 +1,244 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "strconv"
+
+ "github.com/google/cel-go/common/types/ref"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// Uint type implementation which supports comparison and math operators.
+type Uint uint64
+
+var (
+ uint32WrapperType = reflect.TypeOf(&wrapperspb.UInt32Value{})
+
+ uint64WrapperType = reflect.TypeOf(&wrapperspb.UInt64Value{})
+)
+
+// Uint constants
+const (
+ uintZero = Uint(0)
+)
+
+// Add implements traits.Adder.Add.
+func (i Uint) Add(other ref.Val) ref.Val {
+ otherUint, ok := other.(Uint)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ val, err := addUint64Checked(uint64(i), uint64(otherUint))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Uint(val)
+}
+
+// Compare implements traits.Comparer.Compare.
+func (i Uint) Compare(other ref.Val) ref.Val {
+ switch ov := other.(type) {
+ case Double:
+ if math.IsNaN(float64(ov)) {
+ return NewErr("NaN values cannot be ordered")
+ }
+ return compareUintDouble(i, ov)
+ case Int:
+ return compareUintInt(i, ov)
+ case Uint:
+ return compareUint(i, ov)
+ default:
+ return MaybeNoSuchOverloadErr(other)
+ }
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (i Uint) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ switch typeDesc.Kind() {
+ case reflect.Uint, reflect.Uint32:
+ v, err := uint64ToUint32Checked(uint64(i))
+ if err != nil {
+ return 0, err
+ }
+ return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil
+ case reflect.Uint64:
+ return reflect.ValueOf(i).Convert(typeDesc).Interface(), nil
+ case reflect.Ptr:
+ switch typeDesc {
+ case anyValueType:
+ // Primitives must be wrapped before being set on an Any field.
+ return anypb.New(wrapperspb.UInt64(uint64(i)))
+ case jsonValueType:
+ // JSON can accurately represent uints up to 2^53-1 as floating point values.
+ if i.isJSONSafe() {
+ return structpb.NewNumberValue(float64(i)), nil
+ }
+ // Proto3 to JSON conversion requires string-formatted uint64 values
+ // since the conversion to floating point would result in truncation.
+ return structpb.NewStringValue(strconv.FormatUint(uint64(i), 10)), nil
+ case uint32WrapperType:
+ // Convert the value to a wrapperspb.UInt32Value, error on overflow.
+ v, err := uint64ToUint32Checked(uint64(i))
+ if err != nil {
+ return 0, err
+ }
+ return wrapperspb.UInt32(v), nil
+ case uint64WrapperType:
+ // Convert the value to a wrapperspb.UInt64Value.
+ return wrapperspb.UInt64(uint64(i)), nil
+ }
+ switch typeDesc.Elem().Kind() {
+ case reflect.Uint32:
+ v, err := uint64ToUint32Checked(uint64(i))
+ if err != nil {
+ return 0, err
+ }
+ p := reflect.New(typeDesc.Elem())
+ p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem()))
+ return p.Interface(), nil
+ case reflect.Uint64:
+ v := uint64(i)
+ p := reflect.New(typeDesc.Elem())
+ p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem()))
+ return p.Interface(), nil
+ }
+ case reflect.Interface:
+ iv := i.Value()
+ if reflect.TypeOf(iv).Implements(typeDesc) {
+ return iv, nil
+ }
+ if reflect.TypeOf(i).Implements(typeDesc) {
+ return i, nil
+ }
+ }
+ return nil, fmt.Errorf("unsupported type conversion from 'uint' to %v", typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (i Uint) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case IntType:
+ v, err := uint64ToInt64Checked(uint64(i))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Int(v)
+ case UintType:
+ return i
+ case DoubleType:
+ return Double(i)
+ case StringType:
+ return String(fmt.Sprintf("%d", uint64(i)))
+ case TypeType:
+ return UintType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", UintType, typeVal)
+}
+
+// Divide implements traits.Divider.Divide.
+func (i Uint) Divide(other ref.Val) ref.Val {
+ otherUint, ok := other.(Uint)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ div, err := divideUint64Checked(uint64(i), uint64(otherUint))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Uint(div)
+}
+
+// Equal implements ref.Val.Equal.
+func (i Uint) Equal(other ref.Val) ref.Val {
+ switch ov := other.(type) {
+ case Double:
+ if math.IsNaN(float64(ov)) {
+ return False
+ }
+ return Bool(compareUintDouble(i, ov) == 0)
+ case Int:
+ return Bool(compareUintInt(i, ov) == 0)
+ case Uint:
+ return Bool(i == ov)
+ default:
+ return False
+ }
+}
+
+// IsZeroValue returns true if the uint is zero.
+func (i Uint) IsZeroValue() bool {
+ return i == 0
+}
+
+// Modulo implements traits.Modder.Modulo.
+func (i Uint) Modulo(other ref.Val) ref.Val {
+ otherUint, ok := other.(Uint)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ mod, err := moduloUint64Checked(uint64(i), uint64(otherUint))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Uint(mod)
+}
+
+// Multiply implements traits.Multiplier.Multiply.
+func (i Uint) Multiply(other ref.Val) ref.Val {
+ otherUint, ok := other.(Uint)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ val, err := multiplyUint64Checked(uint64(i), uint64(otherUint))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Uint(val)
+}
+
+// Subtract implements traits.Subtractor.Subtract.
+func (i Uint) Subtract(subtrahend ref.Val) ref.Val {
+ subtraUint, ok := subtrahend.(Uint)
+ if !ok {
+ return MaybeNoSuchOverloadErr(subtrahend)
+ }
+ val, err := subtractUint64Checked(uint64(i), uint64(subtraUint))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Uint(val)
+}
+
+// Type implements ref.Val.Type.
+func (i Uint) Type() ref.Type {
+ return UintType
+}
+
+// Value implements ref.Val.Value.
+func (i Uint) Value() any {
+ return uint64(i)
+}
+
+// isJSONSafe indicates whether the uint is safely representable as a floating point value in JSON.
+func (i Uint) isJSONSafe() bool {
+ return i <= maxIntJSON
+}
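
// A minimal, editorial sketch of the conversion paths above. It assumes the
// module's unexported jsonValueType is reflect.TypeOf(&structpb.Value{}), so
// passing that reflect.Type from the caller selects the JSON branch; that
// detail is not visible in this diff.
package main

import (
	"fmt"
	"math"
	"reflect"

	"github.com/google/cel-go/common/types"

	structpb "google.golang.org/protobuf/types/known/structpb"
)

func main() {
	u := types.Uint(math.MaxUint64)

	// uint -> int is range-checked rather than wrapping.
	fmt.Println(types.IsError(u.ConvertToType(types.IntType))) // true

	// Small uints stay numeric in JSON; large ones become strings.
	small, _ := types.Uint(42).ConvertToNative(reflect.TypeOf(&structpb.Value{}))
	fmt.Println(small.(*structpb.Value).GetNumberValue()) // 42

	big, _ := u.ConvertToNative(reflect.TypeOf(&structpb.Value{}))
	fmt.Println(big.(*structpb.Value).GetStringValue()) // 18446744073709551615
}
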
diff --git a/vendor/github.com/google/cel-go/common/types/unknown.go b/vendor/github.com/google/cel-go/common/types/unknown.go
new file mode 100644
index 0000000..9dd2b25
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/unknown.go
@@ -0,0 +1,326 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+ "strings"
+ "unicode"
+
+ "github.com/google/cel-go/common/types/ref"
+)
+
+var (
+ unspecifiedAttribute = &AttributeTrail{qualifierPath: []any{}}
+)
+
+// NewAttributeTrail creates a new simple attribute from a variable name.
+func NewAttributeTrail(variable string) *AttributeTrail {
+ if variable == "" {
+ return unspecifiedAttribute
+ }
+ return &AttributeTrail{variable: variable}
+}
+
+// AttributeTrail specifies a variable with an optional qualifier path. An attribute value is expected to
+// correspond to an AbsoluteAttribute, meaning a field selection which starts with a top-level variable.
+//
+// The qualifier path elements adhere to the AttributeQualifier type constraint.
+type AttributeTrail struct {
+ variable string
+ qualifierPath []any
+}
+
+// Equal returns whether two attribute values have the same variable name and qualifier paths.
+func (a *AttributeTrail) Equal(other *AttributeTrail) bool {
+ if a.Variable() != other.Variable() || len(a.QualifierPath()) != len(other.QualifierPath()) {
+ return false
+ }
+ for i, q := range a.QualifierPath() {
+ qual := other.QualifierPath()[i]
+ if !qualifiersEqual(q, qual) {
+ return false
+ }
+ }
+ return true
+}
+
+func qualifiersEqual(a, b any) bool {
+ if a == b {
+ return true
+ }
+ switch numA := a.(type) {
+ case int64:
+ numB, ok := b.(uint64)
+ if !ok {
+ return false
+ }
+ return intUintEqual(numA, numB)
+ case uint64:
+ numB, ok := b.(int64)
+ if !ok {
+ return false
+ }
+ return intUintEqual(numB, numA)
+ default:
+ return false
+ }
+}
+
+func intUintEqual(i int64, u uint64) bool {
+ if i < 0 || u > math.MaxInt64 {
+ return false
+ }
+ return i == int64(u)
+}
+
+// Variable returns the variable name associated with the attribute.
+func (a *AttributeTrail) Variable() string {
+ return a.variable
+}
+
+// QualifierPath returns the optional set of qualifying fields or indices applied to the variable.
+func (a *AttributeTrail) QualifierPath() []any {
+ return a.qualifierPath
+}
+
+// String returns the string representation of the Attribute.
+func (a *AttributeTrail) String() string {
+ if a.variable == "" {
+ return ""
+ }
+ var str strings.Builder
+ str.WriteString(a.variable)
+ for _, q := range a.qualifierPath {
+ switch q := q.(type) {
+ case bool, int64:
+ str.WriteString(fmt.Sprintf("[%v]", q))
+ case uint64:
+ str.WriteString(fmt.Sprintf("[%vu]", q))
+ case string:
+ if isIdentifierCharacter(q) {
+ str.WriteString(fmt.Sprintf(".%v", q))
+ } else {
+ str.WriteString(fmt.Sprintf("[%q]", q))
+ }
+ }
+ }
+ return str.String()
+}
+
+func isIdentifierCharacter(str string) bool {
+ for _, c := range str {
+ if unicode.IsLetter(c) || unicode.IsDigit(c) || string(c) == "_" {
+ continue
+ }
+ return false
+ }
+ return true
+}
+
+// AttributeQualifier constrains the possible types which may be used to qualify an attribute.
+type AttributeQualifier interface {
+ bool | int64 | uint64 | string
+}
+
+// QualifyAttribute qualifies an attribute using a valid AttributeQualifier type.
+func QualifyAttribute[T AttributeQualifier](attr *AttributeTrail, qualifier T) *AttributeTrail {
+ attr.qualifierPath = append(attr.qualifierPath, qualifier)
+ return attr
+}
+
+// Unknown type which collects the expression ids that caused the current value to become unknown.
+type Unknown struct {
+ attributeTrails map[int64][]*AttributeTrail
+}
+
+// NewUnknown creates a new unknown at a given expression id for an attribute.
+//
+// If the attribute is nil, the attribute value will be the `unspecifiedAttribute`.
+func NewUnknown(id int64, attr *AttributeTrail) *Unknown {
+ if attr == nil {
+ attr = unspecifiedAttribute
+ }
+ return &Unknown{
+ attributeTrails: map[int64][]*AttributeTrail{id: {attr}},
+ }
+}
+
+// IDs returns the set of unknown expression ids contained by this value.
+//
+// Numeric identifiers are guaranteed to be in sorted order.
+func (u *Unknown) IDs() []int64 {
+ ids := make(int64Slice, len(u.attributeTrails))
+ i := 0
+ for id := range u.attributeTrails {
+ ids[i] = id
+ i++
+ }
+ ids.Sort()
+ return ids
+}
+
+// GetAttributeTrails returns the attribute trails, if present, for a given expression id.
+func (u *Unknown) GetAttributeTrails(id int64) ([]*AttributeTrail, bool) {
+ trails, found := u.attributeTrails[id]
+ return trails, found
+}
+
+// Contains returns true if the input unknown is a subset of the current unknown.
+func (u *Unknown) Contains(other *Unknown) bool {
+ for id, otherTrails := range other.attributeTrails {
+ trails, found := u.attributeTrails[id]
+ if !found || len(otherTrails) != len(trails) {
+ return false
+ }
+ for _, ot := range otherTrails {
+ found := false
+ for _, t := range trails {
+ if t.Equal(ot) {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (u *Unknown) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ return u.Value(), nil
+}
+
+// ConvertToType is an identity function since unknown values cannot be modified.
+func (u *Unknown) ConvertToType(typeVal ref.Type) ref.Val {
+ return u
+}
+
+// Equal is an identity function since unknown values cannot be modified.
+func (u *Unknown) Equal(other ref.Val) ref.Val {
+ return u
+}
+
+// String implements the Stringer interface
+func (u *Unknown) String() string {
+ var str strings.Builder
+ for id, attrs := range u.attributeTrails {
+ if str.Len() != 0 {
+ str.WriteString(", ")
+ }
+ if len(attrs) == 1 {
+ str.WriteString(fmt.Sprintf("%v (%d)", attrs[0], id))
+ } else {
+ str.WriteString(fmt.Sprintf("%v (%d)", attrs, id))
+ }
+ }
+ return str.String()
+}
+
+// Type implements ref.Val.Type.
+func (u *Unknown) Type() ref.Type {
+ return UnknownType
+}
+
+// Value implements ref.Val.Value.
+func (u *Unknown) Value() any {
+ return u
+}
+
+// IsUnknown returns whether the element ref.Val is an instance of *types.Unknown.
+func IsUnknown(val ref.Val) bool {
+ switch val.(type) {
+ case *Unknown:
+ return true
+ default:
+ return false
+ }
+}
+
+// MaybeMergeUnknowns determines whether an input value and another, possibly nil, unknown will produce
+// an unknown result.
+//
+// If the input `val` is another Unknown, then the result will be the merge of the `val` and the input
+// `unk`. If the `val` is not unknown, then the result will depend on whether the input `unk` is nil.
+// If both values are non-nil and unknown, then the return value will be a merge of both unknowns.
+func MaybeMergeUnknowns(val ref.Val, unk *Unknown) (*Unknown, bool) {
+ src, isUnk := val.(*Unknown)
+ if !isUnk {
+ if unk != nil {
+ return unk, true
+ }
+ return unk, false
+ }
+ return MergeUnknowns(src, unk), true
+}
+
+// MergeUnknowns combines two unknown values into a new unknown value.
+func MergeUnknowns(unk1, unk2 *Unknown) *Unknown {
+ if unk1 == nil {
+ return unk2
+ }
+ if unk2 == nil {
+ return unk1
+ }
+ out := &Unknown{
+ attributeTrails: make(map[int64][]*AttributeTrail, len(unk1.attributeTrails)+len(unk2.attributeTrails)),
+ }
+ for id, ats := range unk1.attributeTrails {
+ out.attributeTrails[id] = ats
+ }
+ for id, ats := range unk2.attributeTrails {
+ existing, found := out.attributeTrails[id]
+ if !found {
+ out.attributeTrails[id] = ats
+ continue
+ }
+
+ for _, at := range ats {
+ found := false
+ for _, et := range existing {
+ if at.Equal(et) {
+ found = true
+ break
+ }
+ }
+ if !found {
+ existing = append(existing, at)
+ }
+ }
+ out.attributeTrails[id] = existing
+ }
+ return out
+}
+
+// int64Slice is an implementation of the sort.Interface
+type int64Slice []int64
+
+// Len returns the number of elements in the slice.
+func (x int64Slice) Len() int { return len(x) }
+
+// Less indicates whether the value at index i is less than the value at index j.
+func (x int64Slice) Less(i, j int) bool { return x[i] < x[j] }
+
+// Swap swaps the values at indices i and j in place.
+func (x int64Slice) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+// Sort is a convenience method: x.Sort() calls Sort(x).
+func (x int64Slice) Sort() { sort.Sort(x) }
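
// A minimal sketch of the attribute-trail and merge APIs defined above,
// building the trail for `request.auth.claims[0]`; the expected output
// follows the String methods in this file.
package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
)

func main() {
	attr := types.NewAttributeTrail("request")
	attr = types.QualifyAttribute(attr, "auth")
	attr = types.QualifyAttribute(attr, "claims")
	attr = types.QualifyAttribute(attr, int64(0))

	unk := types.NewUnknown(1, attr)
	fmt.Println(unk) // request.auth.claims[0] (1)

	other := types.NewUnknown(2, types.NewAttributeTrail("resource"))
	fmt.Println(types.MergeUnknowns(unk, other).IDs()) // [1 2]
}
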
diff --git a/vendor/github.com/google/cel-go/common/types/util.go b/vendor/github.com/google/cel-go/common/types/util.go
new file mode 100644
index 0000000..71662ee
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/util.go
@@ -0,0 +1,48 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// IsUnknownOrError returns whether the input element ref.Val is an ErrType or UnknownType.
+func IsUnknownOrError(val ref.Val) bool {
+ switch val.(type) {
+ case *Unknown, *Err:
+ return true
+ }
+ return false
+}
+
+// IsPrimitiveType returns whether the input element ref.Val is a primitive type.
+// Note, primitive types do not include well-known types such as Duration and Timestamp.
+func IsPrimitiveType(val ref.Val) bool {
+ switch val.Type() {
+ case BoolType, BytesType, DoubleType, IntType, StringType, UintType:
+ return true
+ }
+ return false
+}
+
+// Equal returns whether the two ref.Val values are heterogeneously equivalent.
+func Equal(lhs ref.Val, rhs ref.Val) ref.Val {
+ lNull := lhs == NullValue
+ rNull := rhs == NullValue
+ if lNull || rNull {
+ return Bool(lNull == rNull)
+ }
+ return lhs.Equal(rhs)
+}
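
// A minimal sketch of Equal: null operands are handled up front, while the
// concrete numeric types provide the cross-type comparisons, so int and uint
// compare by value.
package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
)

func main() {
	fmt.Println(types.Equal(types.Int(1), types.Uint(1)))        // true
	fmt.Println(types.Equal(types.NullValue, types.String("a"))) // false
	fmt.Println(types.Equal(types.NullValue, types.NullValue))   // true
}
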
diff --git a/vendor/github.com/google/cel-go/interpreter/BUILD.bazel b/vendor/github.com/google/cel-go/interpreter/BUILD.bazel
new file mode 100644
index 0000000..3a5219e
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/BUILD.bazel
@@ -0,0 +1,75 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "activation.go",
+ "attribute_patterns.go",
+ "attributes.go",
+ "decorators.go",
+ "dispatcher.go",
+ "evalstate.go",
+ "formatting.go",
+ "interpretable.go",
+ "interpreter.go",
+ "optimizations.go",
+ "planner.go",
+ "prune.go",
+ "runtimecost.go",
+ ],
+ importpath = "github.com/google/cel-go/interpreter",
+ deps = [
+ "//common:go_default_library",
+ "//common/ast:go_default_library",
+ "//common/containers:go_default_library",
+ "//common/functions:go_default_library",
+ "//common/operators:go_default_library",
+ "//common/overloads:go_default_library",
+ "//common/types:go_default_library",
+ "//common/types/ref:go_default_library",
+ "//common/types/traits:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//types/known/durationpb:go_default_library",
+ "@org_golang_google_protobuf//types/known/structpb:go_default_library",
+ "@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
+ "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library",
+ ],
+)
+
+go_test(
+ name = "go_default_test",
+ srcs = [
+ "activation_test.go",
+ "attribute_patterns_test.go",
+ "attributes_test.go",
+ "interpreter_test.go",
+ "prune_test.go",
+ "runtimecost_test.go",
+ ],
+ embed = [
+ ":go_default_library",
+ ],
+ deps = [
+ "//checker:go_default_library",
+ "//common/containers:go_default_library",
+ "//common/debug:go_default_library",
+ "//common/decls:go_default_library",
+ "//common/functions:go_default_library",
+ "//common/operators:go_default_library",
+ "//common/stdlib:go_default_library",
+ "//common/types:go_default_library",
+ "//parser:go_default_library",
+ "//test:go_default_library",
+ "//test/proto2pb:go_default_library",
+ "//test/proto3pb:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//types/known/anypb:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/interpreter/activation.go b/vendor/github.com/google/cel-go/interpreter/activation.go
new file mode 100644
index 0000000..a802644
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/activation.go
@@ -0,0 +1,201 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// Activation is used to resolve identifiers by name and references by id.
+//
+// An Activation is the primary mechanism by which a caller supplies input into a CEL program.
+type Activation interface {
+ // ResolveName returns a value from the activation by qualified name, or false if the name
+ // could not be found.
+ ResolveName(name string) (any, bool)
+
+ // Parent returns the parent of the current activation, which may be nil.
+ // If non-nil, the parent will be searched during resolve calls.
+ Parent() Activation
+}
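+
+// Any type with these two methods satisfies the interface. A minimal sketch of
+// a custom Activation backed by a plain string map (the names are hypothetical):
+//
+//    type envActivation struct{ env map[string]string }
+//
+//    func (e envActivation) ResolveName(name string) (any, bool) {
+//        v, ok := e.env[name]
+//        return v, ok
+//    }
+//
+//    func (e envActivation) Parent() Activation { return nil }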
+
+// EmptyActivation returns a variable-free activation.
+func EmptyActivation() Activation {
+ return emptyActivation{}
+}
+
+// emptyActivation is a variable-free activation.
+type emptyActivation struct{}
+
+func (emptyActivation) ResolveName(string) (any, bool) { return nil, false }
+func (emptyActivation) Parent() Activation { return nil }
+
+// NewActivation returns an activation based on a map-based binding where the map keys are
+// expected to be qualified names used with ResolveName calls.
+//
+// The input `bindings` may either be of type `Activation` or `map[string]any`.
+//
+// Lazy bindings may be supplied within the map-based input in either of the following forms:
+// - func() any
+// - func() ref.Val
+//
+// The output of the lazy binding will overwrite the variable reference in the internal map.
+//
+// Values which are not represented as ref.Val types on input may be adapted to a ref.Val using
+// the types.Adapter configured in the environment.
+func NewActivation(bindings any) (Activation, error) {
+ if bindings == nil {
+ return nil, errors.New("bindings must be non-nil")
+ }
+ a, isActivation := bindings.(Activation)
+ if isActivation {
+ return a, nil
+ }
+ m, isMap := bindings.(map[string]any)
+ if !isMap {
+ return nil, fmt.Errorf(
+ "activation input must be an activation or map[string]interface: got %T",
+ bindings)
+ }
+ return &mapActivation{bindings: m}, nil
+}
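+
+// A minimal usage sketch with map-based bindings (the values are hypothetical),
+// including a lazy entry whose result is memoized on first resolution:
+//
+//    act, _ := NewActivation(map[string]any{
+//        "greeting": "hello",
+//        "answer":   func() any { return 40 + 2 }, // lazy; cached after the first resolve
+//    })
+//    v, found := act.ResolveName("answer") // v == 42, found == true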
+
+// mapActivation implements Activation backed by a map of named values.
+//
+// Named bindings may lazily supply values by providing a function which accepts no arguments and
+// produces an interface value.
+type mapActivation struct {
+ bindings map[string]any
+}
+
+// Parent implements the Activation interface method.
+func (a *mapActivation) Parent() Activation {
+ return nil
+}
+
+// ResolveName implements the Activation interface method.
+func (a *mapActivation) ResolveName(name string) (any, bool) {
+ obj, found := a.bindings[name]
+ if !found {
+ return nil, false
+ }
+ fn, isLazy := obj.(func() ref.Val)
+ if isLazy {
+ obj = fn()
+ a.bindings[name] = obj
+ }
+ fnRaw, isLazy := obj.(func() any)
+ if isLazy {
+ obj = fnRaw()
+ a.bindings[name] = obj
+ }
+ return obj, found
+}
+
+// hierarchicalActivation implements Activation and contains a parent and a
+// child activation.
+type hierarchicalActivation struct {
+ parent Activation
+ child Activation
+}
+
+// Parent implements the Activation interface method.
+func (a *hierarchicalActivation) Parent() Activation {
+ return a.parent
+}
+
+// ResolveName implements the Activation interface method.
+func (a *hierarchicalActivation) ResolveName(name string) (any, bool) {
+ if object, found := a.child.ResolveName(name); found {
+ return object, found
+ }
+ return a.parent.ResolveName(name)
+}
+
+// NewHierarchicalActivation takes two activations and produces a new one which prioritizes
+// resolution in the child first and parent(s) second.
+func NewHierarchicalActivation(parent Activation, child Activation) Activation {
+ return &hierarchicalActivation{parent, child}
+}
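+
+// A short sketch of the resolution order (the bindings are hypothetical):
+//
+//    parent, _ := NewActivation(map[string]any{"x": 1, "y": 2})
+//    child, _ := NewActivation(map[string]any{"y": 20})
+//    combined := NewHierarchicalActivation(parent, child)
+//    combined.ResolveName("y") // 20: the child is searched first
+//    combined.ResolveName("x") // 1: not in the child, so the parent is searched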
+
+// NewPartialActivation returns an Activation which contains a list of AttributePattern values
+// representing field and index operations that should result in a 'types.Unknown' result.
+//
+// The `bindings` value may be any value type supported by the interpreter.NewActivation call,
+// but is typically either an existing Activation or map[string]any.
+func NewPartialActivation(bindings any,
+ unknowns ...*AttributePattern) (PartialActivation, error) {
+ a, err := NewActivation(bindings)
+ if err != nil {
+ return nil, err
+ }
+ return &partActivation{Activation: a, unknowns: unknowns}, nil
+}
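+
+// A sketch of marking data as unknown (the request variable is hypothetical):
+//
+//    vars, _ := NewPartialActivation(
+//        map[string]any{"request.size": 10},
+//        NewAttributePattern("request").QualString("auth"))
+//    // Attribute accesses matching request.auth, or anything beneath it,
+//    // resolve to a types.Unknown value rather than a missing-attribute error.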
+
+// PartialActivation extends the Activation interface with a set of UnknownAttributePatterns.
+type PartialActivation interface {
+ Activation
+
+ // UnknownAttributePatterns returns a set of AttributePattern values which match Attribute
+ // expressions for data accesses whose values are not yet known.
+ UnknownAttributePatterns() []*AttributePattern
+}
+
+// partActivation is the default implementation of the PartialActivation interface.
+type partActivation struct {
+ Activation
+ unknowns []*AttributePattern
+}
+
+// UnknownAttributePatterns implements the PartialActivation interface method.
+func (a *partActivation) UnknownAttributePatterns() []*AttributePattern {
+ return a.unknowns
+}
+
+// varActivation represents a single mutable variable binding.
+//
+// This activation type should only be used within folds as the fold loop controls the object
+// life-cycle.
+type varActivation struct {
+ parent Activation
+ name string
+ val ref.Val
+}
+
+// Parent implements the Activation interface method.
+func (v *varActivation) Parent() Activation {
+ return v.parent
+}
+
+// ResolveName implements the Activation interface method.
+func (v *varActivation) ResolveName(name string) (any, bool) {
+ if name == v.name {
+ return v.val, true
+ }
+ return v.parent.ResolveName(name)
+}
+
+var (
+ // pool of var activations to reduce allocations during folds.
+ varActivationPool = &sync.Pool{
+ New: func() any {
+ return &varActivation{}
+ },
+ }
+)
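+
+// A sketch of the intended pool usage inside a fold loop (the variable name and
+// value below are hypothetical):
+//
+//    va := varActivationPool.Get().(*varActivation)
+//    va.parent, va.name, va.val = vars, "i", types.Int(0)
+//    // ... evaluate the loop step against va ...
+//    varActivationPool.Put(va)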
diff --git a/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go b/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go
new file mode 100644
index 0000000..1fbaaf1
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go
@@ -0,0 +1,399 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+ "fmt"
+
+ "github.com/google/cel-go/common/containers"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// AttributePattern represents a top-level variable with an optional set of qualifier patterns.
+//
+// When using a CEL expression within a container, e.g. a package or namespace, the variable name
+// in the pattern must match the qualified name produced during the variable namespace resolution.
+// For example, if variable `c` appears in an expression whose container is `a.b`, the variable
+// name supplied to the pattern must be `a.b.c`.
+//
+// The qualifier patterns for attribute matching must be one of the following:
+//
+// - valid map key type: string, int, uint, bool
+// - wildcard (*)
+//
+// Examples:
+//
+// 1. ns.myvar["complex-value"]
+// 2. ns.myvar["complex-value"][0]
+// 3. ns.myvar["complex-value"].*.name
+//
+// The first example is simple: match an attribute where the variable is 'ns.myvar' with a
+// field access on 'complex-value'. The second example expands the match to indicate that only
+// a specific index `0` should match. And lastly, the third example matches any indexed access
+// that later selects the 'name' field.
+type AttributePattern struct {
+ variable string
+ qualifierPatterns []*AttributeQualifierPattern
+}
+
+// NewAttributePattern produces a new mutable AttributePattern based on a variable name.
+func NewAttributePattern(variable string) *AttributePattern {
+ return &AttributePattern{
+ variable: variable,
+ qualifierPatterns: []*AttributeQualifierPattern{},
+ }
+}
+
+// QualString adds a string qualifier pattern to the AttributePattern. The string may be a valid
+// identifier or a string map key, including the empty string.
+func (apat *AttributePattern) QualString(pattern string) *AttributePattern {
+ apat.qualifierPatterns = append(apat.qualifierPatterns,
+ &AttributeQualifierPattern{value: pattern})
+ return apat
+}
+
+// QualInt adds an int qualifier pattern to the AttributePattern. The index may be either a map or
+// list index.
+func (apat *AttributePattern) QualInt(pattern int64) *AttributePattern {
+ apat.qualifierPatterns = append(apat.qualifierPatterns,
+ &AttributeQualifierPattern{value: pattern})
+ return apat
+}
+
+// QualUint adds a uint qualifier pattern for a map index operation to the AttributePattern.
+func (apat *AttributePattern) QualUint(pattern uint64) *AttributePattern {
+ apat.qualifierPatterns = append(apat.qualifierPatterns,
+ &AttributeQualifierPattern{value: pattern})
+ return apat
+}
+
+// QualBool adds a bool qualifier pattern for a map index operation to the AttributePattern.
+func (apat *AttributePattern) QualBool(pattern bool) *AttributePattern {
+ apat.qualifierPatterns = append(apat.qualifierPatterns,
+ &AttributeQualifierPattern{value: pattern})
+ return apat
+}
+
+// Wildcard adds a special sentinel qualifier pattern that will match any single qualifier.
+func (apat *AttributePattern) Wildcard() *AttributePattern {
+ apat.qualifierPatterns = append(apat.qualifierPatterns,
+ &AttributeQualifierPattern{wildcard: true})
+ return apat
+}
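+
+// As a sketch, the third example from the AttributePattern documentation,
+// ns.myvar["complex-value"].*.name, is built by chaining the qualifier methods:
+//
+//    pat := NewAttributePattern("ns.myvar").
+//        QualString("complex-value").
+//        Wildcard().
+//        QualString("name")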
+
+// VariableMatches returns true if the fully qualified variable matches the AttributePattern's
+// fully qualified variable name.
+func (apat *AttributePattern) VariableMatches(variable string) bool {
+ return apat.variable == variable
+}
+
+// QualifierPatterns returns the set of AttributeQualifierPattern values on the AttributePattern.
+func (apat *AttributePattern) QualifierPatterns() []*AttributeQualifierPattern {
+ return apat.qualifierPatterns
+}
+
+// AttributeQualifierPattern holds a wildcard or valued qualifier pattern.
+type AttributeQualifierPattern struct {
+ wildcard bool
+ value any
+}
+
+// Matches returns true if the qualifier pattern is a wildcard, or the Qualifier implements the
+// qualifierValueEquator interface and its QualifierValueEquals returns true for the qualifier pattern.
+func (qpat *AttributeQualifierPattern) Matches(q Qualifier) bool {
+ if qpat.wildcard {
+ return true
+ }
+ qve, ok := q.(qualifierValueEquator)
+ return ok && qve.QualifierValueEquals(qpat.value)
+}
+
+// qualifierValueEquator defines an interface for determining if an input value, of valid map key
+// type, is equal to the value held in the Qualifier. This interface is used by the
+// AttributeQualifierPattern to determine pattern matches for non-wildcard qualifier patterns.
+//
+// Note: Attribute values are also Qualifier values; however, Attributes are resolved before
+// qualification happens. This is an implementation detail, but one relevant to why the Attribute
+// types do not surface in the list of implementations.
+//
+// See: partialAttributeFactory.matchesUnknownPatterns for more details on how this interface is
+// used.
+type qualifierValueEquator interface {
+ // QualifierValueEquals returns true if the input value is equal to the value held in the
+ // Qualifier.
+ QualifierValueEquals(value any) bool
+}
+
+// QualifierValueEquals implementation for boolean qualifiers.
+func (q *boolQualifier) QualifierValueEquals(value any) bool {
+ bval, ok := value.(bool)
+ return ok && q.value == bval
+}
+
+// QualifierValueEquals implementation for field qualifiers.
+func (q *fieldQualifier) QualifierValueEquals(value any) bool {
+ sval, ok := value.(string)
+ return ok && q.Name == sval
+}
+
+// QualifierValueEquals implementation for string qualifiers.
+func (q *stringQualifier) QualifierValueEquals(value any) bool {
+ sval, ok := value.(string)
+ return ok && q.value == sval
+}
+
+// QualifierValueEquals implementation for int qualifiers.
+func (q *intQualifier) QualifierValueEquals(value any) bool {
+ return numericValueEquals(value, q.celValue)
+}
+
+// QualifierValueEquals implementation for uint qualifiers.
+func (q *uintQualifier) QualifierValueEquals(value any) bool {
+ return numericValueEquals(value, q.celValue)
+}
+
+// QualifierValueEquals implementation for double qualifiers.
+func (q *doubleQualifier) QualifierValueEquals(value any) bool {
+ return numericValueEquals(value, q.celValue)
+}
+
+// numericValueEquals uses CEL equality to determine whether two number values are equal.
+func numericValueEquals(value any, celValue ref.Val) bool {
+ val := types.DefaultTypeAdapter.NativeToValue(value)
+ return celValue.Equal(val) == types.True
+}
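+
+// For example, assuming CEL's heterogeneous numeric equality, an int input is
+// equal to a double qualifier value with the same numeric value:
+//
+//    numericValueEquals(int64(1), types.Double(1.0)) // true
+//    numericValueEquals(int64(2), types.Double(1.0)) // false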
+
+// NewPartialAttributeFactory returns an AttributeFactory implementation capable of performing
+// AttributePattern matches with PartialActivation inputs.
+func NewPartialAttributeFactory(container *containers.Container,
+ adapter types.Adapter,
+ provider types.Provider) AttributeFactory {
+ fac := NewAttributeFactory(container, adapter, provider)
+ return &partialAttributeFactory{
+ AttributeFactory: fac,
+ container: container,
+ adapter: adapter,
+ provider: provider,
+ }
+}
+
+type partialAttributeFactory struct {
+ AttributeFactory
+ container *containers.Container
+ adapter types.Adapter
+ provider types.Provider
+}
+
+// AbsoluteAttribute implementation of the AttributeFactory interface which wraps the
+// NamespacedAttribute resolution in an internal attributeMatcher object to dynamically match
+// unknown patterns from PartialActivation inputs when they are provided.
+func (fac *partialAttributeFactory) AbsoluteAttribute(id int64, names ...string) NamespacedAttribute {
+ attr := fac.AttributeFactory.AbsoluteAttribute(id, names...)
+ return &attributeMatcher{fac: fac, NamespacedAttribute: attr}
+}
+
+// MaybeAttribute implementation of the AttributeFactory interface which ensures that the set of
+// 'maybe' NamespacedAttribute values are produced using the partialAttributeFactory rather than
+// the base AttributeFactory implementation.
+func (fac *partialAttributeFactory) MaybeAttribute(id int64, name string) Attribute {
+ return &maybeAttribute{
+ id: id,
+ attrs: []NamespacedAttribute{
+ fac.AbsoluteAttribute(id, fac.container.ResolveCandidateNames(name)...),
+ },
+ adapter: fac.adapter,
+ provider: fac.provider,
+ fac: fac,
+ }
+}
+
+// matchesUnknownPatterns returns a non-nil types.Unknown if the variable names and qualifiers
+// for a given Attribute value match any of the AttributePattern objects in the set of unknown
+// activation patterns on the given PartialActivation.
+//
+// For example, in the expression `a.b`, the Attribute is composed of variable `a`, with string
+// qualifier `b`. When a PartialActivation is supplied, it indicates that some or all of the data
+// provided in the input is unknown by specifying unknown AttributePatterns. An AttributePattern
+// that refers to variable `a` with a string qualifier of `c` will not match `a.b`; however, any
+// of the following patterns will match Attribute `a.b`:
+//
+// - `AttributePattern("a")`
+// - `AttributePattern("a").Wildcard()`
+// - `AttributePattern("a").QualString("b")`
+// - `AttributePattern("a").QualString("b").QualInt(0)`
+//
+// Any AttributePattern which overlaps an Attribute or vice-versa will produce an Unknown result
+// for the last pattern matched variable or qualifier in the Attribute. In the first matching
+// example, the expression id representing variable `a` would be listed in the Unknown result,
+// whereas in the other pattern examples, the qualifier `b` would be returned as the Unknown.
+func (fac *partialAttributeFactory) matchesUnknownPatterns(
+ vars PartialActivation,
+ attrID int64,
+ variableNames []string,
+ qualifiers []Qualifier) (*types.Unknown, error) {
+ patterns := vars.UnknownAttributePatterns()
+ candidateIndices := map[int]struct{}{}
+ for _, variable := range variableNames {
+ for i, pat := range patterns {
+ if pat.VariableMatches(variable) {
+ if len(qualifiers) == 0 {
+ return types.NewUnknown(attrID, types.NewAttributeTrail(variable)), nil
+ }
+ candidateIndices[i] = struct{}{}
+ }
+ }
+ }
+ // Determine whether to return early if there are no candidate unknown patterns.
+ if len(candidateIndices) == 0 {
+ return nil, nil
+ }
+ // Resolve the attribute qualifiers into a static set. This prevents more dynamic
+ // Attribute resolutions than necessary when there are multiple unknown patterns
+ // that traverse the same Attribute-based qualifier field.
+ newQuals := make([]Qualifier, len(qualifiers))
+ for i, qual := range qualifiers {
+ attr, isAttr := qual.(Attribute)
+ if isAttr {
+ val, err := attr.Resolve(vars)
+ if err != nil {
+ return nil, err
+ }
+ // If this resolution behavior ever changes, new implementations of the
+ // qualifierValueEquator may be required to handle proper resolution.
+ qual, err = fac.NewQualifier(nil, qual.ID(), val, attr.IsOptional())
+ if err != nil {
+ return nil, err
+ }
+ }
+ newQuals[i] = qual
+ }
+ // Determine whether any of the unknown patterns match.
+ for patIdx := range candidateIndices {
+ pat := patterns[patIdx]
+ isUnk := true
+ matchExprID := attrID
+ qualPats := pat.QualifierPatterns()
+ for i, qual := range newQuals {
+ if i >= len(qualPats) {
+ break
+ }
+ matchExprID = qual.ID()
+ qualPat := qualPats[i]
+ // Note, the AttributeQualifierPattern relies on the input Qualifier not being an
+ // Attribute, since there is no way to resolve the Attribute with the information
+ // provided to the Matches call.
+ if !qualPat.Matches(qual) {
+ isUnk = false
+ break
+ }
+ }
+ if isUnk {
+ attr := types.NewAttributeTrail(pat.variable)
+ for i := 0; i < len(qualPats) && i < len(newQuals); i++ {
+ if qual, ok := newQuals[i].(ConstantQualifier); ok {
+ switch v := qual.Value().Value().(type) {
+ case bool:
+ types.QualifyAttribute[bool](attr, v)
+ case float64:
+ types.QualifyAttribute[int64](attr, int64(v))
+ case int64:
+ types.QualifyAttribute[int64](attr, v)
+ case string:
+ types.QualifyAttribute[string](attr, v)
+ case uint64:
+ types.QualifyAttribute[uint64](attr, v)
+ default:
+ types.QualifyAttribute[string](attr, fmt.Sprintf("%v", v))
+ }
+ } else {
+ types.QualifyAttribute[string](attr, "*")
+ }
+ }
+ return types.NewUnknown(matchExprID, attr), nil
+ }
+ }
+ return nil, nil
+}
+
+// attributeMatcher embeds the NamespacedAttribute interface which allows it to participate in
+// AttributePattern matching against Attribute values without having to modify the code paths that
+// identify Attributes in expressions.
+type attributeMatcher struct {
+ NamespacedAttribute
+ qualifiers []Qualifier
+ fac *partialAttributeFactory
+}
+
+// AddQualifier implements the Attribute interface method.
+func (m *attributeMatcher) AddQualifier(qual Qualifier) (Attribute, error) {
+ // Add the qualifier to the embedded NamespacedAttribute. If the input to the Resolve
+ // method is not a PartialActivation, or does not match an unknown attribute pattern, the
+ // Resolve method is directly invoked on the underlying NamespacedAttribute.
+ _, err := m.NamespacedAttribute.AddQualifier(qual)
+ if err != nil {
+ return nil, err
+ }
+ // The attributeMatcher overloads TryResolve and will attempt to match unknown patterns against
+ // the variable name and qualifier set contained within the Attribute. These values are not
+ // directly inspectable on the top-level NamespacedAttribute interface and so are tracked within
+ // the attributeMatcher.
+ m.qualifiers = append(m.qualifiers, qual)
+ return m, nil
+}
+
+// Resolve is an implementation of the NamespacedAttribute interface method which tests
+// for matching unknown attribute patterns and returns types.Unknown if present. Otherwise,
+// the standard Resolve logic applies.
+func (m *attributeMatcher) Resolve(vars Activation) (any, error) {
+ id := m.NamespacedAttribute.ID()
+ // Search the activation and any parent activations for a PartialActivation.
+ partial, isPartial := toPartialActivation(vars)
+ if isPartial {
+ unk, err := m.fac.matchesUnknownPatterns(
+ partial,
+ id,
+ m.CandidateVariableNames(),
+ m.qualifiers)
+ if err != nil {
+ return nil, err
+ }
+ if unk != nil {
+ return unk, nil
+ }
+ }
+ return m.NamespacedAttribute.Resolve(vars)
+}
+
+// Qualify is an implementation of the Qualifier interface method.
+func (m *attributeMatcher) Qualify(vars Activation, obj any) (any, error) {
+ return attrQualify(m.fac, vars, obj, m)
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (m *attributeMatcher) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return attrQualifyIfPresent(m.fac, vars, obj, m, presenceOnly)
+}
+
+func toPartialActivation(vars Activation) (PartialActivation, bool) {
+ pv, ok := vars.(PartialActivation)
+ if ok {
+ return pv, true
+ }
+ if vars.Parent() != nil {
+ return toPartialActivation(vars.Parent())
+ }
+ return nil, false
+}
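+
+// End-to-end, the pieces above combine roughly as follows (a sketch; cont,
+// adapter, and provider are assumed to be a pre-configured containers.Container,
+// types.Adapter, and types.Provider):
+//
+//    fac := NewPartialAttributeFactory(cont, adapter, provider)
+//    vars, _ := NewPartialActivation(map[string]any{},
+//        NewAttributePattern("a").QualString("b"))
+//    attr := fac.AbsoluteAttribute(1, "a")
+//    qual, _ := fac.NewQualifier(nil, 2, "b", false)
+//    attr.AddQualifier(qual)
+//    v, _ := attr.Resolve(vars) // v is a *types.Unknown tracking a.b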
diff --git a/vendor/github.com/google/cel-go/interpreter/attributes.go b/vendor/github.com/google/cel-go/interpreter/attributes.go
new file mode 100644
index 0000000..ca97bdf
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/attributes.go
@@ -0,0 +1,1337 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/google/cel-go/common/containers"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+// AttributeFactory provides methods for creating Attribute and Qualifier values.
+type AttributeFactory interface {
+ // AbsoluteAttribute creates an attribute that refers to a top-level variable name.
+ //
+ // Checked expressions generate absolute attributes with a single name.
+ // Parse-only expressions may have more than one possible absolute identifier when the
+ // expression is created within a container, e.g. package or namespace.
+ //
+ // When there is more than one name supplied to the AbsoluteAttribute call, the names
+ // must be in CEL's namespace resolution order. The name arguments provided here are
+ // returned in the same order as they were provided by the NamespacedAttribute
+ // CandidateVariableNames method.
+ AbsoluteAttribute(id int64, names ...string) NamespacedAttribute
+
+ // ConditionalAttribute creates an attribute with two Attribute branches, where the Attribute
+ // that is resolved depends on the boolean evaluation of the input 'expr'.
+ ConditionalAttribute(id int64, expr Interpretable, t, f Attribute) Attribute
+
+ // MaybeAttribute creates an attribute that refers to either a field selection or a namespaced
+ // variable name.
+ //
+ // Only expressions which have not been type-checked may generate oneof attributes.
+ MaybeAttribute(id int64, name string) Attribute
+
+ // RelativeAttribute creates an attribute whose value is a qualification of a dynamic
+ // computation rather than a static variable reference.
+ RelativeAttribute(id int64, operand Interpretable) Attribute
+
+ // NewQualifier creates a qualifier on the target object with a given value.
+ //
+ // The 'val' may be an Attribute or any proto-supported map key type: bool, int, string, uint.
+ //
+ // The qualifier may consider the object type being qualified, if present. If absent, the
+ // qualification should be considered dynamic and the qualification should still work, though
+ // it may be sub-optimal.
+ NewQualifier(objType *types.Type, qualID int64, val any, opt bool) (Qualifier, error)
+}
+
+// Qualifier is a marker interface for designating different qualifier values and where they appear
+// within field selections and index call expressions (`_[_]`).
+type Qualifier interface {
+ // ID where the qualifier appears within an expression.
+ ID() int64
+
+ // IsOptional specifies whether the qualifier is optional.
+ // Instead of a direct qualification, an optional qualifier will be resolved via QualifyIfPresent
+ // rather than Qualify. A non-optional qualifier may also be resolved through QualifyIfPresent if
+ // the object to qualify is itself optional.
+ IsOptional() bool
+
+ // Qualify performs a qualification, e.g. field selection, on the input object and returns
+ // the value of the access, or an error if the qualification could not be applied.
+ Qualify(vars Activation, obj any) (any, error)
+
+ // QualifyIfPresent qualifies the object if the qualifier is declared or defined on the object.
+ // The 'presenceOnly' flag indicates that the value is not necessary, just a boolean status as
+ // to whether the qualifier is present.
+ QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error)
+}
+
+// ConstantQualifier interface embeds the Qualifier interface and provides an option to inspect the
+// qualifier's constant value.
+//
+// Non-constant qualifiers are of Attribute type.
+type ConstantQualifier interface {
+ Qualifier
+
+ // Value returns the constant value associated with the qualifier.
+ Value() ref.Val
+}
+
+// Attribute values are a variable or value with an optional set of qualifiers, such as field, key,
+// or index accesses.
+type Attribute interface {
+ Qualifier
+
+ // AddQualifier adds a qualifier to the Attribute, or returns an error if the qualifier type is invalid.
+ AddQualifier(Qualifier) (Attribute, error)
+
+ // Resolve returns the value of the Attribute given an Activation.
+ //
+ // If an error is encountered during attribute resolution, it will be returned immediately.
+ // If the attribute cannot be resolved within the Activation, the result must be `nil` and an
+ // error indicating which variable was missing.
+ Resolve(Activation) (any, error)
+}
+
+// NamespacedAttribute values are a variable within a namespace, and an optional set of qualifiers
+// such as field, key, or index accesses.
+type NamespacedAttribute interface {
+ Attribute
+
+ // CandidateVariableNames returns the possible namespaced variable names for this Attribute in
+ // the CEL namespace resolution order.
+ CandidateVariableNames() []string
+
+ // Qualifiers returns the list of qualifiers associated with the Attribute.
+ Qualifiers() []Qualifier
+}
+
+// NewAttributeFactory returns a default AttributeFactory that produces Attribute values
+// capable of resolving types by simple names and qualifying the values using the supported
+// qualifier types: bool, int, string, and uint.
+func NewAttributeFactory(cont *containers.Container, a types.Adapter, p types.Provider) AttributeFactory {
+ return &attrFactory{
+ container: cont,
+ adapter: a,
+ provider: p,
+ }
+}
+
+type attrFactory struct {
+ container *containers.Container
+ adapter types.Adapter
+ provider types.Provider
+}
+
+// AbsoluteAttribute refers to a variable value and an optional qualifier path.
+//
+// The namespaceNames represent the names the variable could have based on namespace
+// resolution rules.
+func (r *attrFactory) AbsoluteAttribute(id int64, names ...string) NamespacedAttribute {
+ return &absoluteAttribute{
+ id: id,
+ namespaceNames: names,
+ qualifiers: []Qualifier{},
+ adapter: r.adapter,
+ provider: r.provider,
+ fac: r,
+ }
+}
+
+// ConditionalAttribute supports the case where an attribute selection may occur on a conditional
+// expression, e.g. (cond ? a : b).c
+func (r *attrFactory) ConditionalAttribute(id int64, expr Interpretable, t, f Attribute) Attribute {
+ return &conditionalAttribute{
+ id: id,
+ expr: expr,
+ truthy: t,
+ falsy: f,
+ adapter: r.adapter,
+ fac: r,
+ }
+}
+
+// MaybeAttribute collects variants of unchecked AbsoluteAttribute values which could either be
+// direct variable accesses or some combination of variable access with qualification.
+func (r *attrFactory) MaybeAttribute(id int64, name string) Attribute {
+ return &maybeAttribute{
+ id: id,
+ attrs: []NamespacedAttribute{
+ r.AbsoluteAttribute(id, r.container.ResolveCandidateNames(name)...),
+ },
+ adapter: r.adapter,
+ provider: r.provider,
+ fac: r,
+ }
+}
+
+// RelativeAttribute refers to an expression and an optional qualifier path.
+func (r *attrFactory) RelativeAttribute(id int64, operand Interpretable) Attribute {
+ return &relativeAttribute{
+ id: id,
+ operand: operand,
+ qualifiers: []Qualifier{},
+ adapter: r.adapter,
+ fac: r,
+ }
+}
+
+// NewQualifier is an implementation of the AttributeFactory interface.
+func (r *attrFactory) NewQualifier(objType *types.Type, qualID int64, val any, opt bool) (Qualifier, error) {
+ // Before creating a new qualifier check to see if this is a protobuf message field access.
+ // If so, use the precomputed GetFrom qualification method rather than the standard
+ // stringQualifier.
+ str, isStr := val.(string)
+ if isStr && objType != nil && objType.Kind() == types.StructKind {
+ ft, found := r.provider.FindStructFieldType(objType.TypeName(), str)
+ if found && ft.IsSet != nil && ft.GetFrom != nil {
+ return &fieldQualifier{
+ id: qualID,
+ Name: str,
+ FieldType: ft,
+ adapter: r.adapter,
+ optional: opt,
+ }, nil
+ }
+ }
+ return newQualifier(r.adapter, qualID, val, opt)
+}
+
+type absoluteAttribute struct {
+ id int64
+ // namespaceNames represent the names the variable could have based on declared container
+ // (package) of the expression.
+ namespaceNames []string
+ qualifiers []Qualifier
+ adapter types.Adapter
+ provider types.Provider
+ fac AttributeFactory
+}
+
+// ID implements the Attribute interface method.
+func (a *absoluteAttribute) ID() int64 {
+ qualCount := len(a.qualifiers)
+ if qualCount == 0 {
+ return a.id
+ }
+ return a.qualifiers[qualCount-1].ID()
+}
+
+// IsOptional returns trivially false for an attribute as the attribute represents a fully
+// qualified variable name. If the attribute is used in an optional manner, then an attrQualifier
+// is created and marks the attribute as optional.
+func (a *absoluteAttribute) IsOptional() bool {
+ return false
+}
+
+// AddQualifier implements the Attribute interface method.
+func (a *absoluteAttribute) AddQualifier(qual Qualifier) (Attribute, error) {
+ a.qualifiers = append(a.qualifiers, qual)
+ return a, nil
+}
+
+// CandidateVariableNames implements the NamespacedAttribute interface method.
+func (a *absoluteAttribute) CandidateVariableNames() []string {
+ return a.namespaceNames
+}
+
+// Qualifiers returns the list of Qualifier instances associated with the namespaced attribute.
+func (a *absoluteAttribute) Qualifiers() []Qualifier {
+ return a.qualifiers
+}
+
+// Qualify is an implementation of the Qualifier interface method.
+func (a *absoluteAttribute) Qualify(vars Activation, obj any) (any, error) {
+ return attrQualify(a.fac, vars, obj, a)
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (a *absoluteAttribute) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return attrQualifyIfPresent(a.fac, vars, obj, a, presenceOnly)
+}
+
+// String implements the Stringer interface method.
+func (a *absoluteAttribute) String() string {
+ return fmt.Sprintf("id: %v, names: %v", a.id, a.namespaceNames)
+}
+
+// Resolve returns the resolved Attribute value given the Activation, or an error if the Attribute
+// variable is not found, or if its Qualifiers cannot be applied successfully.
+//
+// If the variable name cannot be found as an Activation variable or in the TypeProvider as
+// a type, then the result is `nil`, `error` with the error indicating the name of the first
+// variable searched as missing.
+func (a *absoluteAttribute) Resolve(vars Activation) (any, error) {
+ for _, nm := range a.namespaceNames {
+ // If the variable is found, process it. Otherwise, fall through to check whether the name
+ // resolves to a type identifier before returning a missing-attribute error.
+ obj, found := vars.ResolveName(nm)
+ if found {
+ obj, isOpt, err := applyQualifiers(vars, obj, a.qualifiers)
+ if err != nil {
+ return nil, err
+ }
+ if isOpt {
+ val := a.adapter.NativeToValue(obj)
+ if types.IsUnknown(val) {
+ return val, nil
+ }
+ return types.OptionalOf(val), nil
+ }
+ return obj, nil
+ }
+ // Attempt to resolve the qualified type name if the name is not a variable identifier.
+ typ, found := a.provider.FindIdent(nm)
+ if found {
+ if len(a.qualifiers) == 0 {
+ return typ, nil
+ }
+ }
+ }
+ var attrNames strings.Builder
+ for i, nm := range a.namespaceNames {
+ if i != 0 {
+ attrNames.WriteString(", ")
+ }
+ attrNames.WriteString(nm)
+ }
+ return nil, missingAttribute(attrNames.String())
+}
+
+type conditionalAttribute struct {
+ id int64
+ expr Interpretable
+ truthy Attribute
+ falsy Attribute
+ adapter types.Adapter
+ fac AttributeFactory
+}
+
+// ID is an implementation of the Attribute interface method.
+func (a *conditionalAttribute) ID() int64 {
+ // There's a field access after the conditional.
+ if a.truthy.ID() == a.falsy.ID() {
+ return a.truthy.ID()
+ }
+ // Otherwise return the conditional id as the consistent id being tracked.
+ return a.id
+}
+
+// IsOptional returns trivially false for an attribute as the attribute represents a fully
+// qualified variable name. If the attribute is used in an optional manner, then an attrQualifier
+// is created and marks the attribute as optional.
+func (a *conditionalAttribute) IsOptional() bool {
+ return false
+}
+
+// AddQualifier appends the same qualifier to both sides of the conditional, in effect managing
+// the qualification of alternate attributes.
+func (a *conditionalAttribute) AddQualifier(qual Qualifier) (Attribute, error) {
+ _, err := a.truthy.AddQualifier(qual)
+ if err != nil {
+ return nil, err
+ }
+ _, err = a.falsy.AddQualifier(qual)
+ if err != nil {
+ return nil, err
+ }
+ return a, nil
+}
+
+// Qualify is an implementation of the Qualifier interface method.
+func (a *conditionalAttribute) Qualify(vars Activation, obj any) (any, error) {
+ return attrQualify(a.fac, vars, obj, a)
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (a *conditionalAttribute) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return attrQualifyIfPresent(a.fac, vars, obj, a, presenceOnly)
+}
+
+// Resolve evaluates the condition, and then resolves the truthy or falsy branch accordingly.
+func (a *conditionalAttribute) Resolve(vars Activation) (any, error) {
+ val := a.expr.Eval(vars)
+ if val == types.True {
+ return a.truthy.Resolve(vars)
+ }
+ if val == types.False {
+ return a.falsy.Resolve(vars)
+ }
+ if types.IsUnknown(val) {
+ return val, nil
+ }
+ return nil, types.MaybeNoSuchOverloadErr(val).(*types.Err)
+}
+
+// String is an implementation of the Stringer interface method.
+func (a *conditionalAttribute) String() string {
+ return fmt.Sprintf("id: %v, truthy attribute: %v, falsy attribute: %v", a.id, a.truthy, a.falsy)
+}
+
+type maybeAttribute struct {
+ id int64
+ attrs []NamespacedAttribute
+ adapter types.Adapter
+ provider types.Provider
+ fac AttributeFactory
+}
+
+// ID is an implementation of the Attribute interface method.
+func (a *maybeAttribute) ID() int64 {
+ return a.attrs[0].ID()
+}
+
+// IsOptional returns trivially false for an attribute as the attribute represents a fully
+// qualified variable name. If the attribute is used in an optional manner, then an attrQualifier
+// is created and marks the attribute as optional.
+func (a *maybeAttribute) IsOptional() bool {
+ return false
+}
+
+// AddQualifier adds a qualifier to each possible attribute variant, and also creates
+// a new namespaced variable from the qualified value.
+//
+// The algorithm for building the maybe attribute is as follows:
+//
+// 1. Create a maybe attribute from a simple identifier when it occurs in a parsed-only expression
+//
+// mb = MaybeAttribute(<id>, "a")
+//
+// Initializing the maybe attribute creates an absolute attribute internally which includes the
+// possible namespaced names of the attribute. In this example, let's assume we are in namespace
+// 'ns', then the maybe is either one of the following variable names:
+//
+// possible variable names -- ns.a, a
+//
+// 2. Adding a qualifier to the maybe means that the variable name could be a longer qualified
+// name, or a field selection on one of the possible variable names produced earlier:
+//
+// mb.AddQualifier("b")
+//
+// possible variable names -- ns.a.b, a.b
+// possible field selections -- ns.a['b'], a['b']
+//
+// If none of the attributes within the maybe resolves a value, the result is an error.
+func (a *maybeAttribute) AddQualifier(qual Qualifier) (Attribute, error) {
+ str := ""
+ isStr := false
+ cq, isConst := qual.(ConstantQualifier)
+ if isConst {
+ str, isStr = cq.Value().Value().(string)
+ }
+ var augmentedNames []string
+ // First add the qualifier to all existing attributes in the oneof.
+ for _, attr := range a.attrs {
+ if isStr && len(attr.Qualifiers()) == 0 {
+ candidateVars := attr.CandidateVariableNames()
+ augmentedNames = make([]string, len(candidateVars))
+ for i, name := range candidateVars {
+ augmentedNames[i] = fmt.Sprintf("%s.%s", name, str)
+ }
+ }
+ _, err := attr.AddQualifier(qual)
+ if err != nil {
+ return nil, err
+ }
+ }
+ // Next, ensure the most specific variable / type reference is searched first.
+ if len(augmentedNames) != 0 {
+ a.attrs = append([]NamespacedAttribute{a.fac.AbsoluteAttribute(qual.ID(), augmentedNames...)}, a.attrs...)
+ }
+ return a, nil
+}
+
+// Qualify is an implementation of the Qualifier interface method.
+func (a *maybeAttribute) Qualify(vars Activation, obj any) (any, error) {
+ return attrQualify(a.fac, vars, obj, a)
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (a *maybeAttribute) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return attrQualifyIfPresent(a.fac, vars, obj, a, presenceOnly)
+}
+
+// Resolve follows the variable resolution rules to determine whether the attribute is a variable
+// or a field selection.
+func (a *maybeAttribute) Resolve(vars Activation) (any, error) {
+ var maybeErr error
+ for _, attr := range a.attrs {
+ obj, err := attr.Resolve(vars)
+ // Return an error if one is encountered.
+ if err != nil {
+ resErr, ok := err.(*resolutionError)
+ if !ok {
+ return nil, err
+ }
+ // If this was not a missing variable error, return it.
+ if !resErr.isMissingAttribute() {
+ return nil, err
+ }
+ // When the variable is missing in a maybe attribute we defer erroring.
+ if maybeErr == nil {
+ maybeErr = resErr
+ }
+ // Continue attempting to resolve possible variables.
+ continue
+ }
+ return obj, nil
+ }
+ // Else, produce a no such attribute error.
+ return nil, maybeErr
+}
+
+// String is an implementation of the Stringer interface method.
+func (a *maybeAttribute) String() string {
+ return fmt.Sprintf("id: %v, attributes: %v", a.id, a.attrs)
+}
+
+type relativeAttribute struct {
+ id int64
+ operand Interpretable
+ qualifiers []Qualifier
+ adapter types.Adapter
+ fac AttributeFactory
+}
+
+// ID is an implementation of the Attribute interface method.
+func (a *relativeAttribute) ID() int64 {
+ qualCount := len(a.qualifiers)
+ if qualCount == 0 {
+ return a.id
+ }
+ return a.qualifiers[qualCount-1].ID()
+}
+
+// IsOptional returns trivially false for an attribute as the attribute represents a fully
+// qualified variable name. If the attribute is used in an optional manner, then an attrQualifier
+// is created and marks the attribute as optional.
+func (a *relativeAttribute) IsOptional() bool {
+ return false
+}
+
+// AddQualifier implements the Attribute interface method.
+func (a *relativeAttribute) AddQualifier(qual Qualifier) (Attribute, error) {
+ a.qualifiers = append(a.qualifiers, qual)
+ return a, nil
+}
+
+// Qualify is an implementation of the Qualifier interface method.
+func (a *relativeAttribute) Qualify(vars Activation, obj any) (any, error) {
+ return attrQualify(a.fac, vars, obj, a)
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (a *relativeAttribute) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return attrQualifyIfPresent(a.fac, vars, obj, a, presenceOnly)
+}
+
+// Resolve evaluates the operand expression and applies any qualifiers relative to the expression result.
+func (a *relativeAttribute) Resolve(vars Activation) (any, error) {
+ // First, evaluate the operand.
+ v := a.operand.Eval(vars)
+ if types.IsError(v) {
+ return nil, v.(*types.Err)
+ }
+ if types.IsUnknown(v) {
+ return v, nil
+ }
+ obj, isOpt, err := applyQualifiers(vars, v, a.qualifiers)
+ if err != nil {
+ return nil, err
+ }
+ if isOpt {
+ val := a.adapter.NativeToValue(obj)
+ if types.IsUnknown(val) {
+ return val, nil
+ }
+ return types.OptionalOf(val), nil
+ }
+ return obj, nil
+}
+
+// String is an implementation of the Stringer interface method.
+func (a *relativeAttribute) String() string {
+ return fmt.Sprintf("id: %v, operand: %v", a.id, a.operand)
+}
+
+func newQualifier(adapter types.Adapter, id int64, v any, opt bool) (Qualifier, error) {
+ var qual Qualifier
+ switch val := v.(type) {
+ case Attribute:
+ // Note, attributes are initially identified as non-optional since they represent a top-level
+ // field access; however, when used as a relative qualifier, e.g. a[?b.c], then an attrQualifier
+ // is created which intercepts the IsOptional check for the attribute in order to return the
+ // correct result.
+ return &attrQualifier{
+ id: id,
+ Attribute: val,
+ optional: opt,
+ }, nil
+ case string:
+ qual = &stringQualifier{
+ id: id,
+ value: val,
+ celValue: types.String(val),
+ adapter: adapter,
+ optional: opt,
+ }
+ case int:
+ qual = &intQualifier{
+ id: id, value: int64(val), celValue: types.Int(val), adapter: adapter, optional: opt,
+ }
+ case int32:
+ qual = &intQualifier{
+ id: id, value: int64(val), celValue: types.Int(val), adapter: adapter, optional: opt,
+ }
+ case int64:
+ qual = &intQualifier{
+ id: id, value: val, celValue: types.Int(val), adapter: adapter, optional: opt,
+ }
+ case uint:
+ qual = &uintQualifier{
+ id: id, value: uint64(val), celValue: types.Uint(val), adapter: adapter, optional: opt,
+ }
+ case uint32:
+ qual = &uintQualifier{
+ id: id, value: uint64(val), celValue: types.Uint(val), adapter: adapter, optional: opt,
+ }
+ case uint64:
+ qual = &uintQualifier{
+ id: id, value: val, celValue: types.Uint(val), adapter: adapter, optional: opt,
+ }
+ case bool:
+ qual = &boolQualifier{
+ id: id, value: val, celValue: types.Bool(val), adapter: adapter, optional: opt,
+ }
+ case float32:
+ qual = &doubleQualifier{
+ id: id,
+ value: float64(val),
+ celValue: types.Double(val),
+ adapter: adapter,
+ optional: opt,
+ }
+ case float64:
+ qual = &doubleQualifier{
+ id: id, value: val, celValue: types.Double(val), adapter: adapter, optional: opt,
+ }
+ case types.String:
+ qual = &stringQualifier{
+ id: id, value: string(val), celValue: val, adapter: adapter, optional: opt,
+ }
+ case types.Int:
+ qual = &intQualifier{
+ id: id, value: int64(val), celValue: val, adapter: adapter, optional: opt,
+ }
+ case types.Uint:
+ qual = &uintQualifier{
+ id: id, value: uint64(val), celValue: val, adapter: adapter, optional: opt,
+ }
+ case types.Bool:
+ qual = &boolQualifier{
+ id: id, value: bool(val), celValue: val, adapter: adapter, optional: opt,
+ }
+ case types.Double:
+ qual = &doubleQualifier{
+ id: id, value: float64(val), celValue: val, adapter: adapter, optional: opt,
+ }
+ case *types.Unknown:
+ qual = &unknownQualifier{id: id, value: val}
+ default:
+ if q, ok := v.(Qualifier); ok {
+ return q, nil
+ }
+ return nil, fmt.Errorf("invalid qualifier type: %T", v)
+ }
+ return qual, nil
+}
+
+type attrQualifier struct {
+ id int64
+ Attribute
+ optional bool
+}
+
+// ID implements the Qualifier interface method and returns the qualification instruction id
+// rather than the attribute id.
+func (q *attrQualifier) ID() int64 {
+ return q.id
+}
+
+// IsOptional implements the Qualifier interface method.
+func (q *attrQualifier) IsOptional() bool {
+ return q.optional
+}
+
+type stringQualifier struct {
+ id int64
+ value string
+ celValue ref.Val
+ adapter types.Adapter
+ optional bool
+}
+
+// ID is an implementation of the Qualifier interface method.
+func (q *stringQualifier) ID() int64 {
+ return q.id
+}
+
+// IsOptional implements the Qualifier interface method.
+func (q *stringQualifier) IsOptional() bool {
+ return q.optional
+}
+
+// Qualify implements the Qualifier interface method.
+func (q *stringQualifier) Qualify(vars Activation, obj any) (any, error) {
+ val, _, err := q.qualifyInternal(vars, obj, false, false)
+ return val, err
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (q *stringQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return q.qualifyInternal(vars, obj, true, presenceOnly)
+}
+
+func (q *stringQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) {
+ s := q.value
+ switch o := obj.(type) {
+ case map[string]any:
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[string]string:
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[string]int:
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[string]int32:
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[string]int64:
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[string]uint:
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[string]uint32:
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[string]uint64:
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[string]float32:
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[string]float64:
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[string]bool:
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
+ default:
+ return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly)
+ }
+ if presenceTest {
+ return nil, false, nil
+ }
+ return nil, false, missingKey(q.celValue)
+}
+
+// Value implements the ConstantQualifier interface
+func (q *stringQualifier) Value() ref.Val {
+ return q.celValue
+}
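+
+// A small sketch of direct qualification over a specialized map type (the
+// inputs are hypothetical):
+//
+//    q, _ := newQualifier(types.DefaultTypeAdapter, 1, "name", false)
+//    v, _ := q.Qualify(EmptyActivation(), map[string]string{"name": "cel"})
+//    // v == "cel"; a missing key would yield a missing key error instead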
+
+type intQualifier struct {
+ id int64
+ value int64
+ celValue ref.Val
+ adapter types.Adapter
+ optional bool
+}
+
+// ID is an implementation of the Qualifier interface method.
+func (q *intQualifier) ID() int64 {
+ return q.id
+}
+
+// IsOptional implements the Qualifier interface method.
+func (q *intQualifier) IsOptional() bool {
+ return q.optional
+}
+
+// Qualify implements the Qualifier interface method.
+func (q *intQualifier) Qualify(vars Activation, obj any) (any, error) {
+ val, _, err := q.qualifyInternal(vars, obj, false, false)
+ return val, err
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (q *intQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return q.qualifyInternal(vars, obj, true, presenceOnly)
+}
+
+func (q *intQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) {
+ i := q.value
+ var isMap bool
+ switch o := obj.(type) {
+ // The specialized map types supported by an int qualifier are considerably fewer than the set
+ // of specialized map types supported by string qualifiers since they are less frequently used
+ // than string-based map keys. Additional specializations may be added in the future if
+ // desired.
+ case map[int]any:
+ isMap = true
+ obj, isKey := o[int(i)]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[int32]any:
+ isMap = true
+ obj, isKey := o[int32(i)]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[int64]any:
+ isMap = true
+ obj, isKey := o[i]
+ if isKey {
+ return obj, true, nil
+ }
+ case []any:
+ isIndex := i >= 0 && i < int64(len(o))
+ if isIndex {
+ return o[i], true, nil
+ }
+ case []string:
+ isIndex := i >= 0 && i < int64(len(o))
+ if isIndex {
+ return o[i], true, nil
+ }
+ case []int:
+ isIndex := i >= 0 && i < int64(len(o))
+ if isIndex {
+ return o[i], true, nil
+ }
+ case []int32:
+ isIndex := i >= 0 && i < int64(len(o))
+ if isIndex {
+ return o[i], true, nil
+ }
+ case []int64:
+ isIndex := i >= 0 && i < int64(len(o))
+ if isIndex {
+ return o[i], true, nil
+ }
+ case []uint:
+ isIndex := i >= 0 && i < int64(len(o))
+ if isIndex {
+ return o[i], true, nil
+ }
+ case []uint32:
+ isIndex := i >= 0 && i < int64(len(o))
+ if isIndex {
+ return o[i], true, nil
+ }
+ case []uint64:
+ isIndex := i >= 0 && i < int64(len(o))
+ if isIndex {
+ return o[i], true, nil
+ }
+ case []float32:
+ isIndex := i >= 0 && i < int64(len(o))
+ if isIndex {
+ return o[i], true, nil
+ }
+ case []float64:
+ isIndex := i >= 0 && i < int64(len(o))
+ if isIndex {
+ return o[i], true, nil
+ }
+ case []bool:
+ isIndex := i >= 0 && i < int64(len(o))
+ if isIndex {
+ return o[i], true, nil
+ }
+ default:
+ return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly)
+ }
+ if presenceTest {
+ return nil, false, nil
+ }
+ if isMap {
+ return nil, false, missingKey(q.celValue)
+ }
+ return nil, false, missingIndex(q.celValue)
+}
+
+// Value implements the ConstantQualifier interface
+func (q *intQualifier) Value() ref.Val {
+ return q.celValue
+}
+
+type uintQualifier struct {
+ id int64
+ value uint64
+ celValue ref.Val
+ adapter types.Adapter
+ optional bool
+}
+
+// ID is an implementation of the Qualifier interface method.
+func (q *uintQualifier) ID() int64 {
+ return q.id
+}
+
+// IsOptional implements the Qualifier interface method.
+func (q *uintQualifier) IsOptional() bool {
+ return q.optional
+}
+
+// Qualify implements the Qualifier interface method.
+func (q *uintQualifier) Qualify(vars Activation, obj any) (any, error) {
+ val, _, err := q.qualifyInternal(vars, obj, false, false)
+ return val, err
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (q *uintQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return q.qualifyInternal(vars, obj, true, presenceOnly)
+}
+
+func (q *uintQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) {
+ u := q.value
+ switch o := obj.(type) {
+ // The specialized map types supported by a uint qualifier are considerably fewer than the set
+ // of specialized map types supported by string qualifiers since they are less frequently used
+ // than string-based map keys. Additional specializations may be added in the future if
+ // desired.
+ case map[uint]any:
+ obj, isKey := o[uint(u)]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[uint32]any:
+ obj, isKey := o[uint32(u)]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[uint64]any:
+ obj, isKey := o[u]
+ if isKey {
+ return obj, true, nil
+ }
+ default:
+ return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly)
+ }
+ if presenceTest {
+ return nil, false, nil
+ }
+ return nil, false, missingKey(q.celValue)
+}
+
+// Value implements the ConstantQualifier interface
+func (q *uintQualifier) Value() ref.Val {
+ return q.celValue
+}
+
+type boolQualifier struct {
+ id int64
+ value bool
+ celValue ref.Val
+ adapter types.Adapter
+ optional bool
+}
+
+// ID is an implementation of the Qualifier interface method.
+func (q *boolQualifier) ID() int64 {
+ return q.id
+}
+
+// IsOptional implements the Qualifier interface method.
+func (q *boolQualifier) IsOptional() bool {
+ return q.optional
+}
+
+// Qualify implements the Qualifier interface method.
+func (q *boolQualifier) Qualify(vars Activation, obj any) (any, error) {
+ val, _, err := q.qualifyInternal(vars, obj, false, false)
+ return val, err
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (q *boolQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return q.qualifyInternal(vars, obj, true, presenceOnly)
+}
+
+func (q *boolQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) {
+ b := q.value
+ switch o := obj.(type) {
+ case map[bool]any:
+ obj, isKey := o[b]
+ if isKey {
+ return obj, true, nil
+ }
+ default:
+ return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly)
+ }
+ if presenceTest {
+ return nil, false, nil
+ }
+ return nil, false, missingKey(q.celValue)
+}
+
+// Value implements the ConstantQualifier interface
+func (q *boolQualifier) Value() ref.Val {
+ return q.celValue
+}
+
+// fieldQualifier indicates that the qualification is a well-defined field with a known
+// field type. When the field type is known this can be used to improve the speed and
+// efficiency of field resolution.
+type fieldQualifier struct {
+ id int64
+ Name string
+ FieldType *types.FieldType
+ adapter types.Adapter
+ optional bool
+}
+
+// ID is an implementation of the Qualifier interface method.
+func (q *fieldQualifier) ID() int64 {
+ return q.id
+}
+
+// IsOptional implements the Qualifier interface method.
+func (q *fieldQualifier) IsOptional() bool {
+ return q.optional
+}
+
+// Qualify implements the Qualifier interface method.
+func (q *fieldQualifier) Qualify(vars Activation, obj any) (any, error) {
+ if rv, ok := obj.(ref.Val); ok {
+ obj = rv.Value()
+ }
+ val, err := q.FieldType.GetFrom(obj)
+ if err != nil {
+ return nil, err
+ }
+ return val, nil
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (q *fieldQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ if rv, ok := obj.(ref.Val); ok {
+ obj = rv.Value()
+ }
+ if !q.FieldType.IsSet(obj) {
+ return nil, false, nil
+ }
+ if presenceOnly {
+ return nil, true, nil
+ }
+ val, err := q.FieldType.GetFrom(obj)
+ if err != nil {
+ return nil, false, err
+ }
+ return val, true, nil
+}
+
+// Value implements the ConstantQualifier interface
+func (q *fieldQualifier) Value() ref.Val {
+ return types.String(q.Name)
+}
+
+// doubleQualifier qualifies a CEL object, map, or list using a double value.
+//
+// This qualifier is used for working with dynamic data like JSON or protobuf.Any where the value
+// type may not be known ahead of time and may not conform to the standard types supported as valid
+// protobuf map key types.
+type doubleQualifier struct {
+ id int64
+ value float64
+ celValue ref.Val
+ adapter types.Adapter
+ optional bool
+}
+
+// ID is an implementation of the Qualifier interface method.
+func (q *doubleQualifier) ID() int64 {
+ return q.id
+}
+
+// IsOptional implements the Qualifier interface method.
+func (q *doubleQualifier) IsOptional() bool {
+ return q.optional
+}
+
+// Qualify implements the Qualifier interface method.
+func (q *doubleQualifier) Qualify(vars Activation, obj any) (any, error) {
+ val, _, err := q.qualifyInternal(vars, obj, false, false)
+ return val, err
+}
+
+func (q *doubleQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return q.qualifyInternal(vars, obj, true, presenceOnly)
+}
+
+func (q *doubleQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) {
+ return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly)
+}
+
+// Value implements the ConstantQualifier interface
+func (q *doubleQualifier) Value() ref.Val {
+ return q.celValue
+}
+
+// unknownQualifier is a simple qualifier which always returns a preconfigured set of unknown values
+// for any value subject to qualification. This is consistent with CEL's unknown handling elsewhere.
+type unknownQualifier struct {
+ id int64
+ value *types.Unknown
+}
+
+// ID is an implementation of the Qualifier interface method.
+func (q *unknownQualifier) ID() int64 {
+ return q.id
+}
+
+// IsOptional returns trivially false as the unknown value is always returned.
+func (q *unknownQualifier) IsOptional() bool {
+ return false
+}
+
+// Qualify returns the unknown value associated with this qualifier.
+func (q *unknownQualifier) Qualify(vars Activation, obj any) (any, error) {
+ return q.value, nil
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (q *unknownQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return q.value, true, nil
+}
+
+// Value implements the ConstantQualifier interface
+func (q *unknownQualifier) Value() ref.Val {
+ return q.value
+}
+
+func applyQualifiers(vars Activation, obj any, qualifiers []Qualifier) (any, bool, error) {
+ optObj, isOpt := obj.(*types.Optional)
+ if isOpt {
+ if !optObj.HasValue() {
+ return optObj, false, nil
+ }
+ obj = optObj.GetValue().Value()
+ }
+
+ var err error
+ for _, qual := range qualifiers {
+ var qualObj any
+ isOpt = isOpt || qual.IsOptional()
+ if isOpt {
+ var present bool
+ qualObj, present, err = qual.QualifyIfPresent(vars, obj, false)
+ if err != nil {
+ return nil, false, err
+ }
+ if !present {
+ // We return optional none here with a presence of 'false' as the layers
+ // above will attempt to call types.OptionalOf() on a present value if any
+ // of the qualifiers is optional.
+ return types.OptionalNone, false, nil
+ }
+ } else {
+ qualObj, err = qual.Qualify(vars, obj)
+ if err != nil {
+ return nil, false, err
+ }
+ }
+ obj = qualObj
+ }
+ return obj, isOpt, nil
+}
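+
+// A minimal usage sketch of qualifier application (fieldB and index2 are
+// hypothetical Qualifier values standing in for planner output): for an
+// expression like `a.b[2]`, resolution is roughly:
+//
+//	obj, _ := vars.ResolveName("a")
+//	out, isOpt, err := applyQualifiers(vars, obj, []Qualifier{fieldB, index2})
+//
+// When any qualifier in the chain is optional and its target is absent,
+// types.OptionalNone is returned with a present flag of false.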
+
+// attrQualify performs a qualification using the result of an attribute evaluation.
+func attrQualify(fac AttributeFactory, vars Activation, obj any, qualAttr Attribute) (any, error) {
+ val, err := qualAttr.Resolve(vars)
+ if err != nil {
+ return nil, err
+ }
+ qual, err := fac.NewQualifier(nil, qualAttr.ID(), val, qualAttr.IsOptional())
+ if err != nil {
+ return nil, err
+ }
+ return qual.Qualify(vars, obj)
+}
+
+// attrQualifyIfPresent conditionally performs the qualification when the result of the attribute
+// evaluation is present on the target object.
+func attrQualifyIfPresent(fac AttributeFactory, vars Activation, obj any, qualAttr Attribute,
+ presenceOnly bool) (any, bool, error) {
+ val, err := qualAttr.Resolve(vars)
+ if err != nil {
+ return nil, false, err
+ }
+ qual, err := fac.NewQualifier(nil, qualAttr.ID(), val, qualAttr.IsOptional())
+ if err != nil {
+ return nil, false, err
+ }
+ return qual.QualifyIfPresent(vars, obj, presenceOnly)
+}
+
+// refQualify attempts to convert the value to a CEL value, then uses reflection methods to apply
+// the qualifier, with the option to presence-test field accesses before retrieving field values.
+func refQualify(adapter types.Adapter, obj any, idx ref.Val, presenceTest, presenceOnly bool) (ref.Val, bool, error) {
+ celVal := adapter.NativeToValue(obj)
+ switch v := celVal.(type) {
+ case *types.Unknown:
+ return v, true, nil
+ case *types.Err:
+ return nil, false, v
+ case traits.Mapper:
+ val, found := v.Find(idx)
+ // If the index is of the wrong type for the map, then it is possible
+ // for the Find call to produce an error.
+ if types.IsError(val) {
+ return nil, false, val.(*types.Err)
+ }
+ if found {
+ return val, true, nil
+ }
+ if presenceTest {
+ return nil, false, nil
+ }
+ return nil, false, missingKey(idx)
+ case traits.Lister:
+ // If the index argument is not a valid numeric type, then it is possible
+ // for the index operation to produce an error.
+ i, err := types.IndexOrError(idx)
+ if err != nil {
+ return nil, false, err
+ }
+ celIndex := types.Int(i)
+ if i >= 0 && celIndex < v.Size().(types.Int) {
+ return v.Get(idx), true, nil
+ }
+ if presenceTest {
+ return nil, false, nil
+ }
+ return nil, false, missingIndex(idx)
+ case traits.Indexer:
+ if presenceTest {
+ ft, ok := v.(traits.FieldTester)
+ if ok {
+ presence := ft.IsSet(idx)
+ if types.IsError(presence) {
+ return nil, false, presence.(*types.Err)
+ }
+ // If not found or presence only test, then return.
+ // Otherwise, if found, obtain the value later on.
+ if presenceOnly || presence == types.False {
+ return nil, presence == types.True, nil
+ }
+ }
+ }
+ val := v.Get(idx)
+ if types.IsError(val) {
+ return nil, false, val.(*types.Err)
+ }
+ return val, true, nil
+ default:
+ if presenceTest {
+ return nil, false, nil
+ }
+ return nil, false, missingKey(idx)
+ }
+}
+
+// resolutionError is a custom error type which encodes the different error states which may
+// occur during attribute resolution.
+type resolutionError struct {
+ missingAttribute string
+ missingIndex ref.Val
+ missingKey ref.Val
+}
+
+func (e *resolutionError) isMissingAttribute() bool {
+ return e.missingAttribute != ""
+}
+
+func missingIndex(missing ref.Val) *resolutionError {
+ return &resolutionError{
+ missingIndex: missing,
+ }
+}
+
+func missingKey(missing ref.Val) *resolutionError {
+ return &resolutionError{
+ missingKey: missing,
+ }
+}
+
+func missingAttribute(attr string) *resolutionError {
+ return &resolutionError{
+ missingAttribute: attr,
+ }
+}
+
+// Error implements the error interface method.
+func (e *resolutionError) Error() string {
+ if e.missingKey != nil {
+ return fmt.Sprintf("no such key: %v", e.missingKey)
+ }
+ if e.missingIndex != nil {
+ return fmt.Sprintf("index out of bounds: %v", e.missingIndex)
+ }
+ if e.missingAttribute != "" {
+ return fmt.Sprintf("no such attribute(s): %s", e.missingAttribute)
+ }
+ return "invalid attribute"
+}
+
+// Is implements the comparison used by errors.Is() (available since Go 1.13).
+func (e *resolutionError) Is(err error) bool {
+ return err.Error() == e.Error()
+}
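+
+// A brief sketch of how these errors surface (the key "foo" is illustrative):
+// resolving a map access for an absent key yields a resolutionError whose
+// Error() text is "no such key: foo", and errors.Is matches it against
+// another resolutionError carrying the same message:
+//
+//	err := missingKey(types.String("foo"))
+//	errors.Is(err, missingKey(types.String("foo"))) // true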
diff --git a/vendor/github.com/google/cel-go/interpreter/decorators.go b/vendor/github.com/google/cel-go/interpreter/decorators.go
new file mode 100644
index 0000000..502db35
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/decorators.go
@@ -0,0 +1,272 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+// InterpretableDecorator is a functional interface for decorating or replacing
+// Interpretable expression nodes at construction time.
+type InterpretableDecorator func(Interpretable) (Interpretable, error)
+
+// decObserveEval records evaluation state into an EvalState object.
+func decObserveEval(observer EvalObserver) InterpretableDecorator {
+ return func(i Interpretable) (Interpretable, error) {
+ switch inst := i.(type) {
+ case *evalWatch, *evalWatchAttr, *evalWatchConst, *evalWatchConstructor:
+ // these instructions are already watching; return straight away.
+ return i, nil
+ case InterpretableAttribute:
+ return &evalWatchAttr{
+ InterpretableAttribute: inst,
+ observer: observer,
+ }, nil
+ case InterpretableConst:
+ return &evalWatchConst{
+ InterpretableConst: inst,
+ observer: observer,
+ }, nil
+ case InterpretableConstructor:
+ return &evalWatchConstructor{
+ constructor: inst,
+ observer: observer,
+ }, nil
+ default:
+ return &evalWatch{
+ Interpretable: i,
+ observer: observer,
+ }, nil
+ }
+ }
+}
+
+// decInterruptFolds creates an interpretable decorator which marks comprehensions as interruptable,
+// where the interrupt state is communicated via a hidden variable on the Activation.
+func decInterruptFolds() InterpretableDecorator {
+ return func(i Interpretable) (Interpretable, error) {
+ fold, ok := i.(*evalFold)
+ if !ok {
+ return i, nil
+ }
+ fold.interruptable = true
+ return fold, nil
+ }
+}
+
+// decDisableShortcircuits ensures that all branches of an expression will be evaluated with no short-circuiting.
+func decDisableShortcircuits() InterpretableDecorator {
+ return func(i Interpretable) (Interpretable, error) {
+ switch expr := i.(type) {
+ case *evalOr:
+ return &evalExhaustiveOr{
+ id: expr.id,
+ terms: expr.terms,
+ }, nil
+ case *evalAnd:
+ return &evalExhaustiveAnd{
+ id: expr.id,
+ terms: expr.terms,
+ }, nil
+ case *evalFold:
+ expr.exhaustive = true
+ return expr, nil
+ case InterpretableAttribute:
+ cond, isCond := expr.Attr().(*conditionalAttribute)
+ if isCond {
+ return &evalExhaustiveConditional{
+ id: cond.id,
+ attr: cond,
+ adapter: expr.Adapter(),
+ }, nil
+ }
+ }
+ return i, nil
+ }
+}
+
+// decOptimize optimizes the program plan by looking for common evaluation patterns and
+// conditionally precomputing the result.
+// - build list and map values with constant elements.
+// - convert 'in' operations to set membership tests if possible.
+func decOptimize() InterpretableDecorator {
+ return func(i Interpretable) (Interpretable, error) {
+ switch inst := i.(type) {
+ case *evalList:
+ return maybeBuildListLiteral(i, inst)
+ case *evalMap:
+ return maybeBuildMapLiteral(i, inst)
+ case InterpretableCall:
+ if inst.OverloadID() == overloads.InList {
+ return maybeOptimizeSetMembership(i, inst)
+ }
+ if overloads.IsTypeConversionFunction(inst.Function()) {
+ return maybeOptimizeConstUnary(i, inst)
+ }
+ }
+ return i, nil
+ }
+}
+
+// decRegexOptimizer compiles regex pattern string constants.
+func decRegexOptimizer(regexOptimizations ...*RegexOptimization) InterpretableDecorator {
+ functionMatchMap := make(map[string]*RegexOptimization)
+ overloadMatchMap := make(map[string]*RegexOptimization)
+ for _, m := range regexOptimizations {
+ functionMatchMap[m.Function] = m
+ if m.OverloadID != "" {
+ overloadMatchMap[m.OverloadID] = m
+ }
+ }
+
+ return func(i Interpretable) (Interpretable, error) {
+ call, ok := i.(InterpretableCall)
+ if !ok {
+ return i, nil
+ }
+
+ var matcher *RegexOptimization
+ var found bool
+ if call.OverloadID() != "" {
+ matcher, found = overloadMatchMap[call.OverloadID()]
+ }
+ if !found {
+ matcher, found = functionMatchMap[call.Function()]
+ }
+ if !found || matcher.RegexIndex >= len(call.Args()) {
+ return i, nil
+ }
+ args := call.Args()
+ regexArg := args[matcher.RegexIndex]
+ regexStr, isConst := regexArg.(InterpretableConst)
+ if !isConst {
+ return i, nil
+ }
+ pattern, ok := regexStr.Value().(types.String)
+ if !ok {
+ return i, nil
+ }
+ return matcher.Factory(call, string(pattern))
+ }
+}
+
+func maybeOptimizeConstUnary(i Interpretable, call InterpretableCall) (Interpretable, error) {
+ args := call.Args()
+ if len(args) != 1 {
+ return i, nil
+ }
+ _, isConst := args[0].(InterpretableConst)
+ if !isConst {
+ return i, nil
+ }
+ val := call.Eval(EmptyActivation())
+ if types.IsError(val) {
+ return nil, val.(*types.Err)
+ }
+ return NewConstValue(call.ID(), val), nil
+}
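+
+// For example (illustrative), a type-conversion call like `int("42")` whose
+// sole argument is constant is evaluated once at plan time and replaced with
+// the constant value 42.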
+
+func maybeBuildListLiteral(i Interpretable, l *evalList) (Interpretable, error) {
+ for _, elem := range l.elems {
+ _, isConst := elem.(InterpretableConst)
+ if !isConst {
+ return i, nil
+ }
+ }
+ return NewConstValue(l.ID(), l.Eval(EmptyActivation())), nil
+}
+
+func maybeBuildMapLiteral(i Interpretable, mp *evalMap) (Interpretable, error) {
+ for idx, key := range mp.keys {
+ _, isConst := key.(InterpretableConst)
+ if !isConst {
+ return i, nil
+ }
+ _, isConst = mp.vals[idx].(InterpretableConst)
+ if !isConst {
+ return i, nil
+ }
+ }
+ return NewConstValue(mp.ID(), mp.Eval(EmptyActivation())), nil
+}
+
+// maybeOptimizeSetMembership may convert an 'in' operation against a list to map key membership
+// test if the following conditions are true:
+// - the list is a constant with homogeneous element types.
+// - the elements are all of primitive type.
+func maybeOptimizeSetMembership(i Interpretable, inlist InterpretableCall) (Interpretable, error) {
+ args := inlist.Args()
+ lhs := args[0]
+ rhs := args[1]
+ l, isConst := rhs.(InterpretableConst)
+ if !isConst {
+ return i, nil
+ }
+ // When the incoming binary call is flagged as the InList overload, the value will
+ // always be convertible to a `traits.Lister` type.
+ list := l.Value().(traits.Lister)
+ if list.Size() == types.IntZero {
+ return NewConstValue(inlist.ID(), types.False), nil
+ }
+ it := list.Iterator()
+ valueSet := make(map[ref.Val]ref.Val)
+ for it.HasNext() == types.True {
+ elem := it.Next()
+ if !types.IsPrimitiveType(elem) || elem.Type() == types.BytesType {
+ // Note: non-primitive types are not yet supported, and []byte isn't hashable.
+ return i, nil
+ }
+ valueSet[elem] = types.True
+ switch ev := elem.(type) {
+ case types.Double:
+ iv := ev.ConvertToType(types.IntType)
+ // Ensure that only lossless conversions are added to the set
+ if !types.IsError(iv) && iv.Equal(ev) == types.True {
+ valueSet[iv] = types.True
+ }
+ uv := ev.ConvertToType(types.UintType)
+ // Ensure that only lossless conversions are added to the set
+ if !types.IsError(uv) && uv.Equal(ev) == types.True {
+ valueSet[uv] = types.True
+ }
+ case types.Int:
+ dv := ev.ConvertToType(types.DoubleType)
+ if !types.IsError(dv) {
+ valueSet[dv] = types.True
+ }
+ uv := ev.ConvertToType(types.UintType)
+ if !types.IsError(uv) {
+ valueSet[uv] = types.True
+ }
+ case types.Uint:
+ dv := ev.ConvertToType(types.DoubleType)
+ if !types.IsError(dv) {
+ valueSet[dv] = types.True
+ }
+ iv := ev.ConvertToType(types.IntType)
+ if !types.IsError(iv) {
+ valueSet[iv] = types.True
+ }
+ }
+ }
+ return &evalSetMembership{
+ inst: inlist,
+ arg: lhs,
+ valueSet: valueSet,
+ }, nil
+}
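+
+// A short sketch of the effect of this optimization (the expression is
+// illustrative): `x in [1, 2, 3]` is rewritten into an evalSetMembership whose
+// valueSet maps 1, 2, and 3 to true, together with their double and uint
+// counterparts, so membership becomes a single map lookup rather than a
+// linear scan of the list.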
diff --git a/vendor/github.com/google/cel-go/interpreter/dispatcher.go b/vendor/github.com/google/cel-go/interpreter/dispatcher.go
new file mode 100644
index 0000000..8f0bdb7
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/dispatcher.go
@@ -0,0 +1,100 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+ "fmt"
+
+ "github.com/google/cel-go/common/functions"
+)
+
+// Dispatcher resolves function calls to their appropriate overload.
+type Dispatcher interface {
+ // Add one or more overloads, returning an error if any Overload shares the same Overload.Operator name.
+ Add(overloads ...*functions.Overload) error
+
+ // FindOverload returns an Overload definition matching the provided name.
+ FindOverload(overload string) (*functions.Overload, bool)
+
+ // OverloadIds returns the set of all overload identifiers configured for dispatch.
+ OverloadIds() []string
+}
+
+// NewDispatcher returns an empty Dispatcher instance.
+func NewDispatcher() Dispatcher {
+ return &defaultDispatcher{
+ overloads: make(map[string]*functions.Overload)}
+}
+
+// ExtendDispatcher returns a Dispatcher which inherits the overloads of its parent, and
+// provides an isolation layer between built-ins and extension functions which is useful
+// for forward compatibility.
+func ExtendDispatcher(parent Dispatcher) Dispatcher {
+ return &defaultDispatcher{
+ parent: parent,
+ overloads: make(map[string]*functions.Overload)}
+}
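+
+// A minimal usage sketch (the overload name "shake" is hypothetical):
+//
+//	base := NewDispatcher()
+//	_ = base.Add(&functions.Overload{
+//		Operator: "shake",
+//		Unary:    func(v ref.Val) ref.Val { return v },
+//	})
+//	child := ExtendDispatcher(base)
+//	o, found := child.FindOverload("shake") // resolves via the parent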
+
+// overloadMap helper type for indexing overloads by function name.
+type overloadMap map[string]*functions.Overload
+
+// defaultDispatcher struct which contains an overload map.
+type defaultDispatcher struct {
+ parent Dispatcher
+ overloads overloadMap
+}
+
+// Add implements the Dispatcher.Add interface method.
+func (d *defaultDispatcher) Add(overloads ...*functions.Overload) error {
+ for _, o := range overloads {
+ // add the overload unless an overload of the same name has already been provided.
+ if _, found := d.overloads[o.Operator]; found {
+ return fmt.Errorf("overload already exists '%s'", o.Operator)
+ }
+ // index the overload by function name.
+ d.overloads[o.Operator] = o
+ }
+ return nil
+}
+
+// FindOverload implements the Dispatcher.FindOverload interface method.
+func (d *defaultDispatcher) FindOverload(overload string) (*functions.Overload, bool) {
+ o, found := d.overloads[overload]
+ // Attempt to dispatch to an overload defined in the parent.
+ if !found && d.parent != nil {
+ return d.parent.FindOverload(overload)
+ }
+ return o, found
+}
+
+// OverloadIds implements the Dispatcher interface method.
+func (d *defaultDispatcher) OverloadIds() []string {
+ i := 0
+ overloads := make([]string, len(d.overloads))
+ for name := range d.overloads {
+ overloads[i] = name
+ i++
+ }
+ if d.parent == nil {
+ return overloads
+ }
+ parentOverloads := d.parent.OverloadIds()
+ for _, pName := range parentOverloads {
+ if _, found := d.overloads[pName]; !found {
+ overloads = append(overloads, pName)
+ }
+ }
+ return overloads
+}
diff --git a/vendor/github.com/google/cel-go/interpreter/evalstate.go b/vendor/github.com/google/cel-go/interpreter/evalstate.go
new file mode 100644
index 0000000..4bdd1fd
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/evalstate.go
@@ -0,0 +1,79 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// EvalState tracks the values associated with expression ids during execution.
+type EvalState interface {
+ // IDs returns the list of ids with recorded values.
+ IDs() []int64
+
+ // Value returns the observed value of the given expression id if found, and a nil false
+ // result if not.
+ Value(int64) (ref.Val, bool)
+
+ // SetValue sets the observed value of the expression id.
+ SetValue(int64, ref.Val)
+
+ // Reset clears the previously recorded expression values.
+ Reset()
+}
+
+// evalState permits the mutation of evaluation state for a given expression id.
+type evalState struct {
+ values map[int64]ref.Val
+}
+
+// NewEvalState returns an EvalState instance used to observe the intermediate
+// evaluations of an expression.
+func NewEvalState() EvalState {
+ return &evalState{
+ values: make(map[int64]ref.Val),
+ }
+}
+
+// IDs implements the EvalState interface method.
+func (s *evalState) IDs() []int64 {
+ var ids []int64
+ for k, v := range s.values {
+ if v != nil {
+ ids = append(ids, k)
+ }
+ }
+ return ids
+}
+
+// Value is an implementation of the EvalState interface method.
+func (s *evalState) Value(exprID int64) (ref.Val, bool) {
+ val, found := s.values[exprID]
+ return val, found
+}
+
+// SetValue is an implementation of the EvalState interface method.
+func (s *evalState) SetValue(exprID int64, val ref.Val) {
+ if val == nil {
+ delete(s.values, exprID)
+ } else {
+ s.values[exprID] = val
+ }
+}
+
+// Reset implements the EvalState interface method.
+func (s *evalState) Reset() {
+ s.values = map[int64]ref.Val{}
+}
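+
+// A minimal usage sketch: an EvalState is typically paired with an observer
+// decorator such as decObserveEval so intermediate values can be inspected
+// after evaluation.
+//
+//	state := NewEvalState()
+//	// ... evaluate a program planned with an observer that calls state.SetValue ...
+//	for _, id := range state.IDs() {
+//		if v, found := state.Value(id); found {
+//			fmt.Println(id, v)
+//		}
+//	}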
diff --git a/vendor/github.com/google/cel-go/interpreter/formatting.go b/vendor/github.com/google/cel-go/interpreter/formatting.go
new file mode 100644
index 0000000..e3f7533
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/formatting.go
@@ -0,0 +1,383 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "unicode"
+
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+)
+
+type typeVerifier func(int64, ...ref.Type) (bool, error)
+
+// InterpolateFormattedString checks the syntax and cardinality of any string.format calls present in the expression and reports
+// any errors at compile time.
+func InterpolateFormattedString(verifier typeVerifier) InterpretableDecorator {
+ return func(inter Interpretable) (Interpretable, error) {
+ call, ok := inter.(InterpretableCall)
+ if !ok {
+ return inter, nil
+ }
+ if call.OverloadID() != "string_format" {
+ return inter, nil
+ }
+ args := call.Args()
+ if len(args) != 2 {
+ return nil, fmt.Errorf("wrong number of arguments to string.format (expected 2, got %d)", len(args))
+ }
+ fmtStrInter, ok := args[0].(InterpretableConst)
+ if !ok {
+ return inter, nil
+ }
+ var fmtArgsInter InterpretableConstructor
+ fmtArgsInter, ok = args[1].(InterpretableConstructor)
+ if !ok {
+ return inter, nil
+ }
+ if fmtArgsInter.Type() != types.ListType {
+ // don't necessarily return an error since the list may be DynType
+ return inter, nil
+ }
+ formatStr := fmtStrInter.Value().Value().(string)
+ initVals := fmtArgsInter.InitVals()
+
+ formatCheck := &formatCheck{
+ args: initVals,
+ verifier: verifier,
+ }
+ // use a placeholder locale, since locale doesn't affect syntax
+ _, err := ParseFormatString(formatStr, formatCheck, formatCheck, "en_US")
+ if err != nil {
+ return nil, err
+ }
+ seenArgs := formatCheck.argsRequested
+ if len(initVals) > seenArgs {
+ return nil, fmt.Errorf("too many arguments supplied to string.format (expected %d, got %d)", seenArgs, len(initVals))
+ }
+ return inter, nil
+ }
+}
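+
+// For example (illustrative), an expression such as `"%d items".format([x])`
+// where x is statically known to be a string would be rejected at plan time
+// with "integer clause can only be used on integers", since the format string
+// and argument list are both inspectable constants/constructors here.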
+
+type formatCheck struct {
+ args []Interpretable
+ argsRequested int
+ curArgIndex int64
+ enableCheckArgTypes bool
+ verifier typeVerifier
+}
+
+func (c *formatCheck) String(arg ref.Val, locale string) (string, error) {
+ valid, err := verifyString(c.args[c.curArgIndex], c.verifier)
+ if err != nil {
+ return "", err
+ }
+ if !valid {
+ return "", errors.New("string clause can only be used on strings, bools, bytes, ints, doubles, maps, lists, types, durations, and timestamps")
+ }
+ return "", nil
+}
+
+func (c *formatCheck) Decimal(arg ref.Val, locale string) (string, error) {
+ id := c.args[c.curArgIndex].ID()
+ valid, err := c.verifier(id, types.IntType, types.UintType)
+ if err != nil {
+ return "", err
+ }
+ if !valid {
+ return "", errors.New("integer clause can only be used on integers")
+ }
+ return "", nil
+}
+
+func (c *formatCheck) Fixed(precision *int) func(ref.Val, string) (string, error) {
+ return func(arg ref.Val, locale string) (string, error) {
+ id := c.args[c.curArgIndex].ID()
+ // we allow StringType since "NaN", "Infinity", and "-Infinity" are also valid values
+ valid, err := c.verifier(id, types.DoubleType, types.StringType)
+ if err != nil {
+ return "", err
+ }
+ if !valid {
+ return "", errors.New("fixed-point clause can only be used on doubles")
+ }
+ return "", nil
+ }
+}
+
+func (c *formatCheck) Scientific(precision *int) func(ref.Val, string) (string, error) {
+ return func(arg ref.Val, locale string) (string, error) {
+ id := c.args[c.curArgIndex].ID()
+ valid, err := c.verifier(id, types.DoubleType, types.StringType)
+ if err != nil {
+ return "", err
+ }
+ if !valid {
+ return "", errors.New("scientific clause can only be used on doubles")
+ }
+ return "", nil
+ }
+}
+
+func (c *formatCheck) Binary(arg ref.Val, locale string) (string, error) {
+ id := c.args[c.curArgIndex].ID()
+ valid, err := c.verifier(id, types.IntType, types.UintType, types.BoolType)
+ if err != nil {
+ return "", err
+ }
+ if !valid {
+ return "", errors.New("only integers and bools can be formatted as binary")
+ }
+ return "", nil
+}
+
+func (c *formatCheck) Hex(useUpper bool) func(ref.Val, string) (string, error) {
+ return func(arg ref.Val, locale string) (string, error) {
+ id := c.args[c.curArgIndex].ID()
+ valid, err := c.verifier(id, types.IntType, types.UintType, types.StringType, types.BytesType)
+ if err != nil {
+ return "", err
+ }
+ if !valid {
+ return "", errors.New("only integers, byte buffers, and strings can be formatted as hex")
+ }
+ return "", nil
+ }
+}
+
+func (c *formatCheck) Octal(arg ref.Val, locale string) (string, error) {
+ id := c.args[c.curArgIndex].ID()
+ valid, err := c.verifier(id, types.IntType, types.UintType)
+ if err != nil {
+ return "", err
+ }
+ if !valid {
+ return "", errors.New("octal clause can only be used on integers")
+ }
+ return "", nil
+}
+
+func (c *formatCheck) Arg(index int64) (ref.Val, error) {
+ c.argsRequested++
+ c.curArgIndex = index
+ // return a dummy value - this is immediately passed back to us
+ // through one of the FormatStringInterpolator functions, so anything will do
+ return types.Int(0), nil
+}
+
+func (c *formatCheck) ArgSize() int64 {
+ return int64(len(c.args))
+}
+
+func verifyString(sub Interpretable, verifier typeVerifier) (bool, error) {
+ subVerified, err := verifier(sub.ID(),
+ types.ListType, types.MapType, types.IntType, types.UintType, types.DoubleType,
+ types.BoolType, types.StringType, types.TimestampType, types.BytesType, types.DurationType, types.TypeType, types.NullType)
+ if err != nil {
+ return false, err
+ }
+ if !subVerified {
+ return false, nil
+ }
+ con, ok := sub.(InterpretableConstructor)
+ if ok {
+ members := con.InitVals()
+ for _, m := range members {
+ // recursively verify if we're dealing with a list/map
+ verified, err := verifyString(m, verifier)
+ if err != nil {
+ return false, err
+ }
+ if !verified {
+ return false, nil
+ }
+ }
+ }
+ return true, nil
+}
+
+// FormatStringInterpolator is an interface that allows user-defined behavior
+// for formatting clause implementations, as well as argument retrieval.
+// Each function is expected to support the appropriate types as laid out in
+// the string.format documentation, and to return an error if given an inappropriate type.
+type FormatStringInterpolator interface {
+ // String takes a ref.Val and a string representing the current locale identifier
+ // and returns the Val formatted as a string, or an error if one occurred.
+ String(ref.Val, string) (string, error)
+
+ // Decimal takes a ref.Val and a string representing the current locale identifier
+ // and returns the Val formatted as a decimal integer, or an error if one occurred.
+ Decimal(ref.Val, string) (string, error)
+
+ // Fixed takes an int pointer representing precision (or nil if none was given) and
+ // returns a function operating in a similar manner to String and Decimal, taking a
+ // ref.Val and locale and returning the appropriate string. A closure is returned
+ // so precision can be set without needing an additional function call/configuration.
+ Fixed(*int) func(ref.Val, string) (string, error)
+
+ // Scientific functions identically to Fixed, except the string returned from the closure
+ // is expected to be in scientific notation.
+ Scientific(*int) func(ref.Val, string) (string, error)
+
+ // Binary takes a ref.Val and a string representing the current locale identifier
+ // and returns the Val formatted as a binary integer, or an error if one occurred.
+ Binary(ref.Val, string) (string, error)
+
+ // Hex takes a boolean that, if true, indicates the hex string output by the returned
+ // closure should use uppercase letters for A-F.
+ Hex(bool) func(ref.Val, string) (string, error)
+
+ // Octal takes a ref.Val and a string representing the current locale identifier and
+ // returns the Val formatted in octal, or an error if one occurred.
+ Octal(ref.Val, string) (string, error)
+}
+
+// FormatList is an interface that allows user-defined list-like datatypes to be used
+// for formatting clause implementations.
+type FormatList interface {
+ // Arg returns the ref.Val at the given index, or an error if one occurred.
+ Arg(int64) (ref.Val, error)
+ // ArgSize returns the length of the argument list.
+ ArgSize() int64
+}
+
+type clauseImpl func(ref.Val, string) (string, error)
+
+// ParseFormatString formats a string according to the string.format syntax, taking the clause implementations
+// from the provided FormatStringInterpolator and the args from the given FormatList.
+func ParseFormatString(formatStr string, callback FormatStringInterpolator, list FormatList, locale string) (string, error) {
+ i := 0
+ argIndex := 0
+ var builtStr strings.Builder
+ for i < len(formatStr) {
+ if formatStr[i] == '%' {
+ if i+1 < len(formatStr) && formatStr[i+1] == '%' {
+ err := builtStr.WriteByte('%')
+ if err != nil {
+ return "", fmt.Errorf("error writing format string: %w", err)
+ }
+ i += 2
+ continue
+ } else {
+ argAny, err := list.Arg(int64(argIndex))
+ if err != nil {
+ return "", err
+ }
+ if i+1 >= len(formatStr) {
+ return "", errors.New("unexpected end of string")
+ }
+ if int64(argIndex) >= list.ArgSize() {
+ return "", fmt.Errorf("index %d out of range", argIndex)
+ }
+ numRead, val, refErr := parseAndFormatClause(formatStr[i:], argAny, callback, list, locale)
+ if refErr != nil {
+ return "", refErr
+ }
+ _, err = builtStr.WriteString(val)
+ if err != nil {
+ return "", fmt.Errorf("error writing format string: %w", err)
+ }
+ i += numRead
+ argIndex++
+ }
+ } else {
+ err := builtStr.WriteByte(formatStr[i])
+ if err != nil {
+ return "", fmt.Errorf("error writing format string: %w", err)
+ }
+ i++
+ }
+ }
+ return builtStr.String(), nil
+}
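+
+// A brief sketch of the parse loop's behavior (the format string is
+// illustrative): given "%.2f%%", the parser consumes ".2" as precision,
+// dispatches 'f' to callback.Fixed(&precision), substitutes the formatted
+// argument, and then emits a literal '%' for the trailing "%%" escape.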
+
+// parseAndFormatClause parses the format clause at the start of the given string with val, and returns
+// how many characters were consumed and the substituted string form of val, or an error if one occurred.
+func parseAndFormatClause(formatStr string, val ref.Val, callback FormatStringInterpolator, list FormatList, locale string) (int, string, error) {
+ i := 1
+ read, formatter, err := parseFormattingClause(formatStr[i:], callback)
+ i += read
+ if err != nil {
+ return -1, "", fmt.Errorf("could not parse formatting clause: %s", err)
+ }
+
+ valStr, err := formatter(val, locale)
+ if err != nil {
+ return -1, "", fmt.Errorf("error during formatting: %s", err)
+ }
+ return i, valStr, nil
+}
+
+func parseFormattingClause(formatStr string, callback FormatStringInterpolator) (int, clauseImpl, error) {
+ i := 0
+ read, precision, err := parsePrecision(formatStr[i:])
+ i += read
+ if err != nil {
+ return -1, nil, fmt.Errorf("error while parsing precision: %w", err)
+ }
+ r := rune(formatStr[i])
+ i++
+ switch r {
+ case 's':
+ return i, callback.String, nil
+ case 'd':
+ return i, callback.Decimal, nil
+ case 'f':
+ return i, callback.Fixed(precision), nil
+ case 'e':
+ return i, callback.Scientific(precision), nil
+ case 'b':
+ return i, callback.Binary, nil
+ case 'x', 'X':
+ return i, callback.Hex(unicode.IsUpper(r)), nil
+ case 'o':
+ return i, callback.Octal, nil
+ default:
+ return -1, nil, fmt.Errorf("unrecognized formatting clause \"%c\"", r)
+ }
+}
+
+func parsePrecision(formatStr string) (int, *int, error) {
+ i := 0
+ if formatStr[i] != '.' {
+ return i, nil, nil
+ }
+ i++
+ var buffer strings.Builder
+ for {
+ if i >= len(formatStr) {
+ return -1, nil, errors.New("could not find end of precision specifier")
+ }
+ if !isASCIIDigit(rune(formatStr[i])) {
+ break
+ }
+ buffer.WriteByte(formatStr[i])
+ i++
+ }
+ precision, err := strconv.Atoi(buffer.String())
+ if err != nil {
+ return -1, nil, fmt.Errorf("error while converting precision to integer: %w", err)
+ }
+ return i, &precision, nil
+}
+
+func isASCIIDigit(r rune) bool {
+ return r <= unicode.MaxASCII && unicode.IsDigit(r)
+}
diff --git a/vendor/github.com/google/cel-go/interpreter/interpretable.go b/vendor/github.com/google/cel-go/interpreter/interpretable.go
new file mode 100644
index 0000000..c4598df
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/interpretable.go
@@ -0,0 +1,1262 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+ "fmt"
+
+ "github.com/google/cel-go/common/functions"
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+// Interpretable can accept a given Activation and produce a value along with
+// an accompanying EvalState which can be used to inspect whether additional
+// data might be necessary to complete the evaluation.
+type Interpretable interface {
+ // ID value corresponding to the expression node.
+ ID() int64
+
+ // Eval an Activation to produce an output.
+ Eval(activation Activation) ref.Val
+}
+
+// InterpretableConst interface for tracking whether the Interpretable is a constant value.
+type InterpretableConst interface {
+ Interpretable
+
+ // Value returns the constant value of the instruction.
+ Value() ref.Val
+}
+
+// InterpretableAttribute interface for tracking whether the Interpretable is an attribute.
+type InterpretableAttribute interface {
+ Interpretable
+
+ // Attr returns the Attribute value.
+ Attr() Attribute
+
+ // Adapter returns the type adapter to be used for adapting resolved Attribute values.
+ Adapter() types.Adapter
+
+ // AddQualifier proxies the Attribute.AddQualifier method.
+ //
+ // Note, this method may mutate the current attribute state. If the desire is to clone the
+ // Attribute, the Attribute should first be copied before adding the qualifier. Attributes
+ // are not copyable by default, so this is a capability that would need to be added to the
+ // AttributeFactory or specifically to the underlying Attribute implementation.
+ AddQualifier(Qualifier) (Attribute, error)
+
+ // Qualify replicates the Attribute.Qualify method to permit extension and interception
+ // of object qualification.
+ Qualify(vars Activation, obj any) (any, error)
+
+ // QualifyIfPresent qualifies the object if the qualifier is declared or defined on the object.
+ // The 'presenceOnly' flag indicates that the value is not necessary, just a boolean status as
+ // to whether the qualifier is present.
+ QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error)
+
+ // IsOptional indicates whether the resulting value is an optional type.
+ IsOptional() bool
+
+ // Resolve returns the value of the Attribute given the current Activation.
+ Resolve(Activation) (any, error)
+}
+
+// InterpretableCall interface for inspecting Interpretable instructions related to function calls.
+type InterpretableCall interface {
+ Interpretable
+
+ // Function returns the function name as it appears in text or mangled operator name as it
+ // appears in the operators.go file.
+ Function() string
+
+ // OverloadID returns the overload id associated with the function specialization.
+ // Overload ids are stable across language boundaries and can be treated as synonymous with a
+ // unique function signature.
+ OverloadID() string
+
+ // Args returns the normalized arguments to the function overload.
+ // For receiver-style functions, the receiver target is arg 0.
+ Args() []Interpretable
+}
+
+// InterpretableConstructor interface for inspecting Interpretable instructions that initialize a list, map
+// or struct.
+type InterpretableConstructor interface {
+ Interpretable
+
+ // InitVals returns all the list elements, map key and values or struct field values.
+ InitVals() []Interpretable
+
+ // Type returns the type constructed.
+ Type() ref.Type
+}
+
+// Core Interpretable implementations used during the program planning phase.
+
+type evalTestOnly struct {
+ id int64
+ InterpretableAttribute
+}
+
+// ID implements the Interpretable interface method.
+func (test *evalTestOnly) ID() int64 {
+ return test.id
+}
+
+// Eval implements the Interpretable interface method.
+func (test *evalTestOnly) Eval(ctx Activation) ref.Val {
+ val, err := test.Resolve(ctx)
+ // Return an error if the resolve step fails
+ if err != nil {
+ return types.WrapErr(err)
+ }
+ if optVal, isOpt := val.(*types.Optional); isOpt {
+ return types.Bool(optVal.HasValue())
+ }
+ return test.Adapter().NativeToValue(val)
+}
+
+// AddQualifier appends a qualifier that will always and only perform a presence test.
+func (test *evalTestOnly) AddQualifier(q Qualifier) (Attribute, error) {
+ cq, ok := q.(ConstantQualifier)
+ if !ok {
+ return nil, fmt.Errorf("test only expressions must have constant qualifiers: %v", q)
+ }
+ return test.InterpretableAttribute.AddQualifier(&testOnlyQualifier{ConstantQualifier: cq})
+}
+
+type testOnlyQualifier struct {
+ ConstantQualifier
+}
+
+// Qualify determines whether the test-only qualifier is present on the input object.
+func (q *testOnlyQualifier) Qualify(vars Activation, obj any) (any, error) {
+ out, present, err := q.ConstantQualifier.QualifyIfPresent(vars, obj, true)
+ if err != nil {
+ return nil, err
+ }
+ if unk, isUnk := out.(types.Unknown); isUnk {
+ return unk, nil
+ }
+ if opt, isOpt := out.(types.Optional); isOpt {
+ return opt.HasValue(), nil
+ }
+ return present, nil
+}
+
+// QualifyIfPresent returns whether the target field in the test-only expression is present.
+func (q *testOnlyQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ // Only ever test for presence.
+ return q.ConstantQualifier.QualifyIfPresent(vars, obj, true)
+}
+
+// QualifierValueEquals determines whether the test-only constant qualifier equals the input value.
+func (q *testOnlyQualifier) QualifierValueEquals(value any) bool {
+ // The input qualifier will always be of type string
+ return q.ConstantQualifier.Value().Value() == value
+}
+
+// NewConstValue creates a new constant valued Interpretable.
+func NewConstValue(id int64, val ref.Val) InterpretableConst {
+ return &evalConst{
+ id: id,
+ val: val,
+ }
+}
+
+type evalConst struct {
+ id int64
+ val ref.Val
+}
+
+// ID implements the Interpretable interface method.
+func (cons *evalConst) ID() int64 {
+ return cons.id
+}
+
+// Eval implements the Interpretable interface method.
+func (cons *evalConst) Eval(ctx Activation) ref.Val {
+ return cons.val
+}
+
+// Value implements the InterpretableConst interface method.
+func (cons *evalConst) Value() ref.Val {
+ return cons.val
+}
+
+type evalOr struct {
+ id int64
+ terms []Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (or *evalOr) ID() int64 {
+ return or.id
+}
+
+// Eval implements the Interpretable interface method.
+func (or *evalOr) Eval(ctx Activation) ref.Val {
+ var err ref.Val = nil
+ var unk *types.Unknown
+ for _, term := range or.terms {
+ val := term.Eval(ctx)
+ boolVal, ok := val.(types.Bool)
+ // short-circuit on true.
+ if ok && boolVal == types.True {
+ return types.True
+ }
+ if !ok {
+ isUnk := false
+ unk, isUnk = types.MaybeMergeUnknowns(val, unk)
+ if !isUnk && err == nil {
+ if types.IsError(val) {
+ err = val
+ } else {
+ err = types.MaybeNoSuchOverloadErr(val)
+ }
+ }
+ }
+ }
+ if unk != nil {
+ return unk
+ }
+ if err != nil {
+ return err
+ }
+ return types.False
+}
+
+type evalAnd struct {
+ id int64
+ terms []Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (and *evalAnd) ID() int64 {
+ return and.id
+}
+
+// Eval implements the Interpretable interface method.
+func (and *evalAnd) Eval(ctx Activation) ref.Val {
+ var err ref.Val = nil
+ var unk *types.Unknown
+ for _, term := range and.terms {
+ val := term.Eval(ctx)
+ boolVal, ok := val.(types.Bool)
+ // short-circuit on false.
+ if ok && boolVal == types.False {
+ return types.False
+ }
+ if !ok {
+ isUnk := false
+ unk, isUnk = types.MaybeMergeUnknowns(val, unk)
+ if !isUnk && err == nil {
+ if types.IsError(val) {
+ err = val
+ } else {
+ err = types.MaybeNoSuchOverloadErr(val)
+ }
+ }
+ }
+ }
+ if unk != nil {
+ return unk
+ }
+ if err != nil {
+ return err
+ }
+ return types.True
+}
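+
+// A short sketch of the commutative error/unknown semantics shared by evalOr
+// and evalAnd (expressions are illustrative): `true || (1 / 0 > 0)` yields
+// true because one term is definitively true, while `false || (1 / 0 > 0)`
+// yields the division error since no term can make the result true. Unknowns
+// take precedence over errors when both occur.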
+
+type evalEq struct {
+ id int64
+ lhs Interpretable
+ rhs Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (eq *evalEq) ID() int64 {
+ return eq.id
+}
+
+// Eval implements the Interpretable interface method.
+func (eq *evalEq) Eval(ctx Activation) ref.Val {
+ lVal := eq.lhs.Eval(ctx)
+ rVal := eq.rhs.Eval(ctx)
+ if types.IsUnknownOrError(lVal) {
+ return lVal
+ }
+ if types.IsUnknownOrError(rVal) {
+ return rVal
+ }
+ return types.Equal(lVal, rVal)
+}
+
+// Function implements the InterpretableCall interface method.
+func (*evalEq) Function() string {
+ return operators.Equals
+}
+
+// OverloadID implements the InterpretableCall interface method.
+func (*evalEq) OverloadID() string {
+ return overloads.Equals
+}
+
+// Args implements the InterpretableCall interface method.
+func (eq *evalEq) Args() []Interpretable {
+ return []Interpretable{eq.lhs, eq.rhs}
+}
+
+type evalNe struct {
+ id int64
+ lhs Interpretable
+ rhs Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (ne *evalNe) ID() int64 {
+ return ne.id
+}
+
+// Eval implements the Interpretable interface method.
+func (ne *evalNe) Eval(ctx Activation) ref.Val {
+ lVal := ne.lhs.Eval(ctx)
+ rVal := ne.rhs.Eval(ctx)
+ if types.IsUnknownOrError(lVal) {
+ return lVal
+ }
+ if types.IsUnknownOrError(rVal) {
+ return rVal
+ }
+ return types.Bool(types.Equal(lVal, rVal) != types.True)
+}
+
+// Function implements the InterpretableCall interface method.
+func (*evalNe) Function() string {
+ return operators.NotEquals
+}
+
+// OverloadID implements the InterpretableCall interface method.
+func (*evalNe) OverloadID() string {
+ return overloads.NotEquals
+}
+
+// Args implements the InterpretableCall interface method.
+func (ne *evalNe) Args() []Interpretable {
+ return []Interpretable{ne.lhs, ne.rhs}
+}
+
+type evalZeroArity struct {
+ id int64
+ function string
+ overload string
+ impl functions.FunctionOp
+}
+
+// ID implements the Interpretable interface method.
+func (zero *evalZeroArity) ID() int64 {
+ return zero.id
+}
+
+// Eval implements the Interpretable interface method.
+func (zero *evalZeroArity) Eval(ctx Activation) ref.Val {
+ return zero.impl()
+}
+
+// Function implements the InterpretableCall interface method.
+func (zero *evalZeroArity) Function() string {
+ return zero.function
+}
+
+// OverloadID implements the InterpretableCall interface method.
+func (zero *evalZeroArity) OverloadID() string {
+ return zero.overload
+}
+
+// Args returns an empty argument list for the zero-arity function.
+func (zero *evalZeroArity) Args() []Interpretable {
+ return []Interpretable{}
+}
+
+type evalUnary struct {
+ id int64
+ function string
+ overload string
+ arg Interpretable
+ trait int
+ impl functions.UnaryOp
+ nonStrict bool
+}
+
+// ID implements the Interpretable interface method.
+func (un *evalUnary) ID() int64 {
+ return un.id
+}
+
+// Eval implements the Interpretable interface method.
+func (un *evalUnary) Eval(ctx Activation) ref.Val {
+ argVal := un.arg.Eval(ctx)
+ // Early return if the argument to the function is unknown or error.
+ strict := !un.nonStrict
+ if strict && types.IsUnknownOrError(argVal) {
+ return argVal
+ }
+ // If the implementation is bound and the argument value has the right traits required to
+ // invoke it, then call the implementation.
+ if un.impl != nil && (un.trait == 0 || (!strict && types.IsUnknownOrError(argVal)) || argVal.Type().HasTrait(un.trait)) {
+ return un.impl(argVal)
+ }
+ // Otherwise, if the argument is a ReceiverType attempt to invoke the receiver method on the
+ // operand (arg0).
+ if argVal.Type().HasTrait(traits.ReceiverType) {
+ return argVal.(traits.Receiver).Receive(un.function, un.overload, []ref.Val{})
+ }
+ return types.NewErr("no such overload: %s", un.function)
+}
+
+// Function implements the InterpretableCall interface method.
+func (un *evalUnary) Function() string {
+ return un.function
+}
+
+// OverloadID implements the InterpretableCall interface method.
+func (un *evalUnary) OverloadID() string {
+ return un.overload
+}
+
+// Args returns the argument to the unary function.
+func (un *evalUnary) Args() []Interpretable {
+ return []Interpretable{un.arg}
+}
+
+type evalBinary struct {
+ id int64
+ function string
+ overload string
+ lhs Interpretable
+ rhs Interpretable
+ trait int
+ impl functions.BinaryOp
+ nonStrict bool
+}
+
+// ID implements the Interpretable interface method.
+func (bin *evalBinary) ID() int64 {
+ return bin.id
+}
+
+// Eval implements the Interpretable interface method.
+func (bin *evalBinary) Eval(ctx Activation) ref.Val {
+ lVal := bin.lhs.Eval(ctx)
+ rVal := bin.rhs.Eval(ctx)
+ // Early return if any argument to the function is unknown or error.
+ strict := !bin.nonStrict
+ if strict {
+ if types.IsUnknownOrError(lVal) {
+ return lVal
+ }
+ if types.IsUnknownOrError(rVal) {
+ return rVal
+ }
+ }
+ // If the implementation is bound and the argument value has the right traits required to
+ // invoke it, then call the implementation.
+ if bin.impl != nil && (bin.trait == 0 || (!strict && types.IsUnknownOrError(lVal)) || lVal.Type().HasTrait(bin.trait)) {
+ return bin.impl(lVal, rVal)
+ }
+ // Otherwise, if the argument is a ReceiverType attempt to invoke the receiver method on the
+ // operand (arg0).
+ if lVal.Type().HasTrait(traits.ReceiverType) {
+ return lVal.(traits.Receiver).Receive(bin.function, bin.overload, []ref.Val{rVal})
+ }
+ return types.NewErr("no such overload: %s", bin.function)
+}
+
+// Function implements the InterpretableCall interface method.
+func (bin *evalBinary) Function() string {
+ return bin.function
+}
+
+// OverloadID implements the InterpretableCall interface method.
+func (bin *evalBinary) OverloadID() string {
+ return bin.overload
+}
+
+// Args returns the arguments to the binary function.
+func (bin *evalBinary) Args() []Interpretable {
+ return []Interpretable{bin.lhs, bin.rhs}
+}
+
+type evalVarArgs struct {
+ id int64
+ function string
+ overload string
+ args []Interpretable
+ trait int
+ impl functions.FunctionOp
+ nonStrict bool
+}
+
+// NewCall creates a new call Interpretable.
+func NewCall(id int64, function, overload string, args []Interpretable, impl functions.FunctionOp) InterpretableCall {
+ return &evalVarArgs{
+ id: id,
+ function: function,
+ overload: overload,
+ args: args,
+ impl: impl,
+ }
+}
+
+// ID implements the Interpretable interface method.
+func (fn *evalVarArgs) ID() int64 {
+ return fn.id
+}
+
+// Eval implements the Interpretable interface method.
+func (fn *evalVarArgs) Eval(ctx Activation) ref.Val {
+ argVals := make([]ref.Val, len(fn.args))
+ // Early return if any argument to the function is unknown or error.
+ strict := !fn.nonStrict
+ for i, arg := range fn.args {
+ argVals[i] = arg.Eval(ctx)
+ if strict && types.IsUnknownOrError(argVals[i]) {
+ return argVals[i]
+ }
+ }
+ // If the implementation is bound and the argument value has the right traits required to
+ // invoke it, then call the implementation.
+ arg0 := argVals[0]
+ if fn.impl != nil && (fn.trait == 0 || (!strict && types.IsUnknownOrError(arg0)) || arg0.Type().HasTrait(fn.trait)) {
+ return fn.impl(argVals...)
+ }
+ // Otherwise, if the argument is a ReceiverType attempt to invoke the receiver method on the
+ // operand (arg0).
+ if arg0.Type().HasTrait(traits.ReceiverType) {
+ return arg0.(traits.Receiver).Receive(fn.function, fn.overload, argVals[1:])
+ }
+ return types.NewErr("no such overload: %s", fn.function)
+}
+
+// Function implements the InterpretableCall interface method.
+func (fn *evalVarArgs) Function() string {
+ return fn.function
+}
+
+// OverloadID implements the InterpretableCall interface method.
+func (fn *evalVarArgs) OverloadID() string {
+ return fn.overload
+}
+
+// Args returns the arguments to the variadic function.
+func (fn *evalVarArgs) Args() []Interpretable {
+ return fn.args
+}
+
+type evalList struct {
+ id int64
+ elems []Interpretable
+ optionals []bool
+ hasOptionals bool
+ adapter types.Adapter
+}
+
+// ID implements the Interpretable interface method.
+func (l *evalList) ID() int64 {
+ return l.id
+}
+
+// Eval implements the Interpretable interface method.
+func (l *evalList) Eval(ctx Activation) ref.Val {
+ elemVals := make([]ref.Val, 0, len(l.elems))
+ // If any argument is unknown or error early terminate.
+ for i, elem := range l.elems {
+ elemVal := elem.Eval(ctx)
+ if types.IsUnknownOrError(elemVal) {
+ return elemVal
+ }
+ if l.hasOptionals && l.optionals[i] {
+ optVal, ok := elemVal.(*types.Optional)
+ if !ok {
+ return invalidOptionalElementInit(elemVal)
+ }
+ if !optVal.HasValue() {
+ continue
+ }
+ elemVal = optVal.GetValue()
+ }
+ elemVals = append(elemVals, elemVal)
+ }
+ return l.adapter.NativeToValue(elemVals)
+}
+
+func (l *evalList) InitVals() []Interpretable {
+ return l.elems
+}
+
+func (l *evalList) Type() ref.Type {
+ return types.ListType
+}
+
+type evalMap struct {
+ id int64
+ keys []Interpretable
+ vals []Interpretable
+ optionals []bool
+ hasOptionals bool
+ adapter types.Adapter
+}
+
+// ID implements the Interpretable interface method.
+func (m *evalMap) ID() int64 {
+ return m.id
+}
+
+// Eval implements the Interpretable interface method.
+func (m *evalMap) Eval(ctx Activation) ref.Val {
+ entries := make(map[ref.Val]ref.Val)
+ // If any argument is unknown or error early terminate.
+ for i, key := range m.keys {
+ keyVal := key.Eval(ctx)
+ if types.IsUnknownOrError(keyVal) {
+ return keyVal
+ }
+ valVal := m.vals[i].Eval(ctx)
+ if types.IsUnknownOrError(valVal) {
+ return valVal
+ }
+ if m.hasOptionals && m.optionals[i] {
+ optVal, ok := valVal.(*types.Optional)
+ if !ok {
+ return invalidOptionalEntryInit(keyVal, valVal)
+ }
+ if !optVal.HasValue() {
+ delete(entries, keyVal)
+ continue
+ }
+ valVal = optVal.GetValue()
+ }
+ entries[keyVal] = valVal
+ }
+ return m.adapter.NativeToValue(entries)
+}
+
+func (m *evalMap) InitVals() []Interpretable {
+ if len(m.keys) != len(m.vals) {
+ return nil
+ }
+ result := make([]Interpretable, len(m.keys)+len(m.vals))
+ idx := 0
+ for i, k := range m.keys {
+ v := m.vals[i]
+ result[idx] = k
+ idx++
+ result[idx] = v
+ idx++
+ }
+ return result
+}
+
+func (m *evalMap) Type() ref.Type {
+ return types.MapType
+}
+
+type evalObj struct {
+ id int64
+ typeName string
+ fields []string
+ vals []Interpretable
+ optionals []bool
+ hasOptionals bool
+ provider types.Provider
+}
+
+// ID implements the Interpretable interface method.
+func (o *evalObj) ID() int64 {
+ return o.id
+}
+
+// Eval implements the Interpretable interface method.
+func (o *evalObj) Eval(ctx Activation) ref.Val {
+ fieldVals := make(map[string]ref.Val)
+ // If any argument is unknown or error early terminate.
+ for i, field := range o.fields {
+ val := o.vals[i].Eval(ctx)
+ if types.IsUnknownOrError(val) {
+ return val
+ }
+ if o.hasOptionals && o.optionals[i] {
+ optVal, ok := val.(*types.Optional)
+ if !ok {
+ return invalidOptionalEntryInit(field, val)
+ }
+ if !optVal.HasValue() {
+ delete(fieldVals, field)
+ continue
+ }
+ val = optVal.GetValue()
+ }
+ fieldVals[field] = val
+ }
+ return o.provider.NewValue(o.typeName, fieldVals)
+}
+
+func (o *evalObj) InitVals() []Interpretable {
+ return o.vals
+}
+
+func (o *evalObj) Type() ref.Type {
+ return types.NewObjectTypeValue(o.typeName)
+}
+
+type evalFold struct {
+ id int64
+ accuVar string
+ iterVar string
+ iterRange Interpretable
+ accu Interpretable
+ cond Interpretable
+ step Interpretable
+ result Interpretable
+ adapter types.Adapter
+ exhaustive bool
+ interruptable bool
+}
+
+// ID implements the Interpretable interface method.
+func (fold *evalFold) ID() int64 {
+ return fold.id
+}
+
+// Eval implements the Interpretable interface method.
+func (fold *evalFold) Eval(ctx Activation) ref.Val {
+ foldRange := fold.iterRange.Eval(ctx)
+ if !foldRange.Type().HasTrait(traits.IterableType) {
+ return types.ValOrErr(foldRange, "got '%T', expected iterable type", foldRange)
+ }
+ // Configure the fold activation with the accumulator initial value.
+ accuCtx := varActivationPool.Get().(*varActivation)
+ accuCtx.parent = ctx
+ accuCtx.name = fold.accuVar
+ accuCtx.val = fold.accu.Eval(ctx)
+ // If the accumulator starts as an empty list, then the comprehension will build a list
+ // so create a mutable list to optimize the cost of the inner loop.
+ l, ok := accuCtx.val.(traits.Lister)
+ buildingList := false
+ if !fold.exhaustive && ok && l.Size() == types.IntZero {
+ buildingList = true
+ accuCtx.val = types.NewMutableList(fold.adapter)
+ }
+ iterCtx := varActivationPool.Get().(*varActivation)
+ iterCtx.parent = accuCtx
+ iterCtx.name = fold.iterVar
+
+ interrupted := false
+ it := foldRange.(traits.Iterable).Iterator()
+ for it.HasNext() == types.True {
+ // Modify the iter var in the fold activation.
+ iterCtx.val = it.Next()
+
+ // Evaluate the condition, terminate the loop if false.
+ cond := fold.cond.Eval(iterCtx)
+ condBool, ok := cond.(types.Bool)
+ if !fold.exhaustive && ok && condBool != types.True {
+ break
+ }
+ // Evaluate the evaluation step into accu var.
+ accuCtx.val = fold.step.Eval(iterCtx)
+ if fold.interruptable {
+ if stop, found := ctx.ResolveName("#interrupted"); found && stop == true {
+ interrupted = true
+ break
+ }
+ }
+ }
+ varActivationPool.Put(iterCtx)
+ if interrupted {
+ varActivationPool.Put(accuCtx)
+ return types.NewErr("operation interrupted")
+ }
+
+ // Compute the result.
+ res := fold.result.Eval(accuCtx)
+ varActivationPool.Put(accuCtx)
+ // Convert a mutable list to an immutable one, if the comprehension has generated a list as a result.
+ if !types.IsUnknownOrError(res) && buildingList {
+ if _, ok := res.(traits.MutableLister); ok {
+ res = res.(traits.MutableLister).ToImmutableList()
+ }
+ }
+ return res
+}
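+
+// A minimal sketch of the fold in action (the comprehension is illustrative):
+// `[1, 2, 3].map(x, x * 2)` plans to an evalFold where the accumulator starts
+// as an empty list, the iteration variable ranges over the input, and each
+// step appends x * 2 to the accumulator. Because the initial accumulator is
+// an empty list, the mutable-list fast path above is taken and the result is
+// converted back to an immutable list at the end.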
+
+// Optional Interpretable implementations that specialize, subsume, or extend the core evaluation
+// plan via decorators.
+
+// evalSetMembership is an Interpretable implementation which tests whether an input value
+// exists within the set of map keys used to model a set.
+type evalSetMembership struct {
+ inst Interpretable
+ arg Interpretable
+ valueSet map[ref.Val]ref.Val
+}
+
+// ID implements the Interpretable interface method.
+func (e *evalSetMembership) ID() int64 {
+ return e.inst.ID()
+}
+
+// Eval implements the Interpretable interface method.
+func (e *evalSetMembership) Eval(ctx Activation) ref.Val {
+ val := e.arg.Eval(ctx)
+ if types.IsUnknownOrError(val) {
+ return val
+ }
+ if ret, found := e.valueSet[val]; found {
+ return ret
+ }
+ return types.False
+}
+
+// evalWatch is an Interpretable implementation that wraps the execution of a given
+// expression so that it may observe the computed value and send it to an observer.
+type evalWatch struct {
+ Interpretable
+ observer EvalObserver
+}
+
+// Eval implements the Interpretable interface method.
+func (e *evalWatch) Eval(ctx Activation) ref.Val {
+ val := e.Interpretable.Eval(ctx)
+ e.observer(e.ID(), e.Interpretable, val)
+ return val
+}
+
+// evalWatchAttr describes a watcher of an InterpretableAttribute Interpretable.
+//
+// Since the watcher may be selected against at a later stage in program planning, the watcher
+// must implement the InterpretableAttribute interface by proxy.
+type evalWatchAttr struct {
+ InterpretableAttribute
+ observer EvalObserver
+}
+
+// AddQualifier creates a wrapper over the incoming qualifier which observes the qualification
+// result.
+func (e *evalWatchAttr) AddQualifier(q Qualifier) (Attribute, error) {
+ switch qual := q.(type) {
+ // By default, the qualifier is either a constant or an attribute.
+ // There may be some custom cases where the qualifier is neither.
+ case ConstantQualifier:
+ // Expose a method to test whether the qualifier matches the input pattern.
+ q = &evalWatchConstQual{
+ ConstantQualifier: qual,
+ observer: e.observer,
+ adapter: e.Adapter(),
+ }
+ case *evalWatchAttr:
+ // Unwrap the evalWatchAttr since the observation will be applied during Qualify or
+ // QualifyIfPresent rather than Eval.
+ q = &evalWatchAttrQual{
+ Attribute: qual.InterpretableAttribute,
+ observer: e.observer,
+ adapter: e.Adapter(),
+ }
+ case Attribute:
+ // Expose methods which intercept the qualification prior to being applied as a qualifier.
+ // Using this interface ensures that the qualifier is converted to a constant value one
+ // time during attribute pattern matching as the method embeds the Attribute interface
+ // needed to trip the conversion to a constant.
+ q = &evalWatchAttrQual{
+ Attribute: qual,
+ observer: e.observer,
+ adapter: e.Adapter(),
+ }
+ default:
+ // This is likely a custom qualifier type.
+ q = &evalWatchQual{
+ Qualifier: qual,
+ observer: e.observer,
+ adapter: e.Adapter(),
+ }
+ }
+ _, err := e.InterpretableAttribute.AddQualifier(q)
+ return e, err
+}
+
+// Eval implements the Interpretable interface method.
+func (e *evalWatchAttr) Eval(vars Activation) ref.Val {
+ val := e.InterpretableAttribute.Eval(vars)
+ e.observer(e.ID(), e.InterpretableAttribute, val)
+ return val
+}
+
+// evalWatchConstQual observes the qualification of an object using a constant boolean, int,
+// string, or uint.
+type evalWatchConstQual struct {
+ ConstantQualifier
+ observer EvalObserver
+ adapter types.Adapter
+}
+
+// Qualify observes the qualification of an object via a constant boolean, int, string, or uint.
+func (e *evalWatchConstQual) Qualify(vars Activation, obj any) (any, error) {
+ out, err := e.ConstantQualifier.Qualify(vars, obj)
+ var val ref.Val
+ if err != nil {
+ val = types.WrapErr(err)
+ } else {
+ val = e.adapter.NativeToValue(out)
+ }
+ e.observer(e.ID(), e.ConstantQualifier, val)
+ return out, err
+}
+
+// QualifyIfPresent conditionally qualifies the variable and only records a value if one is present.
+func (e *evalWatchConstQual) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ out, present, err := e.ConstantQualifier.QualifyIfPresent(vars, obj, presenceOnly)
+ var val ref.Val
+ if err != nil {
+ val = types.WrapErr(err)
+ } else if out != nil {
+ val = e.adapter.NativeToValue(out)
+ } else if presenceOnly {
+ val = types.Bool(present)
+ }
+ if present || presenceOnly {
+ e.observer(e.ID(), e.ConstantQualifier, val)
+ }
+ return out, present, err
+}
+
+// QualifierValueEquals tests whether the incoming value is equal to the qualifying constant.
+func (e *evalWatchConstQual) QualifierValueEquals(value any) bool {
+ qve, ok := e.ConstantQualifier.(qualifierValueEquator)
+ return ok && qve.QualifierValueEquals(value)
+}
+
+// evalWatchAttrQual observes the qualification of an object by a value computed at runtime.
+type evalWatchAttrQual struct {
+ Attribute
+ observer EvalObserver
+ adapter ref.TypeAdapter
+}
+
+// Qualify observes the qualification of an object via a value computed at runtime.
+func (e *evalWatchAttrQual) Qualify(vars Activation, obj any) (any, error) {
+ out, err := e.Attribute.Qualify(vars, obj)
+ var val ref.Val
+ if err != nil {
+ val = types.WrapErr(err)
+ } else {
+ val = e.adapter.NativeToValue(out)
+ }
+ e.observer(e.ID(), e.Attribute, val)
+ return out, err
+}
+
+// QualifyIfPresent conditionally qualifies the variable and only records a value if one is present.
+func (e *evalWatchAttrQual) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ out, present, err := e.Attribute.QualifyIfPresent(vars, obj, presenceOnly)
+ var val ref.Val
+ if err != nil {
+ val = types.WrapErr(err)
+ } else if out != nil {
+ val = e.adapter.NativeToValue(out)
+ } else if presenceOnly {
+ val = types.Bool(present)
+ }
+ if present || presenceOnly {
+ e.observer(e.ID(), e.Attribute, val)
+ }
+ return out, present, err
+}
+
+// evalWatchQual observes the qualification of an object by a value computed at runtime.
+type evalWatchQual struct {
+ Qualifier
+ observer EvalObserver
+ adapter types.Adapter
+}
+
+// Qualify observes the qualification of an object via a value computed at runtime.
+func (e *evalWatchQual) Qualify(vars Activation, obj any) (any, error) {
+ out, err := e.Qualifier.Qualify(vars, obj)
+ var val ref.Val
+ if err != nil {
+ val = types.WrapErr(err)
+ } else {
+ val = e.adapter.NativeToValue(out)
+ }
+ e.observer(e.ID(), e.Qualifier, val)
+ return out, err
+}
+
+// QualifyIfPresent conditionally qualifies the variable and only records a value if one is present.
+func (e *evalWatchQual) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ out, present, err := e.Qualifier.QualifyIfPresent(vars, obj, presenceOnly)
+ var val ref.Val
+ if err != nil {
+ val = types.WrapErr(err)
+ } else if out != nil {
+ val = e.adapter.NativeToValue(out)
+ } else if presenceOnly {
+ val = types.Bool(present)
+ }
+ if present || presenceOnly {
+ e.observer(e.ID(), e.Qualifier, val)
+ }
+ return out, present, err
+}
+
+// evalWatchConst describes a watcher of an InterpretableConst Interpretable.
+type evalWatchConst struct {
+ InterpretableConst
+ observer EvalObserver
+}
+
+// Eval implements the Interpretable interface method.
+func (e *evalWatchConst) Eval(vars Activation) ref.Val {
+ val := e.Value()
+ e.observer(e.ID(), e.InterpretableConst, val)
+ return val
+}
+
+// evalExhaustiveOr is just like evalOr, but does not short-circuit argument evaluation.
+type evalExhaustiveOr struct {
+ id int64
+ terms []Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (or *evalExhaustiveOr) ID() int64 {
+ return or.id
+}
+
+// Eval implements the Interpretable interface method.
+func (or *evalExhaustiveOr) Eval(ctx Activation) ref.Val {
+ var err ref.Val = nil
+ var unk *types.Unknown
+ isTrue := false
+ for _, term := range or.terms {
+ val := term.Eval(ctx)
+ boolVal, ok := val.(types.Bool)
+ // flag the result as true
+ if ok && boolVal == types.True {
+ isTrue = true
+ }
+ if !ok && !isTrue {
+ isUnk := false
+ unk, isUnk = types.MaybeMergeUnknowns(val, unk)
+ if !isUnk && err == nil {
+ if types.IsError(val) {
+ err = val
+ } else {
+ err = types.MaybeNoSuchOverloadErr(val)
+ }
+ }
+ }
+ }
+ if isTrue {
+ return types.True
+ }
+ if unk != nil {
+ return unk
+ }
+ if err != nil {
+ return err
+ }
+ return types.False
+}
+
+// evalExhaustiveAnd is just like evalAnd, but does not short-circuit argument evaluation.
+type evalExhaustiveAnd struct {
+ id int64
+ terms []Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (and *evalExhaustiveAnd) ID() int64 {
+ return and.id
+}
+
+// Eval implements the Interpretable interface method.
+func (and *evalExhaustiveAnd) Eval(ctx Activation) ref.Val {
+ var err ref.Val = nil
+ var unk *types.Unknown
+ isFalse := false
+ for _, term := range and.terms {
+ val := term.Eval(ctx)
+ boolVal, ok := val.(types.Bool)
+ // flag the result as false.
+ if ok && boolVal == types.False {
+ isFalse = true
+ }
+ if !ok && !isFalse {
+ isUnk := false
+ unk, isUnk = types.MaybeMergeUnknowns(val, unk)
+ if !isUnk && err == nil {
+ if types.IsError(val) {
+ err = val
+ } else {
+ err = types.MaybeNoSuchOverloadErr(val)
+ }
+ }
+ }
+ }
+ if isFalse {
+ return types.False
+ }
+ if unk != nil {
+ return unk
+ }
+ if err != nil {
+ return err
+ }
+ return types.True
+}
+
+// evalExhaustiveConditional is like evalConditional, but does not short-circuit argument
+// evaluation.
+type evalExhaustiveConditional struct {
+ id int64
+ adapter types.Adapter
+ attr *conditionalAttribute
+}
+
+// ID implements the Interpretable interface method.
+func (cond *evalExhaustiveConditional) ID() int64 {
+ return cond.id
+}
+
+// Eval implements the Interpretable interface method.
+func (cond *evalExhaustiveConditional) Eval(ctx Activation) ref.Val {
+ cVal := cond.attr.expr.Eval(ctx)
+ tVal, tErr := cond.attr.truthy.Resolve(ctx)
+ fVal, fErr := cond.attr.falsy.Resolve(ctx)
+ cBool, ok := cVal.(types.Bool)
+ if !ok {
+ return types.ValOrErr(cVal, "no such overload")
+ }
+ if cBool {
+ if tErr != nil {
+ return types.WrapErr(tErr)
+ }
+ return cond.adapter.NativeToValue(tVal)
+ }
+ if fErr != nil {
+ return types.WrapErr(fErr)
+ }
+ return cond.adapter.NativeToValue(fVal)
+}
+
+// evalAttr evaluates an Attribute value.
+type evalAttr struct {
+ adapter types.Adapter
+ attr Attribute
+ optional bool
+}
+
+var _ InterpretableAttribute = &evalAttr{}
+
+// ID of the attribute instruction.
+func (a *evalAttr) ID() int64 {
+ return a.attr.ID()
+}
+
+// AddQualifier implements the InterpretableAttribute interface method.
+func (a *evalAttr) AddQualifier(qual Qualifier) (Attribute, error) {
+ attr, err := a.attr.AddQualifier(qual)
+ a.attr = attr
+ return attr, err
+}
+
+// Attr implements the InterpretableAttribute interface method.
+func (a *evalAttr) Attr() Attribute {
+ return a.attr
+}
+
+// Adapter implements the InterpretableAttribute interface method.
+func (a *evalAttr) Adapter() types.Adapter {
+ return a.adapter
+}
+
+// Eval implements the Interpretable interface method.
+func (a *evalAttr) Eval(ctx Activation) ref.Val {
+ v, err := a.attr.Resolve(ctx)
+ if err != nil {
+ return types.WrapErr(err)
+ }
+ return a.adapter.NativeToValue(v)
+}
+
+// Qualify proxies to the Attribute's Qualify method.
+func (a *evalAttr) Qualify(ctx Activation, obj any) (any, error) {
+ return a.attr.Qualify(ctx, obj)
+}
+
+// QualifyIfPresent proxies to the Attribute's QualifyIfPresent method.
+func (a *evalAttr) QualifyIfPresent(ctx Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return a.attr.QualifyIfPresent(ctx, obj, presenceOnly)
+}
+
+func (a *evalAttr) IsOptional() bool {
+ return a.optional
+}
+
+// Resolve proxies to the Attribute's Resolve method.
+func (a *evalAttr) Resolve(ctx Activation) (any, error) {
+ return a.attr.Resolve(ctx)
+}
+
+type evalWatchConstructor struct {
+ constructor InterpretableConstructor
+ observer EvalObserver
+}
+
+// InitVals implements the InterpretableConstructor InitVals function.
+func (c *evalWatchConstructor) InitVals() []Interpretable {
+ return c.constructor.InitVals()
+}
+
+// Type implements the InterpretableConstructor Type function.
+func (c *evalWatchConstructor) Type() ref.Type {
+ return c.constructor.Type()
+}
+
+// ID implements the Interpretable ID function.
+func (c *evalWatchConstructor) ID() int64 {
+ return c.constructor.ID()
+}
+
+// Eval implements the Interpretable Eval function.
+func (c *evalWatchConstructor) Eval(ctx Activation) ref.Val {
+ val := c.constructor.Eval(ctx)
+ c.observer(c.ID(), c.constructor, val)
+ return val
+}
+
+func invalidOptionalEntryInit(field any, value ref.Val) ref.Val {
+ return types.NewErr("cannot initialize optional entry '%v' from non-optional value %v", field, value)
+}
+
+func invalidOptionalElementInit(value ref.Val) ref.Val {
+ return types.NewErr("cannot initialize optional list element from non-optional value %v", value)
+}
diff --git a/vendor/github.com/google/cel-go/interpreter/interpreter.go b/vendor/github.com/google/cel-go/interpreter/interpreter.go
new file mode 100644
index 0000000..00fc747
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/interpreter.go
@@ -0,0 +1,205 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package interpreter provides functions to evaluate parsed expressions with
+// the option to augment the evaluation with inputs and functions supplied at
+// evaluation time.
+package interpreter
+
+import (
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/containers"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Interpreter generates a new Interpretable from a checked or unchecked expression.
+type Interpreter interface {
+ // NewInterpretable creates an Interpretable from a checked expression and an
+ // optional list of InterpretableDecorator values.
+ NewInterpretable(checked *ast.CheckedAST, decorators ...InterpretableDecorator) (Interpretable, error)
+
+ // NewUncheckedInterpretable returns an Interpretable from a parsed expression
+ // and an optional list of InterpretableDecorator values.
+ NewUncheckedInterpretable(expr *exprpb.Expr, decorators ...InterpretableDecorator) (Interpretable, error)
+}
+
+// EvalObserver is a functional interface that accepts an expression id and an observed value.
+// The id identifies the expression that was evaluated, programStep is the Interpretable or
+// Qualifier that was evaluated, and value is the result of the evaluation.
+type EvalObserver func(id int64, programStep any, value ref.Val)
+
+// Observe constructs a decorator that calls all the provided observers in order after evaluating each Interpretable
+// or Qualifier during program evaluation.
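+//
+// A minimal usage sketch (interp and checkedAST are assumed to exist; the
+// logging observer is purely illustrative):
+//
+//	state := NewEvalState()
+//	logger := func(id int64, step any, val ref.Val) { log.Printf("%d: %v", id, val) }
+//	prg, err := interp.NewInterpretable(checkedAST, Observe(EvalStateObserver(state), logger))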
+func Observe(observers ...EvalObserver) InterpretableDecorator {
+ if len(observers) == 1 {
+ return decObserveEval(observers[0])
+ }
+ observeFn := func(id int64, programStep any, val ref.Val) {
+ for _, observer := range observers {
+ observer(id, programStep, val)
+ }
+ }
+ return decObserveEval(observeFn)
+}
+
+// EvalCancelledError represents a cancelled program evaluation operation.
+type EvalCancelledError struct {
+ Message string
+ // Type identifies the cause of the cancellation.
+ Cause CancellationCause
+}
+
+func (e EvalCancelledError) Error() string {
+ return e.Message
+}
+
+// CancellationCause enumerates the ways a program evaluation operation can be cancelled.
+type CancellationCause int
+
+const (
+ // ContextCancelled indicates that the operation was cancelled in response to a Golang context cancellation.
+ ContextCancelled CancellationCause = iota
+
+ // CostLimitExceeded indicates that the operation was cancelled in response to the actual cost limit being
+ // exceeded.
+ CostLimitExceeded
+)
+
+// TODO: Replace all usages of TrackState with EvalStateObserver
+
+// TrackState decorates each expression node with an observer which records the value
+// associated with the given expression id. EvalState must be provided to the decorator.
+// This decorator is not thread-safe, and the EvalState must be reset between Eval()
+// calls.
+// DEPRECATED: Please use EvalStateObserver instead. It composes gracefully with additional observers.
+func TrackState(state EvalState) InterpretableDecorator {
+ return Observe(EvalStateObserver(state))
+}
+
+// EvalStateObserver provides an observer which records the value
+// associated with the given expression id. EvalState must be provided to the observer.
+// This decorator is not thread-safe, and the EvalState must be reset between Eval()
+// calls.
+func EvalStateObserver(state EvalState) EvalObserver {
+ return func(id int64, programStep any, val ref.Val) {
+ state.SetValue(id, val)
+ }
+}
+
+// ExhaustiveEval replaces operations that short-circuit with versions that evaluate
+// all of their arguments. It is typically paired with a state-tracking decorator such
+// as TrackState() to provide insight into the evaluation state of the entire
+// expression. Any EvalState used for such tracking is not thread-safe and must be
+// reset between Eval() calls.
+func ExhaustiveEval() InterpretableDecorator {
+ ex := decDisableShortcircuits()
+ return func(i Interpretable) (Interpretable, error) {
+ return ex(i)
+ }
+}
+
+// InterruptableEval annotates comprehension loops with information that indicates they
+// should check the `#interrupted` state within a custom Activation.
+//
+// The custom activation is currently managed higher up in the stack within the 'cel' package
+// and should not require any custom support on behalf of callers.
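+//
+// A sketch of signalling interruption through the Activation (assuming this
+// package's NewActivation helper):
+//
+//	vars, _ := NewActivation(map[string]any{"#interrupted": true})
+//	// A comprehension planned with InterruptableEval() will now stop early and
+//	// return an "operation interrupted" error.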
+func InterruptableEval() InterpretableDecorator {
+ return decInterruptFolds()
+}
+
+// Optimize will pre-compute operations such as list and map construction and optimize
+// call arguments to set membership tests. The set of optimizations will increase over time.
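+//
+// For example, `4 in [1, 2, 3, 4]` may be planned as a constant set-membership
+// lookup (see evalSetMembership), and `[1, 2, 3, 4]` itself may be folded into
+// a constant list at plan time.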
+func Optimize() InterpretableDecorator {
+ return decOptimize()
+}
+
+// RegexOptimization provides a way to replace an InterpretableCall for a regex function when the
+// RegexIndex argument is a string constant. Typically, the Factory would compile the regex pattern at
+// RegexIndex and report any errors (at program creation time) and then use the compiled regex for
+// all regex function invocations.
+type RegexOptimization struct {
+ // Function is the name of the function to optimize.
+ Function string
+ // OverloadID is the ID of the overload to optimize.
+ OverloadID string
+ // RegexIndex is the index position of the regex pattern argument. Only calls to the function where this argument is
+ // a string constant will be delegated to this optimizer.
+ RegexIndex int
+ // Factory constructs a replacement InterpretableCall node that optimizes the regex function call. Factory is
+ // provided with the unoptimized regex call and the string constant at the RegexIndex argument.
+ // The Factory may compile the regex for use across all invocations of the call, return any errors and
+ // return an interpreter.NewCall with the desired regex optimized function impl.
+ Factory func(call InterpretableCall, regexPattern string) (InterpretableCall, error)
+}
+
+// CompileRegexConstants compiles regex pattern string constants at program creation time and reports any regex pattern
+// compile errors.
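+//
+// For example, to pre-compile constant patterns passed to the standard 'matches'
+// function (a sketch; interp and checkedAST are assumed to exist):
+//
+//	prg, err := interp.NewInterpretable(checkedAST, CompileRegexConstants(MatchesRegexOptimization))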
+func CompileRegexConstants(regexOptimizations ...*RegexOptimization) InterpretableDecorator {
+ return decRegexOptimizer(regexOptimizations...)
+}
+
+type exprInterpreter struct {
+ dispatcher Dispatcher
+ container *containers.Container
+ provider types.Provider
+ adapter types.Adapter
+ attrFactory AttributeFactory
+}
+
+// NewInterpreter builds an Interpreter from a Dispatcher and TypeProvider which will be used
+// throughout the Eval of all Interpretable instances generated from it.
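+//
+// A construction sketch, assuming the NewDispatcher and NewAttributeFactory
+// helpers from this package and a types.Registry serving as both provider and
+// adapter:
+//
+//	reg, _ := types.NewRegistry()
+//	cont := containers.DefaultContainer
+//	attrs := NewAttributeFactory(cont, reg, reg)
+//	interp := NewInterpreter(NewDispatcher(), cont, reg, reg, attrs)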
+func NewInterpreter(dispatcher Dispatcher,
+ container *containers.Container,
+ provider types.Provider,
+ adapter types.Adapter,
+ attrFactory AttributeFactory) Interpreter {
+ return &exprInterpreter{
+ dispatcher: dispatcher,
+ container: container,
+ provider: provider,
+ adapter: adapter,
+ attrFactory: attrFactory}
+}
+
+// NewInterpretable implements the Interpreter interface method.
+func (i *exprInterpreter) NewInterpretable(
+ checked *ast.CheckedAST,
+ decorators ...InterpretableDecorator) (Interpretable, error) {
+ p := newPlanner(
+ i.dispatcher,
+ i.provider,
+ i.adapter,
+ i.attrFactory,
+ i.container,
+ checked,
+ decorators...)
+ return p.Plan(checked.Expr)
+}
+
+// NewUncheckedInterpretable implements the Interpreter interface method.
+func (i *exprInterpreter) NewUncheckedInterpretable(
+ expr *exprpb.Expr,
+ decorators ...InterpretableDecorator) (Interpretable, error) {
+ p := newUncheckedPlanner(
+ i.dispatcher,
+ i.provider,
+ i.adapter,
+ i.attrFactory,
+ i.container,
+ decorators...)
+ return p.Plan(expr)
+}
diff --git a/vendor/github.com/google/cel-go/interpreter/optimizations.go b/vendor/github.com/google/cel-go/interpreter/optimizations.go
new file mode 100644
index 0000000..2fc87e6
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/optimizations.go
@@ -0,0 +1,46 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+ "regexp"
+
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// MatchesRegexOptimization optimizes the 'matches' standard library function by compiling the regex pattern and
+// reporting any compilation errors at program creation time, and using the compiled regex pattern for all function
+// call invocations.
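+//
+// For example, in `input.matches('[0-9]+')` the pattern `[0-9]+` is compiled once
+// when the program is planned rather than on every evaluation.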
+var MatchesRegexOptimization = &RegexOptimization{
+ Function: "matches",
+ RegexIndex: 1,
+ Factory: func(call InterpretableCall, regexPattern string) (InterpretableCall, error) {
+ compiledRegex, err := regexp.Compile(regexPattern)
+ if err != nil {
+ return nil, err
+ }
+ return NewCall(call.ID(), call.Function(), call.OverloadID(), call.Args(), func(values ...ref.Val) ref.Val {
+ if len(values) != 2 {
+ return types.NoSuchOverloadErr()
+ }
+ in, ok := values[0].Value().(string)
+ if !ok {
+ return types.NoSuchOverloadErr()
+ }
+ return types.Bool(compiledRegex.MatchString(in))
+ }), nil
+ },
+}
diff --git a/vendor/github.com/google/cel-go/interpreter/planner.go b/vendor/github.com/google/cel-go/interpreter/planner.go
new file mode 100644
index 0000000..757cd08
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/planner.go
@@ -0,0 +1,791 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/containers"
+ "github.com/google/cel-go/common/functions"
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// interpretablePlanner creates an Interpretable evaluation plan from a proto Expr value.
+type interpretablePlanner interface {
+ // Plan generates an Interpretable value (or error) from the input proto Expr.
+ Plan(expr *exprpb.Expr) (Interpretable, error)
+}
+
+// newPlanner creates an interpretablePlanner which references a Dispatcher, TypeProvider,
+// TypeAdapter, Container, and CheckedExpr value. These pieces of data are used to resolve
+// functions, types, and namespaced identifiers at plan time rather than at runtime, since
+// this resolution only needs to happen once and may be semi-expensive to compute.
+func newPlanner(disp Dispatcher,
+ provider types.Provider,
+ adapter types.Adapter,
+ attrFactory AttributeFactory,
+ cont *containers.Container,
+ checked *ast.CheckedAST,
+ decorators ...InterpretableDecorator) interpretablePlanner {
+ return &planner{
+ disp: disp,
+ provider: provider,
+ adapter: adapter,
+ attrFactory: attrFactory,
+ container: cont,
+ refMap: checked.ReferenceMap,
+ typeMap: checked.TypeMap,
+ decorators: decorators,
+ }
+}
+
+// newUncheckedPlanner creates an interpretablePlanner which references a Dispatcher, TypeProvider,
+// TypeAdapter, and Container to resolve functions and types at plan time. Namespaces present in
+// Select expressions are resolved lazily at evaluation time.
+func newUncheckedPlanner(disp Dispatcher,
+ provider types.Provider,
+ adapter types.Adapter,
+ attrFactory AttributeFactory,
+ cont *containers.Container,
+ decorators ...InterpretableDecorator) interpretablePlanner {
+ return &planner{
+ disp: disp,
+ provider: provider,
+ adapter: adapter,
+ attrFactory: attrFactory,
+ container: cont,
+ refMap: make(map[int64]*ast.ReferenceInfo),
+ typeMap: make(map[int64]*types.Type),
+ decorators: decorators,
+ }
+}
+
+// planner is an implementation of the interpretablePlanner interface.
+type planner struct {
+ disp Dispatcher
+ provider types.Provider
+ adapter types.Adapter
+ attrFactory AttributeFactory
+ container *containers.Container
+ refMap map[int64]*ast.ReferenceInfo
+ typeMap map[int64]*types.Type
+ decorators []InterpretableDecorator
+}
+
+// Plan implements the interpretablePlanner interface. This implementation of the Plan method also
+// applies decorators to each Interpretable generated as part of the overall plan. Decorators are
+// useful for layering functionality into the evaluation that is not natively understood by CEL,
+// such as state-tracking, expression re-write, and possibly efficient thread-safe memoization of
+// repeated expressions.
+func (p *planner) Plan(expr *exprpb.Expr) (Interpretable, error) {
+ switch expr.GetExprKind().(type) {
+ case *exprpb.Expr_CallExpr:
+ return p.decorate(p.planCall(expr))
+ case *exprpb.Expr_IdentExpr:
+ return p.decorate(p.planIdent(expr))
+ case *exprpb.Expr_SelectExpr:
+ return p.decorate(p.planSelect(expr))
+ case *exprpb.Expr_ListExpr:
+ return p.decorate(p.planCreateList(expr))
+ case *exprpb.Expr_StructExpr:
+ return p.decorate(p.planCreateStruct(expr))
+ case *exprpb.Expr_ComprehensionExpr:
+ return p.decorate(p.planComprehension(expr))
+ case *exprpb.Expr_ConstExpr:
+ return p.decorate(p.planConst(expr))
+ }
+ return nil, fmt.Errorf("unsupported expr: %v", expr)
+}
+
+// decorate applies the InterpretableDecorator functions to the given Interpretable.
+// Both the Interpretable and error generated by a Plan step are accepted as arguments
+// for convenience.
+func (p *planner) decorate(i Interpretable, err error) (Interpretable, error) {
+ if err != nil {
+ return nil, err
+ }
+ for _, dec := range p.decorators {
+ i, err = dec(i)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return i, nil
+}
+
+// planIdent creates an Interpretable that resolves an identifier from an Activation.
+func (p *planner) planIdent(expr *exprpb.Expr) (Interpretable, error) {
+ // Establish whether the identifier is in the reference map.
+ if identRef, found := p.refMap[expr.GetId()]; found {
+ return p.planCheckedIdent(expr.GetId(), identRef)
+ }
+ // Create the possible attribute list for the unresolved reference.
+ ident := expr.GetIdentExpr()
+ return &evalAttr{
+ adapter: p.adapter,
+ attr: p.attrFactory.MaybeAttribute(expr.GetId(), ident.Name),
+ }, nil
+}
+
+func (p *planner) planCheckedIdent(id int64, identRef *ast.ReferenceInfo) (Interpretable, error) {
+ // Plan a constant reference if this simple identifier resolves to a constant value.
+ if identRef.Value != nil {
+ return NewConstValue(id, identRef.Value), nil
+ }
+
+ // Check to see whether the type map indicates this is a type name. All types should be
+ // registered with the provider.
+ cType := p.typeMap[id]
+ if cType.Kind() == types.TypeKind {
+ cVal, found := p.provider.FindIdent(identRef.Name)
+ if !found {
+ return nil, fmt.Errorf("reference to undefined type: %s", identRef.Name)
+ }
+ return NewConstValue(id, cVal), nil
+ }
+
+ // Otherwise, return the attribute for the resolved identifier name.
+ return &evalAttr{
+ adapter: p.adapter,
+ attr: p.attrFactory.AbsoluteAttribute(id, identRef.Name),
+ }, nil
+}
+
+// planSelect creates an Interpretable that either:
+//
+// a) selects a field from a map or proto,
+// b) creates a field presence test for a select within a has() macro, or
+// c) resolves the select expression to a namespaced identifier.
+func (p *planner) planSelect(expr *exprpb.Expr) (Interpretable, error) {
+ // If the Select id appears in the reference map from the CheckedExpr proto then it is either
+ // a namespaced identifier or enum value.
+ if identRef, found := p.refMap[expr.GetId()]; found {
+ return p.planCheckedIdent(expr.GetId(), identRef)
+ }
+
+ sel := expr.GetSelectExpr()
+ // Plan the operand evaluation.
+ op, err := p.Plan(sel.GetOperand())
+ if err != nil {
+ return nil, err
+ }
+ opType := p.typeMap[sel.GetOperand().GetId()]
+
+ // If the Select was marked TestOnly, this is a presence test.
+ //
+ // Note: presence tests are defined for structured (e.g. proto) and dynamic values (map, json)
+ // as follows:
+ // - True if the object field has a non-default value, e.g. obj.str != ""
+ // - True if the dynamic value has the field defined, e.g. key in map
+ //
+ // However, presence tests are not defined for qualified identifier names with primitive types.
+ // If a string named 'a.b.c' is declared in the environment and referenced within `has(a.b.c)`,
+ // it is not clear whether has should error or follow the convention defined for structured
+ // values.
+
+ // Establish the attribute reference.
+ attr, isAttr := op.(InterpretableAttribute)
+ if !isAttr {
+ attr, err = p.relativeAttr(op.ID(), op, false)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Build a qualifier for the attribute.
+ qual, err := p.attrFactory.NewQualifier(opType, expr.GetId(), sel.GetField(), false)
+ if err != nil {
+ return nil, err
+ }
+ // Modify the attribute to be test-only.
+ if sel.GetTestOnly() {
+ attr = &evalTestOnly{
+ id: expr.GetId(),
+ InterpretableAttribute: attr,
+ }
+ }
+ // Append the qualifier on the attribute.
+ _, err = attr.AddQualifier(qual)
+ return attr, err
+}
+
+// planCall creates a callable Interpretable while specializing for common functions and invocation
+// patterns. Specifically, conditional operators &&, ||, ?:, and (in)equality functions result in
+// optimized Interpretable values.
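+//
+// For example, `a || b` is planned as the short-circuiting evalOr and `a == b` as
+// evalEq rather than as generic dispatcher-based calls.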
+func (p *planner) planCall(expr *exprpb.Expr) (Interpretable, error) {
+ call := expr.GetCallExpr()
+ target, fnName, oName := p.resolveFunction(expr)
+ argCount := len(call.GetArgs())
+ var offset int
+ if target != nil {
+ argCount++
+ offset++
+ }
+
+ args := make([]Interpretable, argCount)
+ if target != nil {
+ arg, err := p.Plan(target)
+ if err != nil {
+ return nil, err
+ }
+ args[0] = arg
+ }
+ for i, argExpr := range call.GetArgs() {
+ arg, err := p.Plan(argExpr)
+ if err != nil {
+ return nil, err
+ }
+ args[i+offset] = arg
+ }
+
+ // Generate specialized Interpretable operators by function name if possible.
+ switch fnName {
+ case operators.LogicalAnd:
+ return p.planCallLogicalAnd(expr, args)
+ case operators.LogicalOr:
+ return p.planCallLogicalOr(expr, args)
+ case operators.Conditional:
+ return p.planCallConditional(expr, args)
+ case operators.Equals:
+ return p.planCallEqual(expr, args)
+ case operators.NotEquals:
+ return p.planCallNotEqual(expr, args)
+ case operators.Index:
+ return p.planCallIndex(expr, args, false)
+ case operators.OptSelect, operators.OptIndex:
+ return p.planCallIndex(expr, args, true)
+ }
+
+ // Otherwise, generate Interpretable calls specialized by argument count.
+ // Try to find the specific function by overload id.
+ var fnDef *functions.Overload
+ if oName != "" {
+ fnDef, _ = p.disp.FindOverload(oName)
+ }
+ // If the overload id couldn't resolve the function, try the simple function name.
+ if fnDef == nil {
+ fnDef, _ = p.disp.FindOverload(fnName)
+ }
+ switch argCount {
+ case 0:
+ return p.planCallZero(expr, fnName, oName, fnDef)
+ case 1:
+ // If the FunctionOp has been used, then use it as it may exist for the purposes
+ // of dynamic dispatch within a singleton function implementation.
+ if fnDef != nil && fnDef.Unary == nil && fnDef.Function != nil {
+ return p.planCallVarArgs(expr, fnName, oName, fnDef, args)
+ }
+ return p.planCallUnary(expr, fnName, oName, fnDef, args)
+ case 2:
+ // If the FunctionOp has been used, then use it as it may exist for the purposes
+ // of dynamic dispatch within a singleton function implementation.
+ if fnDef != nil && fnDef.Binary == nil && fnDef.Function != nil {
+ return p.planCallVarArgs(expr, fnName, oName, fnDef, args)
+ }
+ return p.planCallBinary(expr, fnName, oName, fnDef, args)
+ default:
+ return p.planCallVarArgs(expr, fnName, oName, fnDef, args)
+ }
+}
+
+// planCallZero generates a zero-arity callable Interpretable.
+func (p *planner) planCallZero(expr *exprpb.Expr,
+ function string,
+ overload string,
+ impl *functions.Overload) (Interpretable, error) {
+ if impl == nil || impl.Function == nil {
+ return nil, fmt.Errorf("no such overload: %s()", function)
+ }
+ return &evalZeroArity{
+ id: expr.GetId(),
+ function: function,
+ overload: overload,
+ impl: impl.Function,
+ }, nil
+}
+
+// planCallUnary generates a unary callable Interpretable.
+func (p *planner) planCallUnary(expr *exprpb.Expr,
+ function string,
+ overload string,
+ impl *functions.Overload,
+ args []Interpretable) (Interpretable, error) {
+ var fn functions.UnaryOp
+ var trait int
+ var nonStrict bool
+ if impl != nil {
+ if impl.Unary == nil {
+ return nil, fmt.Errorf("no such overload: %s(arg)", function)
+ }
+ fn = impl.Unary
+ trait = impl.OperandTrait
+ nonStrict = impl.NonStrict
+ }
+ return &evalUnary{
+ id: expr.GetId(),
+ function: function,
+ overload: overload,
+ arg: args[0],
+ trait: trait,
+ impl: fn,
+ nonStrict: nonStrict,
+ }, nil
+}
+
+// planCallBinary generates a binary callable Interpretable.
+func (p *planner) planCallBinary(expr *exprpb.Expr,
+ function string,
+ overload string,
+ impl *functions.Overload,
+ args []Interpretable) (Interpretable, error) {
+ var fn functions.BinaryOp
+ var trait int
+ var nonStrict bool
+ if impl != nil {
+ if impl.Binary == nil {
+ return nil, fmt.Errorf("no such overload: %s(lhs, rhs)", function)
+ }
+ fn = impl.Binary
+ trait = impl.OperandTrait
+ nonStrict = impl.NonStrict
+ }
+ return &evalBinary{
+ id: expr.GetId(),
+ function: function,
+ overload: overload,
+ lhs: args[0],
+ rhs: args[1],
+ trait: trait,
+ impl: fn,
+ nonStrict: nonStrict,
+ }, nil
+}
+
+// planCallVarArgs generates a variable argument callable Interpretable.
+func (p *planner) planCallVarArgs(expr *exprpb.Expr,
+ function string,
+ overload string,
+ impl *functions.Overload,
+ args []Interpretable) (Interpretable, error) {
+ var fn functions.FunctionOp
+ var trait int
+ var nonStrict bool
+ if impl != nil {
+ if impl.Function == nil {
+ return nil, fmt.Errorf("no such overload: %s(...)", function)
+ }
+ fn = impl.Function
+ trait = impl.OperandTrait
+ nonStrict = impl.NonStrict
+ }
+ return &evalVarArgs{
+ id: expr.GetId(),
+ function: function,
+ overload: overload,
+ args: args,
+ trait: trait,
+ impl: fn,
+ nonStrict: nonStrict,
+ }, nil
+}
+
+// planCallEqual generates an equals (==) Interpretable.
+func (p *planner) planCallEqual(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) {
+ return &evalEq{
+ id: expr.GetId(),
+ lhs: args[0],
+ rhs: args[1],
+ }, nil
+}
+
+// planCallNotEqual generates a not equals (!=) Interpretable.
+func (p *planner) planCallNotEqual(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) {
+ return &evalNe{
+ id: expr.GetId(),
+ lhs: args[0],
+ rhs: args[1],
+ }, nil
+}
+
+// planCallLogicalAnd generates a logical and (&&) Interpretable.
+func (p *planner) planCallLogicalAnd(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) {
+ return &evalAnd{
+ id: expr.GetId(),
+ terms: args,
+ }, nil
+}
+
+// planCallLogicalOr generates a logical or (||) Interpretable.
+func (p *planner) planCallLogicalOr(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) {
+ return &evalOr{
+ id: expr.GetId(),
+ terms: args,
+ }, nil
+}
+
+// planCallConditional generates a conditional / ternary (c ? t : f) Interpretable.
+func (p *planner) planCallConditional(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) {
+ cond := args[0]
+ t := args[1]
+ var tAttr Attribute
+ truthyAttr, isTruthyAttr := t.(InterpretableAttribute)
+ if isTruthyAttr {
+ tAttr = truthyAttr.Attr()
+ } else {
+ tAttr = p.attrFactory.RelativeAttribute(t.ID(), t)
+ }
+
+ f := args[2]
+ var fAttr Attribute
+ falsyAttr, isFalsyAttr := f.(InterpretableAttribute)
+ if isFalsyAttr {
+ fAttr = falsyAttr.Attr()
+ } else {
+ fAttr = p.attrFactory.RelativeAttribute(f.ID(), f)
+ }
+
+ return &evalAttr{
+ adapter: p.adapter,
+ attr: p.attrFactory.ConditionalAttribute(expr.GetId(), cond, tAttr, fAttr),
+ }, nil
+}
+
+// planCallIndex either extends an attribute with the argument to the index operation, or creates
+// a relative attribute based on the return of a function call or operation.
+func (p *planner) planCallIndex(expr *exprpb.Expr, args []Interpretable, optional bool) (Interpretable, error) {
+ op := args[0]
+ ind := args[1]
+ opType := p.typeMap[op.ID()]
+
+ // Establish the attribute reference.
+ var err error
+ attr, isAttr := op.(InterpretableAttribute)
+ if !isAttr {
+ attr, err = p.relativeAttr(op.ID(), op, false)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Construct the qualifier type.
+ var qual Qualifier
+ switch ind := ind.(type) {
+ case InterpretableConst:
+ qual, err = p.attrFactory.NewQualifier(opType, expr.GetId(), ind.Value(), optional)
+ case InterpretableAttribute:
+ qual, err = p.attrFactory.NewQualifier(opType, expr.GetId(), ind, optional)
+ default:
+ qual, err = p.relativeAttr(expr.GetId(), ind, optional)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ // Add the qualifier to the attribute
+ _, err = attr.AddQualifier(qual)
+ return attr, err
+}
+
+// planCreateList generates a list construction Interpretable.
+func (p *planner) planCreateList(expr *exprpb.Expr) (Interpretable, error) {
+ list := expr.GetListExpr()
+ optionalIndices := list.GetOptionalIndices()
+ elements := list.GetElements()
+ optionals := make([]bool, len(elements))
+ for _, index := range optionalIndices {
+ if index < 0 || index >= int32(len(elements)) {
+ return nil, fmt.Errorf("optional index %d out of element bounds [0, %d]", index, len(elements))
+ }
+ optionals[index] = true
+ }
+ elems := make([]Interpretable, len(elements))
+ for i, elem := range elements {
+ elemVal, err := p.Plan(elem)
+ if err != nil {
+ return nil, err
+ }
+ elems[i] = elemVal
+ }
+ return &evalList{
+ id: expr.GetId(),
+ elems: elems,
+ optionals: optionals,
+ hasOptionals: len(optionals) != 0,
+ adapter: p.adapter,
+ }, nil
+}
+
+// planCreateStruct generates a map or object construction Interpretable.
+func (p *planner) planCreateStruct(expr *exprpb.Expr) (Interpretable, error) {
+ str := expr.GetStructExpr()
+ if len(str.MessageName) != 0 {
+ return p.planCreateObj(expr)
+ }
+ entries := str.GetEntries()
+ optionals := make([]bool, len(entries))
+ keys := make([]Interpretable, len(entries))
+ vals := make([]Interpretable, len(entries))
+ for i, entry := range entries {
+ keyVal, err := p.Plan(entry.GetMapKey())
+ if err != nil {
+ return nil, err
+ }
+ keys[i] = keyVal
+
+ valVal, err := p.Plan(entry.GetValue())
+ if err != nil {
+ return nil, err
+ }
+ vals[i] = valVal
+ optionals[i] = entry.GetOptionalEntry()
+ }
+ return &evalMap{
+ id: expr.GetId(),
+ keys: keys,
+ vals: vals,
+ optionals: optionals,
+ hasOptionals: len(optionals) != 0,
+ adapter: p.adapter,
+ }, nil
+}
+
+// planCreateObj generates an object construction Interpretable.
+func (p *planner) planCreateObj(expr *exprpb.Expr) (Interpretable, error) {
+ obj := expr.GetStructExpr()
+ typeName, defined := p.resolveTypeName(obj.GetMessageName())
+ if !defined {
+ return nil, fmt.Errorf("unknown type: %s", obj.GetMessageName())
+ }
+ entries := obj.GetEntries()
+ optionals := make([]bool, len(entries))
+ fields := make([]string, len(entries))
+ vals := make([]Interpretable, len(entries))
+ for i, entry := range entries {
+ fields[i] = entry.GetFieldKey()
+ val, err := p.Plan(entry.GetValue())
+ if err != nil {
+ return nil, err
+ }
+ vals[i] = val
+ optionals[i] = entry.GetOptionalEntry()
+ }
+ return &evalObj{
+ id: expr.GetId(),
+ typeName: typeName,
+ fields: fields,
+ vals: vals,
+ optionals: optionals,
+ hasOptionals: len(optionals) != 0,
+ provider: p.provider,
+ }, nil
+}
+
+// planComprehension generates an Interpretable fold operation.
+func (p *planner) planComprehension(expr *exprpb.Expr) (Interpretable, error) {
+ fold := expr.GetComprehensionExpr()
+ accu, err := p.Plan(fold.GetAccuInit())
+ if err != nil {
+ return nil, err
+ }
+ iterRange, err := p.Plan(fold.GetIterRange())
+ if err != nil {
+ return nil, err
+ }
+ cond, err := p.Plan(fold.GetLoopCondition())
+ if err != nil {
+ return nil, err
+ }
+ step, err := p.Plan(fold.GetLoopStep())
+ if err != nil {
+ return nil, err
+ }
+ result, err := p.Plan(fold.GetResult())
+ if err != nil {
+ return nil, err
+ }
+ return &evalFold{
+ id: expr.GetId(),
+ accuVar: fold.AccuVar,
+ accu: accu,
+ iterVar: fold.IterVar,
+ iterRange: iterRange,
+ cond: cond,
+ step: step,
+ result: result,
+ adapter: p.adapter,
+ }, nil
+}
+
+// planConst generates a constant valued Interpretable.
+func (p *planner) planConst(expr *exprpb.Expr) (Interpretable, error) {
+ val, err := p.constValue(expr.GetConstExpr())
+ if err != nil {
+ return nil, err
+ }
+ return NewConstValue(expr.GetId(), val), nil
+}
+
+// constValue converts a proto Constant value to a ref.Val.
+func (p *planner) constValue(c *exprpb.Constant) (ref.Val, error) {
+ switch c.GetConstantKind().(type) {
+ case *exprpb.Constant_BoolValue:
+ return p.adapter.NativeToValue(c.GetBoolValue()), nil
+ case *exprpb.Constant_BytesValue:
+ return p.adapter.NativeToValue(c.GetBytesValue()), nil
+ case *exprpb.Constant_DoubleValue:
+ return p.adapter.NativeToValue(c.GetDoubleValue()), nil
+ case *exprpb.Constant_DurationValue:
+ return p.adapter.NativeToValue(c.GetDurationValue().AsDuration()), nil
+ case *exprpb.Constant_Int64Value:
+ return p.adapter.NativeToValue(c.GetInt64Value()), nil
+ case *exprpb.Constant_NullValue:
+ return p.adapter.NativeToValue(c.GetNullValue()), nil
+ case *exprpb.Constant_StringValue:
+ return p.adapter.NativeToValue(c.GetStringValue()), nil
+ case *exprpb.Constant_TimestampValue:
+ return p.adapter.NativeToValue(c.GetTimestampValue().AsTime()), nil
+ case *exprpb.Constant_Uint64Value:
+ return p.adapter.NativeToValue(c.GetUint64Value()), nil
+ }
+ return nil, fmt.Errorf("unknown constant type: %v", c)
+}
+
+// resolveTypeName takes a qualified string constructed at parse time, applies the proto
+// namespace resolution rules to it in a scan over possible matching types in the TypeProvider.
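+//
+// For example, within the container `a.b`, the type name `T` is tried as `a.b.T`,
+// then `a.T`, and finally `T`, returning the first name the provider recognizes.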
+func (p *planner) resolveTypeName(typeName string) (string, bool) {
+ for _, qualifiedTypeName := range p.container.ResolveCandidateNames(typeName) {
+ if _, found := p.provider.FindStructType(qualifiedTypeName); found {
+ return qualifiedTypeName, true
+ }
+ }
+ return "", false
+}
+
+// resolveFunction determines the call target, function name, and overload name from a given Expr
+// value.
+//
+// resolveFunction resolves ambiguities where a function may be either a receiver-style
+// invocation or a qualified global function name. A call is treated as a qualified
+// global function only when:
+// - The target expression may only consist of ident and select expressions.
+// - The function is declared in the environment using its fully-qualified name.
+// - The fully-qualified function name matches the string serialized target value.
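+//
+// For example, `a.b.c()` may resolve either to a receiver-style call of `c()` on the
+// target `a.b`, or to the qualified global function `a.b.c()` when such an overload
+// is registered with the dispatcher.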
+func (p *planner) resolveFunction(expr *exprpb.Expr) (*exprpb.Expr, string, string) {
+ // Note: similar logic exists within the `checker/checker.go`. If making changes here
+ // please consider the impact on checker.go and consolidate implementations or mirror code
+ // as appropriate.
+ call := expr.GetCallExpr()
+ target := call.GetTarget()
+ fnName := call.GetFunction()
+
+ // Checked expressions always have a reference map entry, and _should_ have the fully qualified
+ // function name as the fnName value.
+ oRef, hasOverload := p.refMap[expr.GetId()]
+ if hasOverload {
+ if len(oRef.OverloadIDs) == 1 {
+ return target, fnName, oRef.OverloadIDs[0]
+ }
+ // Note, this namespaced function name will not appear as a fully qualified name in ASTs
+ // built and stored before cel-go v0.5.0; however, this functionality did not work at all
+ // before the v0.5.0 release.
+ return target, fnName, ""
+ }
+
+ // Parse-only expressions need to handle the same logic as is normally performed at check time,
+ // but with potentially much less information. The only reliable source of information about
+ // which functions are configured is the dispatcher.
+ if target == nil {
+ // If the user has a parse-only expression, then it should have been configured as such in
+ // the interpreter dispatcher as it may have been omitted from the checker environment.
+ for _, qualifiedName := range p.container.ResolveCandidateNames(fnName) {
+ _, found := p.disp.FindOverload(qualifiedName)
+ if found {
+ return nil, qualifiedName, ""
+ }
+ }
+ // It's possible that the overload was not found, but this situation is accounted for in
+ // the planCall phase; however, the leading dot used for denoting fully-qualified
+ // namespaced identifiers must be stripped, as all declarations already use fully-qualified
+ // names. This stripping behavior is handled automatically by the ResolveCandidateNames
+ // call.
+ return target, stripLeadingDot(fnName), ""
+ }
+
+ // Handle the situation where the function target actually indicates a qualified function name.
+ qualifiedPrefix, maybeQualified := p.toQualifiedName(target)
+ if maybeQualified {
+ maybeQualifiedName := qualifiedPrefix + "." + fnName
+ for _, qualifiedName := range p.container.ResolveCandidateNames(maybeQualifiedName) {
+ _, found := p.disp.FindOverload(qualifiedName)
+ if found {
+ // Clear the target to ensure the proper arity is used for finding the
+ // implementation.
+ return nil, qualifiedName, ""
+ }
+ }
+ }
+ // In the default case, the function is exactly as it was advertised: a receiver call on
+ // an expression-based target with the given simple function name.
+ return target, fnName, ""
+}
+
+// relativeAttr indicates that the attribute in this case acts as a qualifier and as such needs to
+// be observed to ensure that its evaluation value is properly recorded for state tracking.
+func (p *planner) relativeAttr(id int64, eval Interpretable, opt bool) (InterpretableAttribute, error) {
+ eAttr, ok := eval.(InterpretableAttribute)
+ if !ok {
+ eAttr = &evalAttr{
+ adapter: p.adapter,
+ attr: p.attrFactory.RelativeAttribute(id, eval),
+ optional: opt,
+ }
+ }
+ // This looks like it should either decorate the new evalAttr node, or early return the InterpretableAttribute
+ decAttr, err := p.decorate(eAttr, nil)
+ if err != nil {
+ return nil, err
+ }
+ eAttr, ok = decAttr.(InterpretableAttribute)
+ if !ok {
+ return nil, fmt.Errorf("invalid attribute decoration: %v(%T)", decAttr, decAttr)
+ }
+ return eAttr, nil
+}
+
+// toQualifiedName converts an expression AST into a qualified name if possible, with a boolean
+// 'found' value that indicates if the conversion is successful.
+func (p *planner) toQualifiedName(operand *exprpb.Expr) (string, bool) {
+ // If the type-checker identified the expression as an attribute, then it can't
+ // possibly be part of a qualified name in a namespace.
+ _, isAttr := p.refMap[operand.GetId()]
+ if isAttr {
+ return "", false
+ }
+ // Since functions cannot be both namespaced and receiver functions, if the operand is not a
+ // qualified variable name, return the (possibly) qualified name given by the expression.
+ return containers.ToQualifiedName(operand)
+}
+
+func stripLeadingDot(name string) string {
+ if strings.HasPrefix(name, ".") {
+ return name[1:]
+ }
+ return name
+}
diff --git a/vendor/github.com/google/cel-go/interpreter/prune.go b/vendor/github.com/google/cel-go/interpreter/prune.go
new file mode 100644
index 0000000..b8834b1
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/prune.go
@@ -0,0 +1,619 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+type astPruner struct {
+ expr *exprpb.Expr
+ macroCalls map[int64]*exprpb.Expr
+ state EvalState
+ nextExprID int64
+}
+
+// TODO Consider having a separate walk of the AST that finds common
+// subexpressions. This can be called before or after constant folding to find
+// common subexpressions.
+
+// PruneAst prunes the given AST based on the given EvalState and generates a new AST.
+// The given AST is copied on write and a new AST is returned.
+//
+// A couple of typical use cases for this interface would be:
+//
+// A)
+//  1. Evaluate the expression with some unknowns.
+//  2. If the result is unknown:
+//     a) PruneAst
+//     b) Goto 1
+//
+// Function call results which are known would be effectively cached across iterations.
+//
+// B)
+//  1. Compile the expression (maybe via a service, and maybe after checking that a
+//     compiled expression does not exist in a local cache).
+//  2. Prepare the environment and the interpreter. The Activation might be empty.
+//  3. Eval the expression. This might return an unknown, an error, or a concrete value.
+//  4. PruneAst.
+//  5. Maybe cache the pruned expression.
+//
+// This is effectively constant folding the expression. How the environment is prepared
+// in step 2 is flexible. For example, if the caller caches the compiled and
+// constant-folded expressions but is not willing to constant fold (and thus cache the
+// results of) some external calls, then they can prepare the overloads accordingly.
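+//
+// A sketch of use case A (prg, parsed, partialVars, and state are assumed to exist,
+// and prg must have been planned with state tracking, e.g. TrackState(state)):
+//
+//	val := prg.Eval(partialVars)
+//	if types.IsUnknown(val) {
+//		pruned := PruneAst(parsed.GetExpr(), parsed.GetSourceInfo().GetMacroCalls(), state)
+//		// Re-plan pruned.GetExpr() and evaluate again once more inputs are known.
+//	}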
+func PruneAst(expr *exprpb.Expr, macroCalls map[int64]*exprpb.Expr, state EvalState) *exprpb.ParsedExpr {
+ pruneState := NewEvalState()
+ for _, id := range state.IDs() {
+ v, _ := state.Value(id)
+ pruneState.SetValue(id, v)
+ }
+ pruner := &astPruner{
+ expr: expr,
+ macroCalls: macroCalls,
+ state: pruneState,
+ nextExprID: getMaxID(expr)}
+ newExpr, _ := pruner.maybePrune(expr)
+ return &exprpb.ParsedExpr{
+ Expr: newExpr,
+ SourceInfo: &exprpb.SourceInfo{MacroCalls: pruner.macroCalls},
+ }
+}
+
+func (p *astPruner) createLiteral(id int64, val *exprpb.Constant) *exprpb.Expr {
+ return &exprpb.Expr{
+ Id: id,
+ ExprKind: &exprpb.Expr_ConstExpr{
+ ConstExpr: val,
+ },
+ }
+}
+
+func (p *astPruner) maybeCreateLiteral(id int64, val ref.Val) (*exprpb.Expr, bool) {
+ switch v := val.(type) {
+ case types.Bool:
+ p.state.SetValue(id, val)
+ return p.createLiteral(id,
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: bool(v)}}), true
+ case types.Bytes:
+ p.state.SetValue(id, val)
+ return p.createLiteral(id,
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: []byte(v)}}), true
+ case types.Double:
+ p.state.SetValue(id, val)
+ return p.createLiteral(id,
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: float64(v)}}), true
+ case types.Duration:
+ p.state.SetValue(id, val)
+ durationString := string(v.ConvertToType(types.StringType).(types.String))
+ return &exprpb.Expr{
+ Id: id,
+ ExprKind: &exprpb.Expr_CallExpr{
+ CallExpr: &exprpb.Expr_Call{
+ Function: overloads.TypeConvertDuration,
+ Args: []*exprpb.Expr{
+ p.createLiteral(p.nextID(),
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: durationString}}),
+ },
+ },
+ },
+ }, true
+ case types.Int:
+ p.state.SetValue(id, val)
+ return p.createLiteral(id,
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: int64(v)}}), true
+ case types.Uint:
+ p.state.SetValue(id, val)
+ return p.createLiteral(id,
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: uint64(v)}}), true
+ case types.String:
+ p.state.SetValue(id, val)
+ return p.createLiteral(id,
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: string(v)}}), true
+ case types.Null:
+ p.state.SetValue(id, val)
+ return p.createLiteral(id,
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_NullValue{NullValue: v.Value().(structpb.NullValue)}}), true
+ }
+
+ // Attempt to build a list literal.
+ if list, isList := val.(traits.Lister); isList {
+ sz := list.Size().(types.Int)
+ elemExprs := make([]*exprpb.Expr, sz)
+ for i := types.Int(0); i < sz; i++ {
+ elem := list.Get(i)
+ if types.IsUnknownOrError(elem) {
+ return nil, false
+ }
+ elemExpr, ok := p.maybeCreateLiteral(p.nextID(), elem)
+ if !ok {
+ return nil, false
+ }
+ elemExprs[i] = elemExpr
+ }
+ p.state.SetValue(id, val)
+ return &exprpb.Expr{
+ Id: id,
+ ExprKind: &exprpb.Expr_ListExpr{
+ ListExpr: &exprpb.Expr_CreateList{
+ Elements: elemExprs,
+ },
+ },
+ }, true
+ }
+
+ // Create a map literal if possible.
+ if mp, isMap := val.(traits.Mapper); isMap {
+ it := mp.Iterator()
+ entries := make([]*exprpb.Expr_CreateStruct_Entry, mp.Size().(types.Int))
+ i := 0
+ for it.HasNext() != types.False {
+ key := it.Next()
+ val := mp.Get(key)
+ if types.IsUnknownOrError(key) || types.IsUnknownOrError(val) {
+ return nil, false
+ }
+ keyExpr, ok := p.maybeCreateLiteral(p.nextID(), key)
+ if !ok {
+ return nil, false
+ }
+ valExpr, ok := p.maybeCreateLiteral(p.nextID(), val)
+ if !ok {
+ return nil, false
+ }
+ entry := &exprpb.Expr_CreateStruct_Entry{
+ Id: p.nextID(),
+ KeyKind: &exprpb.Expr_CreateStruct_Entry_MapKey{
+ MapKey: keyExpr,
+ },
+ Value: valExpr,
+ }
+ entries[i] = entry
+ i++
+ }
+ p.state.SetValue(id, val)
+ return &exprpb.Expr{
+ Id: id,
+ ExprKind: &exprpb.Expr_StructExpr{
+ StructExpr: &exprpb.Expr_CreateStruct{
+ Entries: entries,
+ },
+ },
+ }, true
+ }
+
+ // TODO(issues/377) To construct message literals, the type provider will need to support
+ // enumerating the fields of a given message.
+ return nil, false
+}
+
+func (p *astPruner) maybePruneOptional(elem *exprpb.Expr) (*exprpb.Expr, bool) {
+ elemVal, found := p.value(elem.GetId())
+ if found && elemVal.Type() == types.OptionalType {
+ opt := elemVal.(*types.Optional)
+ if !opt.HasValue() {
+ return nil, true
+ }
+ if newElem, pruned := p.maybeCreateLiteral(elem.GetId(), opt.GetValue()); pruned {
+ return newElem, true
+ }
+ }
+ return elem, false
+}
+
+func (p *astPruner) maybePruneIn(node *exprpb.Expr) (*exprpb.Expr, bool) {
+ // elem in list
+ call := node.GetCallExpr()
+ val, exists := p.maybeValue(call.GetArgs()[1].GetId())
+ if !exists {
+ return nil, false
+ }
+ if sz, ok := val.(traits.Sizer); ok && sz.Size() == types.IntZero {
+ return p.maybeCreateLiteral(node.GetId(), types.False)
+ }
+ return nil, false
+}
+
+func (p *astPruner) maybePruneLogicalNot(node *exprpb.Expr) (*exprpb.Expr, bool) {
+ call := node.GetCallExpr()
+ arg := call.GetArgs()[0]
+ val, exists := p.maybeValue(arg.GetId())
+ if !exists {
+ return nil, false
+ }
+ if b, ok := val.(types.Bool); ok {
+ return p.maybeCreateLiteral(node.GetId(), !b)
+ }
+ return nil, false
+}
+
+func (p *astPruner) maybePruneOr(node *exprpb.Expr) (*exprpb.Expr, bool) {
+ call := node.GetCallExpr()
+ // We know the result is unknown, so we have at least one unknown arg,
+ // and if one side is a known value, we can ignore it.
+ if v, exists := p.maybeValue(call.GetArgs()[0].GetId()); exists {
+ if v == types.True {
+ return p.maybeCreateLiteral(node.GetId(), types.True)
+ }
+ return call.GetArgs()[1], true
+ }
+ if v, exists := p.maybeValue(call.GetArgs()[1].GetId()); exists {
+ if v == types.True {
+ return p.maybeCreateLiteral(node.GetId(), types.True)
+ }
+ return call.GetArgs()[0], true
+ }
+ return nil, false
+}
+
+func (p *astPruner) maybePruneAnd(node *exprpb.Expr) (*exprpb.Expr, bool) {
+ call := node.GetCallExpr()
+ // We know the result is unknown, so we have at least one unknown arg,
+ // and if one side is a known value, we can ignore it.
+ if v, exists := p.maybeValue(call.GetArgs()[0].GetId()); exists {
+ if v == types.False {
+ return p.maybeCreateLiteral(node.GetId(), types.False)
+ }
+ return call.GetArgs()[1], true
+ }
+ if v, exists := p.maybeValue(call.GetArgs()[1].GetId()); exists {
+ if v == types.False {
+ return p.maybeCreateLiteral(node.GetId(), types.False)
+ }
+ return call.GetArgs()[0], true
+ }
+ return nil, false
+}
+
+func (p *astPruner) maybePruneConditional(node *exprpb.Expr) (*exprpb.Expr, bool) {
+ call := node.GetCallExpr()
+ cond, exists := p.maybeValue(call.GetArgs()[0].GetId())
+ if !exists {
+ return nil, false
+ }
+ if cond.Value().(bool) {
+ return call.GetArgs()[1], true
+ }
+ return call.GetArgs()[2], true
+}
+
+func (p *astPruner) maybePruneFunction(node *exprpb.Expr) (*exprpb.Expr, bool) {
+ if _, exists := p.value(node.GetId()); !exists {
+ return nil, false
+ }
+ call := node.GetCallExpr()
+ if call.Function == operators.LogicalOr {
+ return p.maybePruneOr(node)
+ }
+ if call.Function == operators.LogicalAnd {
+ return p.maybePruneAnd(node)
+ }
+ if call.Function == operators.Conditional {
+ return p.maybePruneConditional(node)
+ }
+ if call.Function == operators.In {
+ return p.maybePruneIn(node)
+ }
+ if call.Function == operators.LogicalNot {
+ return p.maybePruneLogicalNot(node)
+ }
+ return nil, false
+}
+
+func (p *astPruner) maybePrune(node *exprpb.Expr) (*exprpb.Expr, bool) {
+ return p.prune(node)
+}
+
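+// prune folds the node into a literal when a concrete value was recorded for it, and otherwise recurses
+// into its children to replace whatever sub-expressions can be pruned.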
+func (p *astPruner) prune(node *exprpb.Expr) (*exprpb.Expr, bool) {
+ if node == nil {
+ return node, false
+ }
+ val, valueExists := p.maybeValue(node.GetId())
+ if valueExists {
+ if newNode, ok := p.maybeCreateLiteral(node.GetId(), val); ok {
+ delete(p.macroCalls, node.GetId())
+ return newNode, true
+ }
+ }
+ if macro, found := p.macroCalls[node.GetId()]; found {
+ // Ensure that intermediate values for the comprehension are cleared during pruning
+ compre := node.GetComprehensionExpr()
+ if compre != nil {
+ visit(macro, clearIterVarVisitor(compre.IterVar, p.state))
+ }
+ // Prune the expression in terms of the macro call instead of the expanded form.
+ if newMacro, pruned := p.prune(macro); pruned {
+ p.macroCalls[node.GetId()] = newMacro
+ }
+ }
+
+ // We have either an unknown/error value, or something we don't want to
+ // transform, or the expression was not evaluated. If possible, drill down
+ // further.
+ switch node.GetExprKind().(type) {
+ case *exprpb.Expr_SelectExpr:
+ if operand, pruned := p.maybePrune(node.GetSelectExpr().GetOperand()); pruned {
+ return &exprpb.Expr{
+ Id: node.GetId(),
+ ExprKind: &exprpb.Expr_SelectExpr{
+ SelectExpr: &exprpb.Expr_Select{
+ Operand: operand,
+ Field: node.GetSelectExpr().GetField(),
+ TestOnly: node.GetSelectExpr().GetTestOnly(),
+ },
+ },
+ }, true
+ }
+ case *exprpb.Expr_CallExpr:
+ var prunedCall bool
+ call := node.GetCallExpr()
+ args := call.GetArgs()
+ newArgs := make([]*exprpb.Expr, len(args))
+ newCall := &exprpb.Expr_Call{
+ Function: call.GetFunction(),
+ Target: call.GetTarget(),
+ Args: newArgs,
+ }
+ for i, arg := range args {
+ newArgs[i] = arg
+ if newArg, prunedArg := p.maybePrune(arg); prunedArg {
+ prunedCall = true
+ newArgs[i] = newArg
+ }
+ }
+ if newTarget, prunedTarget := p.maybePrune(call.GetTarget()); prunedTarget {
+ prunedCall = true
+ newCall.Target = newTarget
+ }
+ newNode := &exprpb.Expr{
+ Id: node.GetId(),
+ ExprKind: &exprpb.Expr_CallExpr{
+ CallExpr: newCall,
+ },
+ }
+ if newExpr, pruned := p.maybePruneFunction(newNode); pruned {
+ newExpr, _ = p.maybePrune(newExpr)
+ return newExpr, true
+ }
+ if prunedCall {
+ return newNode, true
+ }
+ case *exprpb.Expr_ListExpr:
+ elems := node.GetListExpr().GetElements()
+ optIndices := node.GetListExpr().GetOptionalIndices()
+ optIndexMap := map[int32]bool{}
+ for _, i := range optIndices {
+ optIndexMap[i] = true
+ }
+ newOptIndexMap := make(map[int32]bool, len(optIndexMap))
+ newElems := make([]*exprpb.Expr, 0, len(elems))
+ var prunedList bool
+
+ prunedIdx := 0
+ for i, elem := range elems {
+ _, isOpt := optIndexMap[int32(i)]
+ if isOpt {
+ newElem, pruned := p.maybePruneOptional(elem)
+ if pruned {
+ prunedList = true
+ if newElem != nil {
+ newElems = append(newElems, newElem)
+ prunedIdx++
+ }
+ continue
+ }
+ newOptIndexMap[int32(prunedIdx)] = true
+ }
+ if newElem, prunedElem := p.maybePrune(elem); prunedElem {
+ newElems = append(newElems, newElem)
+ prunedList = true
+ } else {
+ newElems = append(newElems, elem)
+ }
+ prunedIdx++
+ }
+ optIndices = make([]int32, len(newOptIndexMap))
+ idx := 0
+ for i := range newOptIndexMap {
+ optIndices[idx] = i
+ idx++
+ }
+ if prunedList {
+ return &exprpb.Expr{
+ Id: node.GetId(),
+ ExprKind: &exprpb.Expr_ListExpr{
+ ListExpr: &exprpb.Expr_CreateList{
+ Elements: newElems,
+ OptionalIndices: optIndices,
+ },
+ },
+ }, true
+ }
+ case *exprpb.Expr_StructExpr:
+ var prunedStruct bool
+ entries := node.GetStructExpr().GetEntries()
+ messageType := node.GetStructExpr().GetMessageName()
+ newEntries := make([]*exprpb.Expr_CreateStruct_Entry, len(entries))
+ for i, entry := range entries {
+ newEntries[i] = entry
+ newKey, prunedKey := p.maybePrune(entry.GetMapKey())
+ newValue, prunedValue := p.maybePrune(entry.GetValue())
+ if !prunedKey && !prunedValue {
+ continue
+ }
+ prunedStruct = true
+ newEntry := &exprpb.Expr_CreateStruct_Entry{
+ Value: newValue,
+ }
+ if messageType != "" {
+ newEntry.KeyKind = &exprpb.Expr_CreateStruct_Entry_FieldKey{
+ FieldKey: entry.GetFieldKey(),
+ }
+ } else {
+ newEntry.KeyKind = &exprpb.Expr_CreateStruct_Entry_MapKey{
+ MapKey: newKey,
+ }
+ }
+ newEntry.OptionalEntry = entry.GetOptionalEntry()
+ newEntries[i] = newEntry
+ }
+ if prunedStruct {
+ return &exprpb.Expr{
+ Id: node.GetId(),
+ ExprKind: &exprpb.Expr_StructExpr{
+ StructExpr: &exprpb.Expr_CreateStruct{
+ MessageName: messageType,
+ Entries: newEntries,
+ },
+ },
+ }, true
+ }
+ case *exprpb.Expr_ComprehensionExpr:
+ compre := node.GetComprehensionExpr()
+ // Only the range of the comprehension is pruned since the state tracking only records
+ // the last iteration of the comprehension and not each step in the evaluation, which
+ // means that any residuals computed in between might be inaccurate.
+ if newRange, pruned := p.maybePrune(compre.GetIterRange()); pruned {
+ return &exprpb.Expr{
+ Id: node.GetId(),
+ ExprKind: &exprpb.Expr_ComprehensionExpr{
+ ComprehensionExpr: &exprpb.Expr_Comprehension{
+ IterVar: compre.GetIterVar(),
+ IterRange: newRange,
+ AccuVar: compre.GetAccuVar(),
+ AccuInit: compre.GetAccuInit(),
+ LoopCondition: compre.GetLoopCondition(),
+ LoopStep: compre.GetLoopStep(),
+ Result: compre.GetResult(),
+ },
+ },
+ }, true
+ }
+ }
+ return node, false
+}
+
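+// value returns the value recorded for the given expression ID, if one exists.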
+func (p *astPruner) value(id int64) (ref.Val, bool) {
+ val, found := p.state.Value(id)
+ return val, (found && val != nil)
+}
+
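+// maybeValue is like value, but treats unknown and error values as absent.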
+func (p *astPruner) maybeValue(id int64) (ref.Val, bool) {
+ val, found := p.value(id)
+ if !found || types.IsUnknownOrError(val) {
+ return nil, false
+ }
+ return val, true
+}
+
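+// nextID allocates a fresh expression ID for newly constructed literal nodes.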
+func (p *astPruner) nextID() int64 {
+ next := p.nextExprID
+ p.nextExprID++
+ return next
+}
+
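+// astVisitor bundles the callbacks invoked while walking an expression tree.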
+type astVisitor struct {
+ // visitExpr is called on every expr node, including those within a map/struct entry.
+ visitExpr func(expr *exprpb.Expr)
+ // visitEntry is called before entering the key, value of a map/struct entry.
+ visitEntry func(entry *exprpb.Expr_CreateStruct_Entry)
+}
+
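+// getMaxID returns an ID strictly greater than any expression ID used within the given expression.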
+func getMaxID(expr *exprpb.Expr) int64 {
+ maxID := int64(1)
+ visit(expr, maxIDVisitor(&maxID))
+ return maxID
+}
+
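+// clearIterVarVisitor clears recorded values for identifiers matching the comprehension's iteration
+// variable so that stale per-iteration state does not leak into the pruned output.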
+func clearIterVarVisitor(varName string, state EvalState) astVisitor {
+ return astVisitor{
+ visitExpr: func(e *exprpb.Expr) {
+ ident := e.GetIdentExpr()
+ if ident != nil && ident.GetName() == varName {
+ state.SetValue(e.GetId(), nil)
+ }
+ },
+ }
+}
+
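+// maxIDVisitor updates maxID to one past the largest expression or entry ID encountered.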
+func maxIDVisitor(maxID *int64) astVisitor {
+ return astVisitor{
+ visitExpr: func(e *exprpb.Expr) {
+ if e.GetId() >= *maxID {
+ *maxID = e.GetId() + 1
+ }
+ },
+ visitEntry: func(e *exprpb.Expr_CreateStruct_Entry) {
+ if e.GetId() >= *maxID {
+ *maxID = e.GetId() + 1
+ }
+ },
+ }
+}
+
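+// visit performs a breadth-first traversal of the expression, invoking the visitor callbacks on each
+// node and map/struct entry.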
+func visit(expr *exprpb.Expr, visitor astVisitor) {
+ exprs := []*exprpb.Expr{expr}
+ for len(exprs) != 0 {
+ e := exprs[0]
+ if visitor.visitExpr != nil {
+ visitor.visitExpr(e)
+ }
+ exprs = exprs[1:]
+ switch e.GetExprKind().(type) {
+ case *exprpb.Expr_SelectExpr:
+ exprs = append(exprs, e.GetSelectExpr().GetOperand())
+ case *exprpb.Expr_CallExpr:
+ call := e.GetCallExpr()
+ if call.GetTarget() != nil {
+ exprs = append(exprs, call.GetTarget())
+ }
+ exprs = append(exprs, call.GetArgs()...)
+ case *exprpb.Expr_ComprehensionExpr:
+ compre := e.GetComprehensionExpr()
+ exprs = append(exprs,
+ compre.GetIterRange(),
+ compre.GetAccuInit(),
+ compre.GetLoopCondition(),
+ compre.GetLoopStep(),
+ compre.GetResult())
+ case *exprpb.Expr_ListExpr:
+ list := e.GetListExpr()
+ exprs = append(exprs, list.GetElements()...)
+ case *exprpb.Expr_StructExpr:
+ for _, entry := range e.GetStructExpr().GetEntries() {
+ if visitor.visitEntry != nil {
+ visitor.visitEntry(entry)
+ }
+ if entry.GetMapKey() != nil {
+ exprs = append(exprs, entry.GetMapKey())
+ }
+ exprs = append(exprs, entry.GetValue())
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/google/cel-go/interpreter/runtimecost.go b/vendor/github.com/google/cel-go/interpreter/runtimecost.go
new file mode 100644
index 0000000..b9b307c
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/runtimecost.go
@@ -0,0 +1,316 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+ "math"
+
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+// WARNING: Any changes to cost calculations in this file require a corresponding change in checker/cost.go
+
+// ActualCostEstimator provides function call cost estimations at runtime.
+// CallCost returns an estimated cost for the function overload invocation with the given args, or nil if it has no
+// estimate to provide. CEL attempts to provide reasonable estimates for its standard function library, so CallCost
+// should typically not need to provide an estimate for CEL's standard functions.
+type ActualCostEstimator interface {
+ CallCost(function, overloadID string, args []ref.Val, result ref.Val) *uint64
+}
+
+// CostObserver provides an observer that tracks runtime cost.
+func CostObserver(tracker *CostTracker) EvalObserver {
+ observer := func(id int64, programStep any, val ref.Val) {
+ switch t := programStep.(type) {
+ case ConstantQualifier:
+ // TODO: Push identifiers onto the stack before observing constant qualifiers that apply to them
+ // and enable the below pop. Once enabled, this case can be collapsed into the Qualifier case.
+ tracker.cost++
+ case InterpretableConst:
+ // zero cost
+ case InterpretableAttribute:
+ switch a := t.Attr().(type) {
+ case *conditionalAttribute:
+ // Ternary has no direct cost. All cost is from the conditional and the true/false branch expressions.
+ tracker.stack.drop(a.falsy.ID(), a.truthy.ID(), a.expr.ID())
+ default:
+ tracker.stack.drop(t.Attr().ID())
+ tracker.cost += common.SelectAndIdentCost
+ }
+ if !tracker.presenceTestHasCost {
+ if _, isTestOnly := programStep.(*evalTestOnly); isTestOnly {
+ tracker.cost -= common.SelectAndIdentCost
+ }
+ }
+ case *evalExhaustiveConditional:
+ // Ternary has no direct cost. All cost is from the conditional and the true/false branch expressions.
+ tracker.stack.drop(t.attr.falsy.ID(), t.attr.truthy.ID(), t.attr.expr.ID())
+
+ // While the field names are identical, the boolean operation eval structs do not share an interface and so
+ // must be handled individually.
+ case *evalOr:
+ for _, term := range t.terms {
+ tracker.stack.drop(term.ID())
+ }
+ case *evalAnd:
+ for _, term := range t.terms {
+ tracker.stack.drop(term.ID())
+ }
+ case *evalExhaustiveOr:
+ for _, term := range t.terms {
+ tracker.stack.drop(term.ID())
+ }
+ case *evalExhaustiveAnd:
+ for _, term := range t.terms {
+ tracker.stack.drop(term.ID())
+ }
+ case *evalFold:
+ tracker.stack.drop(t.iterRange.ID())
+ case Qualifier:
+ tracker.cost++
+ case InterpretableCall:
+ if argVals, ok := tracker.stack.dropArgs(t.Args()); ok {
+ tracker.cost += tracker.costCall(t, argVals, val)
+ }
+ case InterpretableConstructor:
+ tracker.stack.dropArgs(t.InitVals())
+ switch t.Type() {
+ case types.ListType:
+ tracker.cost += common.ListCreateBaseCost
+ case types.MapType:
+ tracker.cost += common.MapCreateBaseCost
+ default:
+ tracker.cost += common.StructCreateBaseCost
+ }
+ }
+ tracker.stack.push(val, id)
+
+ if tracker.Limit != nil && tracker.cost > *tracker.Limit {
+ panic(EvalCancelledError{Cause: CostLimitExceeded, Message: "operation cancelled: actual cost limit exceeded"})
+ }
+ }
+ return observer
+}
+
+// CostTrackerOption configures the behavior of CostTracker objects.
+type CostTrackerOption func(*CostTracker) error
+
+// CostTrackerLimit sets the runtime limit on the evaluation cost during execution and will terminate the expression
+// evaluation if the limit is exceeded.
+func CostTrackerLimit(limit uint64) CostTrackerOption {
+ return func(tracker *CostTracker) error {
+ tracker.Limit = &limit
+ return nil
+ }
+}
+
+// PresenceTestHasCost determines whether presence testing has a cost of one or zero.
+// Defaults to a cost of one.
+func PresenceTestHasCost(hasCost bool) CostTrackerOption {
+ return func(tracker *CostTracker) error {
+ tracker.presenceTestHasCost = hasCost
+ return nil
+ }
+}
+
+// NewCostTracker creates a new CostTracker with a given estimator and a set of functional CostTrackerOption values.
+func NewCostTracker(estimator ActualCostEstimator, opts ...CostTrackerOption) (*CostTracker, error) {
+ tracker := &CostTracker{
+ Estimator: estimator,
+ overloadTrackers: map[string]FunctionTracker{},
+ presenceTestHasCost: true,
+ }
+ for _, opt := range opts {
+ err := opt(tracker)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return tracker, nil
+}
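+
+// A minimal usage sketch (how the observer is attached to evaluation is host-specific and assumed here):
+//
+//	tracker, _ := NewCostTracker(nil, CostTrackerLimit(1000))
+//	observer := CostObserver(tracker)
+//	// ... evaluate a program with the observer attached, then:
+//	_ = tracker.ActualCost()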
+
+// OverloadCostTracker binds an overload ID to a runtime FunctionTracker implementation.
+//
+// OverloadCostTracker instances augment or override ActualCostEstimator decisions, allowing for versioned and/or
+// optional cost tracking changes.
+func OverloadCostTracker(overloadID string, fnTracker FunctionTracker) CostTrackerOption {
+ return func(tracker *CostTracker) error {
+ tracker.overloadTrackers[overloadID] = fnTracker
+ return nil
+ }
+}
+
+// FunctionTracker computes the actual cost of evaluating the functions with the given arguments and result.
+type FunctionTracker func(args []ref.Val, result ref.Val) *uint64
+
+// CostTracker represents the information needed for tracking runtime cost.
+type CostTracker struct {
+ Estimator ActualCostEstimator
+ overloadTrackers map[string]FunctionTracker
+ Limit *uint64
+ presenceTestHasCost bool
+
+ cost uint64
+ stack refValStack
+}
+
+// ActualCost returns the runtime cost accumulated so far.
+func (c *CostTracker) ActualCost() uint64 {
+ return c.cost
+}
+
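+// costCall computes the cost of a single function call, consulting any registered overload trackers
+// first, then the estimator, and finally the built-in defaults for the standard library.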
+func (c *CostTracker) costCall(call InterpretableCall, args []ref.Val, result ref.Val) uint64 {
+ var cost uint64
+ if len(c.overloadTrackers) != 0 {
+ if tracker, found := c.overloadTrackers[call.OverloadID()]; found {
+ callCost := tracker(args, result)
+ if callCost != nil {
+ cost += *callCost
+ return cost
+ }
+ }
+ }
+ if c.Estimator != nil {
+ callCost := c.Estimator.CallCost(call.Function(), call.OverloadID(), args, result)
+ if callCost != nil {
+ cost += *callCost
+ return cost
+ }
+ }
+ // If the user didn't specify a cost, fall back to the default runtime cost calculation below.
+ // Users supplying their own ActualCostEstimator implementation should make sure it covers the
+ // mapping between overload ID and cost calculation.
+ switch call.OverloadID() {
+ // O(n) functions
+ case overloads.StartsWithString, overloads.EndsWithString, overloads.StringToBytes, overloads.BytesToString, overloads.ExtQuoteString, overloads.ExtFormatString:
+ cost += uint64(math.Ceil(float64(c.actualSize(args[0])) * common.StringTraversalCostFactor))
+ case overloads.InList:
+ // If a list is composed entirely of constant values this is O(1), but we don't account for that here.
+ // We just assume all list containment checks are O(n).
+ cost += c.actualSize(args[1])
+ // O(min(m, n)) functions
+ case overloads.LessString, overloads.GreaterString, overloads.LessEqualsString, overloads.GreaterEqualsString,
+ overloads.LessBytes, overloads.GreaterBytes, overloads.LessEqualsBytes, overloads.GreaterEqualsBytes,
+ overloads.Equals, overloads.NotEquals:
+ // When we check the equality of 2 scalar values (e.g. 2 integers, 2 floating-point numbers, 2 booleans etc.),
+ // the CostTracker.actualSize() function by definition returns 1 for each operand, resulting in an overall cost
+ // of 1.
+ lhsSize := c.actualSize(args[0])
+ rhsSize := c.actualSize(args[1])
+ minSize := lhsSize
+ if rhsSize < minSize {
+ minSize = rhsSize
+ }
+ cost += uint64(math.Ceil(float64(minSize) * common.StringTraversalCostFactor))
+ // O(m+n) functions
+ case overloads.AddString, overloads.AddBytes:
+ // In the worst case scenario, we would need to reallocate a new backing store and copy both operands over.
+ cost += uint64(math.Ceil(float64(c.actualSize(args[0])+c.actualSize(args[1])) * common.StringTraversalCostFactor))
+ // O(nm) functions
+ case overloads.MatchesString:
+ // https://swtch.com/~rsc/regexp/regexp1.html applies to the RE2 implementation supported by CEL.
+ // Add one to the string length for purposes of cost calculation to prevent the product of the string
+ // and regex from being 0 in the case where the string is empty but the regex is still expensive.
+ strCost := uint64(math.Ceil((1.0 + float64(c.actualSize(args[0]))) * common.StringTraversalCostFactor))
+ // We don't know how many expressions are in the regex, just the string length (a huge
+ // improvement here would be to somehow count the number of expressions in the regex or
+ // how many states are in the regex state machine and use that to measure regex cost).
+ // For now, we're making a guess that each expression in a regex is typically at least 4 chars
+ // in length.
+ regexCost := uint64(math.Ceil(float64(c.actualSize(args[1])) * common.RegexStringLengthCostFactor))
+ cost += strCost * regexCost
+ case overloads.ContainsString:
+ strCost := uint64(math.Ceil(float64(c.actualSize(args[0])) * common.StringTraversalCostFactor))
+ substrCost := uint64(math.Ceil(float64(c.actualSize(args[1])) * common.StringTraversalCostFactor))
+ cost += strCost * substrCost
+
+ default:
+ // The following operations are assumed to have O(1) complexity.
+ // - AddList due to the implementation. Index lookup can be O(c), where c is the
+ // number of concatenated lists, but we don't track that in cost calculations.
+ // - Conversions, since none perform a traversal of a type of unbound length.
+ // - Computing the size of strings, byte sequences, lists and maps.
+ // - Logical operations and all operators on fixed width scalars (comparisons, equality)
+ // - Any functions that don't have a declared cost either here or in provided ActualCostEstimator.
+ cost++
+ }
+ return cost
+}
+
+// actualSize returns the size of the value, or 1 if the value does not implement traits.Sizer.
+func (c *CostTracker) actualSize(value ref.Val) uint64 {
+ if sz, ok := value.(traits.Sizer); ok {
+ return uint64(sz.Size().(types.Int))
+ }
+ return 1
+}
+
+type stackVal struct {
+ Val ref.Val
+ ID int64
+}
+
+// refValStack keeps track of values of the stack for cost calculation purposes
+type refValStack []stackVal
+
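+// push records an evaluated value along with the ID of the expression that produced it.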
+func (s *refValStack) push(val ref.Val, id int64) {
+ value := stackVal{Val: val, ID: id}
+ *s = append(*s, value)
+}
+
+// TODO: Allowing drop and dropArgs to remove stack items above the IDs they are provided is a workaround. drop and dropArgs
+// should find and remove only the stack items matching the provided IDs once all attributes are properly pushed and popped from the stack.
+
+// drop searches the stack for each ID and removes the ID and all stack items above it.
+// If none of the IDs are found, the stack is not modified.
+// WARNING: It is possible for multiple expressions with the same ID to exist (due to how macros are implemented) so it's
+// possible that a dropped ID will remain on the stack. They should be removed when IDs on the stack are popped.
+func (s *refValStack) drop(ids ...int64) {
+ for _, id := range ids {
+ for idx := len(*s) - 1; idx >= 0; idx-- {
+ if (*s)[idx].ID == id {
+ *s = (*s)[:idx]
+ break
+ }
+ }
+ }
+}
+
+// dropArgs searches the stack for all the args by their IDs, accumulates their associated ref.Vals, and drops any
+// stack items above any of the arg IDs. If any of the IDs are not found on the stack, false is returned.
+// Args are assumed to be found in the stack in reverse order, i.e. the last arg is expected to be found highest in
+// the stack.
+// WARNING: It is possible for multiple expressions with the same ID to exist (due to how macros are implemented) so it's
+// possible that a dropped ID will remain on the stack. They should be removed when IDs on the stack are popped.
+func (s *refValStack) dropArgs(args []Interpretable) ([]ref.Val, bool) {
+ result := make([]ref.Val, len(args))
+argloop:
+ for nIdx := len(args) - 1; nIdx >= 0; nIdx-- {
+ for idx := len(*s) - 1; idx >= 0; idx-- {
+ if (*s)[idx].ID == args[nIdx].ID() {
+ el := (*s)[idx]
+ *s = (*s)[:idx]
+ result[nIdx] = el.Val
+ continue argloop
+ }
+ }
+ return nil, false
+ }
+ return result, true
+}
diff --git a/vendor/github.com/google/cel-go/parser/BUILD.bazel b/vendor/github.com/google/cel-go/parser/BUILD.bazel
new file mode 100644
index 0000000..67ecc95
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/BUILD.bazel
@@ -0,0 +1,53 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "errors.go",
+ "helper.go",
+ "input.go",
+ "macro.go",
+ "options.go",
+ "parser.go",
+ "unescape.go",
+ "unparser.go",
+ ],
+ importpath = "github.com/google/cel-go/parser",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//common:go_default_library",
+ "//common/operators:go_default_library",
+ "//common/runes:go_default_library",
+ "//parser/gen:go_default_library",
+ "@com_github_antlr_antlr4_runtime_go_antlr_v4//:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//types/known/structpb:go_default_library",
+ ],
+)
+
+go_test(
+ name = "go_default_test",
+ size = "small",
+ srcs = [
+ "helper_test.go",
+ "parser_test.go",
+ "unescape_test.go",
+ "unparser_test.go",
+ ],
+ embed = [
+ ":go_default_library",
+ ],
+ deps = [
+ "//common/debug:go_default_library",
+ "//parser/gen:go_default_library",
+ "//test:go_default_library",
+ "@com_github_antlr_antlr4_runtime_go_antlr_v4//:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//testing/protocmp:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/parser/errors.go b/vendor/github.com/google/cel-go/parser/errors.go
new file mode 100644
index 0000000..93ae7a3
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/errors.go
@@ -0,0 +1,43 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import (
+ "fmt"
+
+ "github.com/google/cel-go/common"
+)
+
+// parseErrors is a specialization of Errors.
+type parseErrors struct {
+ errs *common.Errors
+}
+
+// errorCount returns the number of errors reported.
+func (e *parseErrors) errorCount() int {
+ return len(e.errs.GetErrors())
+}
+
+func (e *parseErrors) internalError(message string) {
+ e.errs.ReportErrorAtID(0, common.NoLocation, message)
+}
+
+func (e *parseErrors) syntaxError(l common.Location, message string) {
+ e.errs.ReportErrorAtID(0, l, fmt.Sprintf("Syntax error: %s", message))
+}
+
+func (e *parseErrors) reportErrorAtID(id int64, l common.Location, message string, args ...any) {
+ e.errs.ReportErrorAtID(id, l, message, args...)
+}
diff --git a/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel b/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel
new file mode 100644
index 0000000..654d1de
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel
@@ -0,0 +1,26 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+package(
+ default_visibility = ["//parser:__subpackages__"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "cel_base_listener.go",
+ "cel_base_visitor.go",
+ "cel_lexer.go",
+ "cel_listener.go",
+ "cel_parser.go",
+ "cel_visitor.go",
+ ],
+ data = [
+ "CEL.tokens",
+ "CELLexer.tokens",
+ ],
+ importpath = "github.com/google/cel-go/parser/gen",
+ deps = [
+ "@com_github_antlr_antlr4_runtime_go_antlr_v4//:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/parser/gen/CEL.g4 b/vendor/github.com/google/cel-go/parser/gen/CEL.g4
new file mode 100644
index 0000000..b011da8
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/CEL.g4
@@ -0,0 +1,200 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+grammar CEL;
+
+// Grammar Rules
+// =============
+
+start
+ : e=expr EOF
+ ;
+
+expr
+ : e=conditionalOr (op='?' e1=conditionalOr ':' e2=expr)?
+ ;
+
+conditionalOr
+ : e=conditionalAnd (ops+='||' e1+=conditionalAnd)*
+ ;
+
+conditionalAnd
+ : e=relation (ops+='&&' e1+=relation)*
+ ;
+
+relation
+ : calc
+ | relation op=('<'|'<='|'>='|'>'|'=='|'!='|'in') relation
+ ;
+
+calc
+ : unary
+ | calc op=('*'|'/'|'%') calc
+ | calc op=('+'|'-') calc
+ ;
+
+unary
+ : member # MemberExpr
+ | (ops+='!')+ member # LogicalNot
+ | (ops+='-')+ member # Negate
+ ;
+
+member
+ : primary # PrimaryExpr
+ | member op='.' (opt='?')? id=IDENTIFIER # Select
+ | member op='.' id=IDENTIFIER open='(' args=exprList? ')' # MemberCall
+ | member op='[' (opt='?')? index=expr ']' # Index
+ ;
+
+primary
+ : leadingDot='.'? id=IDENTIFIER (op='(' args=exprList? ')')? # IdentOrGlobalCall
+ | '(' e=expr ')' # Nested
+ | op='[' elems=listInit? ','? ']' # CreateList
+ | op='{' entries=mapInitializerList? ','? '}' # CreateStruct
+ | leadingDot='.'? ids+=IDENTIFIER (ops+='.' ids+=IDENTIFIER)*
+ op='{' entries=fieldInitializerList? ','? '}' # CreateMessage
+ | literal # ConstantLiteral
+ ;
+
+exprList
+ : e+=expr (',' e+=expr)*
+ ;
+
+listInit
+ : elems+=optExpr (',' elems+=optExpr)*
+ ;
+
+fieldInitializerList
+ : fields+=optField cols+=':' values+=expr (',' fields+=optField cols+=':' values+=expr)*
+ ;
+
+optField
+ : (opt='?')? IDENTIFIER
+ ;
+
+mapInitializerList
+ : keys+=optExpr cols+=':' values+=expr (',' keys+=optExpr cols+=':' values+=expr)*
+ ;
+
+optExpr
+ : (opt='?')? e=expr
+ ;
+
+literal
+ : sign=MINUS? tok=NUM_INT # Int
+ | tok=NUM_UINT # Uint
+ | sign=MINUS? tok=NUM_FLOAT # Double
+ | tok=STRING # String
+ | tok=BYTES # Bytes
+ | tok=CEL_TRUE # BoolTrue
+ | tok=CEL_FALSE # BoolFalse
+ | tok=NUL # Null
+ ;
+
+// Lexer Rules
+// ===========
+
+EQUALS : '==';
+NOT_EQUALS : '!=';
+IN: 'in';
+LESS : '<';
+LESS_EQUALS : '<=';
+GREATER_EQUALS : '>=';
+GREATER : '>';
+LOGICAL_AND : '&&';
+LOGICAL_OR : '||';
+
+LBRACKET : '[';
+RPRACKET : ']';
+LBRACE : '{';
+RBRACE : '}';
+LPAREN : '(';
+RPAREN : ')';
+DOT : '.';
+COMMA : ',';
+MINUS : '-';
+EXCLAM : '!';
+QUESTIONMARK : '?';
+COLON : ':';
+PLUS : '+';
+STAR : '*';
+SLASH : '/';
+PERCENT : '%';
+CEL_TRUE : 'true';
+CEL_FALSE : 'false';
+NUL : 'null';
+
+fragment BACKSLASH : '\\';
+fragment LETTER : 'A'..'Z' | 'a'..'z' ;
+fragment DIGIT : '0'..'9' ;
+fragment EXPONENT : ('e' | 'E') ( '+' | '-' )? DIGIT+ ;
+fragment HEXDIGIT : ('0'..'9'|'a'..'f'|'A'..'F') ;
+fragment RAW : 'r' | 'R';
+
+fragment ESC_SEQ
+ : ESC_CHAR_SEQ
+ | ESC_BYTE_SEQ
+ | ESC_UNI_SEQ
+ | ESC_OCT_SEQ
+ ;
+
+fragment ESC_CHAR_SEQ
+ : BACKSLASH ('a'|'b'|'f'|'n'|'r'|'t'|'v'|'"'|'\''|'\\'|'?'|'`')
+ ;
+
+fragment ESC_OCT_SEQ
+ : BACKSLASH ('0'..'3') ('0'..'7') ('0'..'7')
+ ;
+
+fragment ESC_BYTE_SEQ
+ : BACKSLASH ( 'x' | 'X' ) HEXDIGIT HEXDIGIT
+ ;
+
+fragment ESC_UNI_SEQ
+ : BACKSLASH 'u' HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT
+ | BACKSLASH 'U' HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT
+ ;
+
+WHITESPACE : ( '\t' | ' ' | '\r' | '\n'| '\u000C' )+ -> channel(HIDDEN) ;
+COMMENT : '//' (~'\n')* -> channel(HIDDEN) ;
+
+NUM_FLOAT
+ : ( DIGIT+ ('.' DIGIT+) EXPONENT?
+ | DIGIT+ EXPONENT
+ | '.' DIGIT+ EXPONENT?
+ )
+ ;
+
+NUM_INT
+ : ( DIGIT+ | '0x' HEXDIGIT+ );
+
+NUM_UINT
+ : DIGIT+ ( 'u' | 'U' )
+ | '0x' HEXDIGIT+ ( 'u' | 'U' )
+ ;
+
+STRING
+ : '"' (ESC_SEQ | ~('\\'|'"'|'\n'|'\r'))* '"'
+ | '\'' (ESC_SEQ | ~('\\'|'\''|'\n'|'\r'))* '\''
+ | '"""' (ESC_SEQ | ~('\\'))*? '"""'
+ | '\'\'\'' (ESC_SEQ | ~('\\'))*? '\'\'\''
+ | RAW '"' ~('"'|'\n'|'\r')* '"'
+ | RAW '\'' ~('\''|'\n'|'\r')* '\''
+ | RAW '"""' .*? '"""'
+ | RAW '\'\'\'' .*? '\'\'\''
+ ;
+
+BYTES : ('b' | 'B') STRING;
+
+IDENTIFIER : (LETTER | '_') ( LETTER | DIGIT | '_')*;
diff --git a/vendor/github.com/google/cel-go/parser/gen/CEL.interp b/vendor/github.com/google/cel-go/parser/gen/CEL.interp
new file mode 100644
index 0000000..75b8bb3
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/CEL.interp
@@ -0,0 +1,99 @@
+token literal names:
+null
+'=='
+'!='
+'in'
+'<'
+'<='
+'>='
+'>'
+'&&'
+'||'
+'['
+']'
+'{'
+'}'
+'('
+')'
+'.'
+','
+'-'
+'!'
+'?'
+':'
+'+'
+'*'
+'/'
+'%'
+'true'
+'false'
+'null'
+null
+null
+null
+null
+null
+null
+null
+null
+
+token symbolic names:
+null
+EQUALS
+NOT_EQUALS
+IN
+LESS
+LESS_EQUALS
+GREATER_EQUALS
+GREATER
+LOGICAL_AND
+LOGICAL_OR
+LBRACKET
+RPRACKET
+LBRACE
+RBRACE
+LPAREN
+RPAREN
+DOT
+COMMA
+MINUS
+EXCLAM
+QUESTIONMARK
+COLON
+PLUS
+STAR
+SLASH
+PERCENT
+CEL_TRUE
+CEL_FALSE
+NUL
+WHITESPACE
+COMMENT
+NUM_FLOAT
+NUM_INT
+NUM_UINT
+STRING
+BYTES
+IDENTIFIER
+
+rule names:
+start
+expr
+conditionalOr
+conditionalAnd
+relation
+calc
+unary
+member
+primary
+exprList
+listInit
+fieldInitializerList
+optField
+mapInitializerList
+optExpr
+literal
+
+
+atn:
+[4, 1, 36, 251, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 42, 8, 1, 1, 2, 1, 2, 1, 2, 5, 2, 47, 8, 2, 10, 2, 12, 2, 50, 9, 2, 1, 3, 1, 3, 1, 3, 5, 3, 55, 8, 3, 10, 3, 12, 3, 58, 9, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 5, 4, 66, 8, 4, 10, 4, 12, 4, 69, 9, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 80, 8, 5, 10, 5, 12, 5, 83, 9, 5, 1, 6, 1, 6, 4, 6, 87, 8, 6, 11, 6, 12, 6, 88, 1, 6, 1, 6, 4, 6, 93, 8, 6, 11, 6, 12, 6, 94, 1, 6, 3, 6, 98, 8, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 106, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 114, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 120, 8, 7, 1, 7, 1, 7, 1, 7, 5, 7, 125, 8, 7, 10, 7, 12, 7, 128, 9, 7, 1, 8, 3, 8, 131, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 136, 8, 8, 1, 8, 3, 8, 139, 8, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 3, 8, 147, 8, 8, 1, 8, 3, 8, 150, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 155, 8, 8, 1, 8, 3, 8, 158, 8, 8, 1, 8, 1, 8, 3, 8, 162, 8, 8, 1, 8, 1, 8, 1, 8, 5, 8, 167, 8, 8, 10, 8, 12, 8, 170, 9, 8, 1, 8, 1, 8, 3, 8, 174, 8, 8, 1, 8, 3, 8, 177, 8, 8, 1, 8, 1, 8, 3, 8, 181, 8, 8, 1, 9, 1, 9, 1, 9, 5, 9, 186, 8, 9, 10, 9, 12, 9, 189, 9, 9, 1, 10, 1, 10, 1, 10, 5, 10, 194, 8, 10, 10, 10, 12, 10, 197, 9, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 5, 11, 207, 8, 11, 10, 11, 12, 11, 210, 9, 11, 1, 12, 3, 12, 213, 8, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 5, 13, 225, 8, 13, 10, 13, 12, 13, 228, 9, 13, 1, 14, 3, 14, 231, 8, 14, 1, 14, 1, 14, 1, 15, 3, 15, 236, 8, 15, 1, 15, 1, 15, 1, 15, 3, 15, 241, 8, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 3, 15, 249, 8, 15, 1, 15, 0, 3, 8, 10, 14, 16, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 0, 3, 1, 0, 1, 7, 1, 0, 23, 25, 2, 0, 18, 18, 22, 22, 281, 0, 32, 1, 0, 0, 0, 2, 35, 1, 0, 0, 0, 4, 43, 1, 0, 0, 0, 6, 51, 1, 0, 0, 0, 8, 59, 1, 0, 0, 0, 10, 70, 1, 0, 0, 0, 12, 97, 1, 0, 0, 0, 14, 99, 1, 0, 0, 0, 16, 180, 1, 0, 0, 0, 18, 182, 1, 0, 0, 0, 20, 190, 1, 0, 0, 0, 22, 198, 1, 0, 0, 0, 24, 212, 1, 0, 0, 0, 26, 216, 1, 0, 0, 0, 28, 230, 1, 0, 0, 0, 30, 248, 1, 0, 0, 0, 32, 33, 3, 2, 1, 0, 33, 34, 5, 0, 0, 1, 34, 1, 1, 0, 0, 0, 35, 41, 3, 4, 2, 0, 36, 37, 5, 20, 0, 0, 37, 38, 3, 4, 2, 0, 38, 39, 5, 21, 0, 0, 39, 40, 3, 2, 1, 0, 40, 42, 1, 0, 0, 0, 41, 36, 1, 0, 0, 0, 41, 42, 1, 0, 0, 0, 42, 3, 1, 0, 0, 0, 43, 48, 3, 6, 3, 0, 44, 45, 5, 9, 0, 0, 45, 47, 3, 6, 3, 0, 46, 44, 1, 0, 0, 0, 47, 50, 1, 0, 0, 0, 48, 46, 1, 0, 0, 0, 48, 49, 1, 0, 0, 0, 49, 5, 1, 0, 0, 0, 50, 48, 1, 0, 0, 0, 51, 56, 3, 8, 4, 0, 52, 53, 5, 8, 0, 0, 53, 55, 3, 8, 4, 0, 54, 52, 1, 0, 0, 0, 55, 58, 1, 0, 0, 0, 56, 54, 1, 0, 0, 0, 56, 57, 1, 0, 0, 0, 57, 7, 1, 0, 0, 0, 58, 56, 1, 0, 0, 0, 59, 60, 6, 4, -1, 0, 60, 61, 3, 10, 5, 0, 61, 67, 1, 0, 0, 0, 62, 63, 10, 1, 0, 0, 63, 64, 7, 0, 0, 0, 64, 66, 3, 8, 4, 2, 65, 62, 1, 0, 0, 0, 66, 69, 1, 0, 0, 0, 67, 65, 1, 0, 0, 0, 67, 68, 1, 0, 0, 0, 68, 9, 1, 0, 0, 0, 69, 67, 1, 0, 0, 0, 70, 71, 6, 5, -1, 0, 71, 72, 3, 12, 6, 0, 72, 81, 1, 0, 0, 0, 73, 74, 10, 2, 0, 0, 74, 75, 7, 1, 0, 0, 75, 80, 3, 10, 5, 3, 76, 77, 10, 1, 0, 0, 77, 78, 7, 2, 0, 0, 78, 80, 3, 10, 5, 2, 79, 73, 1, 0, 0, 0, 79, 76, 1, 0, 0, 0, 80, 83, 1, 0, 0, 0, 81, 79, 1, 0, 0, 0, 81, 82, 1, 0, 0, 0, 82, 11, 1, 0, 0, 0, 83, 81, 1, 0, 0, 0, 84, 98, 3, 14, 7, 0, 85, 87, 5, 19, 0, 0, 86, 85, 1, 0, 0, 0, 87, 88, 1, 0, 0, 0, 88, 86, 1, 0, 
0, 0, 88, 89, 1, 0, 0, 0, 89, 90, 1, 0, 0, 0, 90, 98, 3, 14, 7, 0, 91, 93, 5, 18, 0, 0, 92, 91, 1, 0, 0, 0, 93, 94, 1, 0, 0, 0, 94, 92, 1, 0, 0, 0, 94, 95, 1, 0, 0, 0, 95, 96, 1, 0, 0, 0, 96, 98, 3, 14, 7, 0, 97, 84, 1, 0, 0, 0, 97, 86, 1, 0, 0, 0, 97, 92, 1, 0, 0, 0, 98, 13, 1, 0, 0, 0, 99, 100, 6, 7, -1, 0, 100, 101, 3, 16, 8, 0, 101, 126, 1, 0, 0, 0, 102, 103, 10, 3, 0, 0, 103, 105, 5, 16, 0, 0, 104, 106, 5, 20, 0, 0, 105, 104, 1, 0, 0, 0, 105, 106, 1, 0, 0, 0, 106, 107, 1, 0, 0, 0, 107, 125, 5, 36, 0, 0, 108, 109, 10, 2, 0, 0, 109, 110, 5, 16, 0, 0, 110, 111, 5, 36, 0, 0, 111, 113, 5, 14, 0, 0, 112, 114, 3, 18, 9, 0, 113, 112, 1, 0, 0, 0, 113, 114, 1, 0, 0, 0, 114, 115, 1, 0, 0, 0, 115, 125, 5, 15, 0, 0, 116, 117, 10, 1, 0, 0, 117, 119, 5, 10, 0, 0, 118, 120, 5, 20, 0, 0, 119, 118, 1, 0, 0, 0, 119, 120, 1, 0, 0, 0, 120, 121, 1, 0, 0, 0, 121, 122, 3, 2, 1, 0, 122, 123, 5, 11, 0, 0, 123, 125, 1, 0, 0, 0, 124, 102, 1, 0, 0, 0, 124, 108, 1, 0, 0, 0, 124, 116, 1, 0, 0, 0, 125, 128, 1, 0, 0, 0, 126, 124, 1, 0, 0, 0, 126, 127, 1, 0, 0, 0, 127, 15, 1, 0, 0, 0, 128, 126, 1, 0, 0, 0, 129, 131, 5, 16, 0, 0, 130, 129, 1, 0, 0, 0, 130, 131, 1, 0, 0, 0, 131, 132, 1, 0, 0, 0, 132, 138, 5, 36, 0, 0, 133, 135, 5, 14, 0, 0, 134, 136, 3, 18, 9, 0, 135, 134, 1, 0, 0, 0, 135, 136, 1, 0, 0, 0, 136, 137, 1, 0, 0, 0, 137, 139, 5, 15, 0, 0, 138, 133, 1, 0, 0, 0, 138, 139, 1, 0, 0, 0, 139, 181, 1, 0, 0, 0, 140, 141, 5, 14, 0, 0, 141, 142, 3, 2, 1, 0, 142, 143, 5, 15, 0, 0, 143, 181, 1, 0, 0, 0, 144, 146, 5, 10, 0, 0, 145, 147, 3, 20, 10, 0, 146, 145, 1, 0, 0, 0, 146, 147, 1, 0, 0, 0, 147, 149, 1, 0, 0, 0, 148, 150, 5, 17, 0, 0, 149, 148, 1, 0, 0, 0, 149, 150, 1, 0, 0, 0, 150, 151, 1, 0, 0, 0, 151, 181, 5, 11, 0, 0, 152, 154, 5, 12, 0, 0, 153, 155, 3, 26, 13, 0, 154, 153, 1, 0, 0, 0, 154, 155, 1, 0, 0, 0, 155, 157, 1, 0, 0, 0, 156, 158, 5, 17, 0, 0, 157, 156, 1, 0, 0, 0, 157, 158, 1, 0, 0, 0, 158, 159, 1, 0, 0, 0, 159, 181, 5, 13, 0, 0, 160, 162, 5, 16, 0, 0, 161, 160, 1, 0, 0, 0, 161, 162, 1, 0, 0, 0, 162, 163, 1, 0, 0, 0, 163, 168, 5, 36, 0, 0, 164, 165, 5, 16, 0, 0, 165, 167, 5, 36, 0, 0, 166, 164, 1, 0, 0, 0, 167, 170, 1, 0, 0, 0, 168, 166, 1, 0, 0, 0, 168, 169, 1, 0, 0, 0, 169, 171, 1, 0, 0, 0, 170, 168, 1, 0, 0, 0, 171, 173, 5, 12, 0, 0, 172, 174, 3, 22, 11, 0, 173, 172, 1, 0, 0, 0, 173, 174, 1, 0, 0, 0, 174, 176, 1, 0, 0, 0, 175, 177, 5, 17, 0, 0, 176, 175, 1, 0, 0, 0, 176, 177, 1, 0, 0, 0, 177, 178, 1, 0, 0, 0, 178, 181, 5, 13, 0, 0, 179, 181, 3, 30, 15, 0, 180, 130, 1, 0, 0, 0, 180, 140, 1, 0, 0, 0, 180, 144, 1, 0, 0, 0, 180, 152, 1, 0, 0, 0, 180, 161, 1, 0, 0, 0, 180, 179, 1, 0, 0, 0, 181, 17, 1, 0, 0, 0, 182, 187, 3, 2, 1, 0, 183, 184, 5, 17, 0, 0, 184, 186, 3, 2, 1, 0, 185, 183, 1, 0, 0, 0, 186, 189, 1, 0, 0, 0, 187, 185, 1, 0, 0, 0, 187, 188, 1, 0, 0, 0, 188, 19, 1, 0, 0, 0, 189, 187, 1, 0, 0, 0, 190, 195, 3, 28, 14, 0, 191, 192, 5, 17, 0, 0, 192, 194, 3, 28, 14, 0, 193, 191, 1, 0, 0, 0, 194, 197, 1, 0, 0, 0, 195, 193, 1, 0, 0, 0, 195, 196, 1, 0, 0, 0, 196, 21, 1, 0, 0, 0, 197, 195, 1, 0, 0, 0, 198, 199, 3, 24, 12, 0, 199, 200, 5, 21, 0, 0, 200, 208, 3, 2, 1, 0, 201, 202, 5, 17, 0, 0, 202, 203, 3, 24, 12, 0, 203, 204, 5, 21, 0, 0, 204, 205, 3, 2, 1, 0, 205, 207, 1, 0, 0, 0, 206, 201, 1, 0, 0, 0, 207, 210, 1, 0, 0, 0, 208, 206, 1, 0, 0, 0, 208, 209, 1, 0, 0, 0, 209, 23, 1, 0, 0, 0, 210, 208, 1, 0, 0, 0, 211, 213, 5, 20, 0, 0, 212, 211, 1, 0, 0, 0, 212, 213, 1, 0, 0, 0, 213, 214, 1, 0, 0, 0, 214, 215, 5, 36, 0, 0, 215, 25, 1, 0, 0, 0, 216, 217, 3, 28, 14, 0, 217, 218, 5, 21, 0, 0, 218, 226, 3, 2, 1, 
0, 219, 220, 5, 17, 0, 0, 220, 221, 3, 28, 14, 0, 221, 222, 5, 21, 0, 0, 222, 223, 3, 2, 1, 0, 223, 225, 1, 0, 0, 0, 224, 219, 1, 0, 0, 0, 225, 228, 1, 0, 0, 0, 226, 224, 1, 0, 0, 0, 226, 227, 1, 0, 0, 0, 227, 27, 1, 0, 0, 0, 228, 226, 1, 0, 0, 0, 229, 231, 5, 20, 0, 0, 230, 229, 1, 0, 0, 0, 230, 231, 1, 0, 0, 0, 231, 232, 1, 0, 0, 0, 232, 233, 3, 2, 1, 0, 233, 29, 1, 0, 0, 0, 234, 236, 5, 18, 0, 0, 235, 234, 1, 0, 0, 0, 235, 236, 1, 0, 0, 0, 236, 237, 1, 0, 0, 0, 237, 249, 5, 32, 0, 0, 238, 249, 5, 33, 0, 0, 239, 241, 5, 18, 0, 0, 240, 239, 1, 0, 0, 0, 240, 241, 1, 0, 0, 0, 241, 242, 1, 0, 0, 0, 242, 249, 5, 31, 0, 0, 243, 249, 5, 34, 0, 0, 244, 249, 5, 35, 0, 0, 245, 249, 5, 26, 0, 0, 246, 249, 5, 27, 0, 0, 247, 249, 5, 28, 0, 0, 248, 235, 1, 0, 0, 0, 248, 238, 1, 0, 0, 0, 248, 240, 1, 0, 0, 0, 248, 243, 1, 0, 0, 0, 248, 244, 1, 0, 0, 0, 248, 245, 1, 0, 0, 0, 248, 246, 1, 0, 0, 0, 248, 247, 1, 0, 0, 0, 249, 31, 1, 0, 0, 0, 35, 41, 48, 56, 67, 79, 81, 88, 94, 97, 105, 113, 119, 124, 126, 130, 135, 138, 146, 149, 154, 157, 161, 168, 173, 176, 180, 187, 195, 208, 212, 226, 230, 235, 240, 248]
\ No newline at end of file
diff --git a/vendor/github.com/google/cel-go/parser/gen/CEL.tokens b/vendor/github.com/google/cel-go/parser/gen/CEL.tokens
new file mode 100644
index 0000000..b305bda
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/CEL.tokens
@@ -0,0 +1,64 @@
+EQUALS=1
+NOT_EQUALS=2
+IN=3
+LESS=4
+LESS_EQUALS=5
+GREATER_EQUALS=6
+GREATER=7
+LOGICAL_AND=8
+LOGICAL_OR=9
+LBRACKET=10
+RPRACKET=11
+LBRACE=12
+RBRACE=13
+LPAREN=14
+RPAREN=15
+DOT=16
+COMMA=17
+MINUS=18
+EXCLAM=19
+QUESTIONMARK=20
+COLON=21
+PLUS=22
+STAR=23
+SLASH=24
+PERCENT=25
+CEL_TRUE=26
+CEL_FALSE=27
+NUL=28
+WHITESPACE=29
+COMMENT=30
+NUM_FLOAT=31
+NUM_INT=32
+NUM_UINT=33
+STRING=34
+BYTES=35
+IDENTIFIER=36
+'=='=1
+'!='=2
+'in'=3
+'<'=4
+'<='=5
+'>='=6
+'>'=7
+'&&'=8
+'||'=9
+'['=10
+']'=11
+'{'=12
+'}'=13
+'('=14
+')'=15
+'.'=16
+','=17
+'-'=18
+'!'=19
+'?'=20
+':'=21
+'+'=22
+'*'=23
+'/'=24
+'%'=25
+'true'=26
+'false'=27
+'null'=28
diff --git a/vendor/github.com/google/cel-go/parser/gen/CELLexer.interp b/vendor/github.com/google/cel-go/parser/gen/CELLexer.interp
new file mode 100644
index 0000000..26e7f47
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/CELLexer.interp
@@ -0,0 +1,136 @@
+token literal names:
+null
+'=='
+'!='
+'in'
+'<'
+'<='
+'>='
+'>'
+'&&'
+'||'
+'['
+']'
+'{'
+'}'
+'('
+')'
+'.'
+','
+'-'
+'!'
+'?'
+':'
+'+'
+'*'
+'/'
+'%'
+'true'
+'false'
+'null'
+null
+null
+null
+null
+null
+null
+null
+null
+
+token symbolic names:
+null
+EQUALS
+NOT_EQUALS
+IN
+LESS
+LESS_EQUALS
+GREATER_EQUALS
+GREATER
+LOGICAL_AND
+LOGICAL_OR
+LBRACKET
+RPRACKET
+LBRACE
+RBRACE
+LPAREN
+RPAREN
+DOT
+COMMA
+MINUS
+EXCLAM
+QUESTIONMARK
+COLON
+PLUS
+STAR
+SLASH
+PERCENT
+CEL_TRUE
+CEL_FALSE
+NUL
+WHITESPACE
+COMMENT
+NUM_FLOAT
+NUM_INT
+NUM_UINT
+STRING
+BYTES
+IDENTIFIER
+
+rule names:
+EQUALS
+NOT_EQUALS
+IN
+LESS
+LESS_EQUALS
+GREATER_EQUALS
+GREATER
+LOGICAL_AND
+LOGICAL_OR
+LBRACKET
+RPRACKET
+LBRACE
+RBRACE
+LPAREN
+RPAREN
+DOT
+COMMA
+MINUS
+EXCLAM
+QUESTIONMARK
+COLON
+PLUS
+STAR
+SLASH
+PERCENT
+CEL_TRUE
+CEL_FALSE
+NUL
+BACKSLASH
+LETTER
+DIGIT
+EXPONENT
+HEXDIGIT
+RAW
+ESC_SEQ
+ESC_CHAR_SEQ
+ESC_OCT_SEQ
+ESC_BYTE_SEQ
+ESC_UNI_SEQ
+WHITESPACE
+COMMENT
+NUM_FLOAT
+NUM_INT
+NUM_UINT
+STRING
+BYTES
+IDENTIFIER
+
+channel names:
+DEFAULT_TOKEN_CHANNEL
+HIDDEN
+
+mode names:
+DEFAULT_MODE
+
+atn:
+[4, 0, 36, 423, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, 1, 8, 1, 9, 1, 9, 1, 10, 1, 10, 1, 11, 1, 11, 1, 12, 1, 12, 1, 13, 1, 13, 1, 14, 1, 14, 1, 15, 1, 15, 1, 16, 1, 16, 1, 17, 1, 17, 1, 18, 1, 18, 1, 19, 1, 19, 1, 20, 1, 20, 1, 21, 1, 21, 1, 22, 1, 22, 1, 23, 1, 23, 1, 24, 1, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 29, 1, 29, 1, 30, 1, 30, 1, 31, 1, 31, 3, 31, 177, 8, 31, 1, 31, 4, 31, 180, 8, 31, 11, 31, 12, 31, 181, 1, 32, 1, 32, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 1, 34, 3, 34, 192, 8, 34, 1, 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 36, 1, 36, 1, 36, 1, 37, 1, 37, 1, 37, 1, 37, 1, 37, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 3, 38, 225, 8, 38, 1, 39, 4, 39, 228, 8, 39, 11, 39, 12, 39, 229, 1, 39, 1, 39, 1, 40, 1, 40, 1, 40, 1, 40, 5, 40, 238, 8, 40, 10, 40, 12, 40, 241, 9, 40, 1, 40, 1, 40, 1, 41, 4, 41, 246, 8, 41, 11, 41, 12, 41, 247, 1, 41, 1, 41, 4, 41, 252, 8, 41, 11, 41, 12, 41, 253, 1, 41, 3, 41, 257, 8, 41, 1, 41, 4, 41, 260, 8, 41, 11, 41, 12, 41, 261, 1, 41, 1, 41, 1, 41, 1, 41, 4, 41, 268, 8, 41, 11, 41, 12, 41, 269, 1, 41, 3, 41, 273, 8, 41, 3, 41, 275, 8, 41, 1, 42, 4, 42, 278, 8, 42, 11, 42, 12, 42, 279, 1, 42, 1, 42, 1, 42, 1, 42, 4, 42, 286, 8, 42, 11, 42, 12, 42, 287, 3, 42, 290, 8, 42, 1, 43, 4, 43, 293, 8, 43, 11, 43, 12, 43, 294, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 4, 43, 303, 8, 43, 11, 43, 12, 43, 304, 1, 43, 1, 43, 3, 43, 309, 8, 43, 1, 44, 1, 44, 1, 44, 5, 44, 314, 8, 44, 10, 44, 12, 44, 317, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 323, 8, 44, 10, 44, 12, 44, 326, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 335, 8, 44, 10, 44, 12, 44, 338, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 349, 8, 44, 10, 44, 12, 44, 352, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 360, 8, 44, 10, 44, 12, 44, 363, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 370, 8, 44, 10, 44, 12, 44, 373, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 383, 8, 44, 10, 44, 12, 44, 386, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 398, 8, 44, 10, 44, 12, 44, 401, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 3, 44, 407, 8, 44, 1, 45, 1, 45, 1, 45, 1, 46, 1, 46, 3, 46, 414, 8, 46, 1, 46, 1, 46, 1, 46, 5, 46, 419, 8, 46, 10, 46, 12, 46, 422, 9, 46, 4, 336, 350, 384, 399, 0, 47, 1, 1, 3, 2, 5, 3, 7, 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27, 14, 29, 15, 31, 16, 33, 17, 35, 18, 37, 19, 39, 20, 41, 21, 43, 22, 45, 23, 47, 24, 49, 25, 51, 26, 53, 27, 55, 28, 57, 0, 59, 0, 61, 0, 63, 0, 65, 0, 67, 0, 69, 0, 71, 0, 73, 0, 75, 0, 77, 0, 79, 29, 81, 30, 
83, 31, 85, 32, 87, 33, 89, 34, 91, 35, 93, 36, 1, 0, 16, 2, 0, 65, 90, 97, 122, 2, 0, 69, 69, 101, 101, 2, 0, 43, 43, 45, 45, 3, 0, 48, 57, 65, 70, 97, 102, 2, 0, 82, 82, 114, 114, 10, 0, 34, 34, 39, 39, 63, 63, 92, 92, 96, 98, 102, 102, 110, 110, 114, 114, 116, 116, 118, 118, 2, 0, 88, 88, 120, 120, 3, 0, 9, 10, 12, 13, 32, 32, 1, 0, 10, 10, 2, 0, 85, 85, 117, 117, 4, 0, 10, 10, 13, 13, 34, 34, 92, 92, 4, 0, 10, 10, 13, 13, 39, 39, 92, 92, 1, 0, 92, 92, 3, 0, 10, 10, 13, 13, 34, 34, 3, 0, 10, 10, 13, 13, 39, 39, 2, 0, 66, 66, 98, 98, 456, 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55, 1, 0, 0, 0, 0, 79, 1, 0, 0, 0, 0, 81, 1, 0, 0, 0, 0, 83, 1, 0, 0, 0, 0, 85, 1, 0, 0, 0, 0, 87, 1, 0, 0, 0, 0, 89, 1, 0, 0, 0, 0, 91, 1, 0, 0, 0, 0, 93, 1, 0, 0, 0, 1, 95, 1, 0, 0, 0, 3, 98, 1, 0, 0, 0, 5, 101, 1, 0, 0, 0, 7, 104, 1, 0, 0, 0, 9, 106, 1, 0, 0, 0, 11, 109, 1, 0, 0, 0, 13, 112, 1, 0, 0, 0, 15, 114, 1, 0, 0, 0, 17, 117, 1, 0, 0, 0, 19, 120, 1, 0, 0, 0, 21, 122, 1, 0, 0, 0, 23, 124, 1, 0, 0, 0, 25, 126, 1, 0, 0, 0, 27, 128, 1, 0, 0, 0, 29, 130, 1, 0, 0, 0, 31, 132, 1, 0, 0, 0, 33, 134, 1, 0, 0, 0, 35, 136, 1, 0, 0, 0, 37, 138, 1, 0, 0, 0, 39, 140, 1, 0, 0, 0, 41, 142, 1, 0, 0, 0, 43, 144, 1, 0, 0, 0, 45, 146, 1, 0, 0, 0, 47, 148, 1, 0, 0, 0, 49, 150, 1, 0, 0, 0, 51, 152, 1, 0, 0, 0, 53, 157, 1, 0, 0, 0, 55, 163, 1, 0, 0, 0, 57, 168, 1, 0, 0, 0, 59, 170, 1, 0, 0, 0, 61, 172, 1, 0, 0, 0, 63, 174, 1, 0, 0, 0, 65, 183, 1, 0, 0, 0, 67, 185, 1, 0, 0, 0, 69, 191, 1, 0, 0, 0, 71, 193, 1, 0, 0, 0, 73, 196, 1, 0, 0, 0, 75, 201, 1, 0, 0, 0, 77, 224, 1, 0, 0, 0, 79, 227, 1, 0, 0, 0, 81, 233, 1, 0, 0, 0, 83, 274, 1, 0, 0, 0, 85, 289, 1, 0, 0, 0, 87, 308, 1, 0, 0, 0, 89, 406, 1, 0, 0, 0, 91, 408, 1, 0, 0, 0, 93, 413, 1, 0, 0, 0, 95, 96, 5, 61, 0, 0, 96, 97, 5, 61, 0, 0, 97, 2, 1, 0, 0, 0, 98, 99, 5, 33, 0, 0, 99, 100, 5, 61, 0, 0, 100, 4, 1, 0, 0, 0, 101, 102, 5, 105, 0, 0, 102, 103, 5, 110, 0, 0, 103, 6, 1, 0, 0, 0, 104, 105, 5, 60, 0, 0, 105, 8, 1, 0, 0, 0, 106, 107, 5, 60, 0, 0, 107, 108, 5, 61, 0, 0, 108, 10, 1, 0, 0, 0, 109, 110, 5, 62, 0, 0, 110, 111, 5, 61, 0, 0, 111, 12, 1, 0, 0, 0, 112, 113, 5, 62, 0, 0, 113, 14, 1, 0, 0, 0, 114, 115, 5, 38, 0, 0, 115, 116, 5, 38, 0, 0, 116, 16, 1, 0, 0, 0, 117, 118, 5, 124, 0, 0, 118, 119, 5, 124, 0, 0, 119, 18, 1, 0, 0, 0, 120, 121, 5, 91, 0, 0, 121, 20, 1, 0, 0, 0, 122, 123, 5, 93, 0, 0, 123, 22, 1, 0, 0, 0, 124, 125, 5, 123, 0, 0, 125, 24, 1, 0, 0, 0, 126, 127, 5, 125, 0, 0, 127, 26, 1, 0, 0, 0, 128, 129, 5, 40, 0, 0, 129, 28, 1, 0, 0, 0, 130, 131, 5, 41, 0, 0, 131, 30, 1, 0, 0, 0, 132, 133, 5, 46, 0, 0, 133, 32, 1, 0, 0, 0, 134, 135, 5, 44, 0, 0, 135, 34, 1, 0, 0, 0, 136, 137, 5, 45, 0, 0, 137, 36, 1, 0, 0, 0, 138, 139, 5, 33, 0, 0, 139, 38, 1, 0, 0, 0, 140, 141, 5, 63, 0, 0, 141, 40, 1, 0, 0, 0, 142, 143, 5, 58, 0, 0, 143, 42, 1, 0, 0, 0, 144, 145, 5, 43, 0, 0, 145, 44, 1, 0, 0, 0, 146, 147, 5, 42, 0, 0, 147, 46, 1, 0, 0, 0, 148, 149, 5, 47, 0, 0, 149, 48, 1, 0, 0, 0, 150, 151, 5, 37, 0, 0, 151, 50, 1, 0, 0, 0, 152, 153, 5, 116, 0, 0, 153, 154, 5, 114, 0, 0, 154, 155, 5, 117, 0, 0, 155, 156, 5, 101, 0, 
0, 156, 52, 1, 0, 0, 0, 157, 158, 5, 102, 0, 0, 158, 159, 5, 97, 0, 0, 159, 160, 5, 108, 0, 0, 160, 161, 5, 115, 0, 0, 161, 162, 5, 101, 0, 0, 162, 54, 1, 0, 0, 0, 163, 164, 5, 110, 0, 0, 164, 165, 5, 117, 0, 0, 165, 166, 5, 108, 0, 0, 166, 167, 5, 108, 0, 0, 167, 56, 1, 0, 0, 0, 168, 169, 5, 92, 0, 0, 169, 58, 1, 0, 0, 0, 170, 171, 7, 0, 0, 0, 171, 60, 1, 0, 0, 0, 172, 173, 2, 48, 57, 0, 173, 62, 1, 0, 0, 0, 174, 176, 7, 1, 0, 0, 175, 177, 7, 2, 0, 0, 176, 175, 1, 0, 0, 0, 176, 177, 1, 0, 0, 0, 177, 179, 1, 0, 0, 0, 178, 180, 3, 61, 30, 0, 179, 178, 1, 0, 0, 0, 180, 181, 1, 0, 0, 0, 181, 179, 1, 0, 0, 0, 181, 182, 1, 0, 0, 0, 182, 64, 1, 0, 0, 0, 183, 184, 7, 3, 0, 0, 184, 66, 1, 0, 0, 0, 185, 186, 7, 4, 0, 0, 186, 68, 1, 0, 0, 0, 187, 192, 3, 71, 35, 0, 188, 192, 3, 75, 37, 0, 189, 192, 3, 77, 38, 0, 190, 192, 3, 73, 36, 0, 191, 187, 1, 0, 0, 0, 191, 188, 1, 0, 0, 0, 191, 189, 1, 0, 0, 0, 191, 190, 1, 0, 0, 0, 192, 70, 1, 0, 0, 0, 193, 194, 3, 57, 28, 0, 194, 195, 7, 5, 0, 0, 195, 72, 1, 0, 0, 0, 196, 197, 3, 57, 28, 0, 197, 198, 2, 48, 51, 0, 198, 199, 2, 48, 55, 0, 199, 200, 2, 48, 55, 0, 200, 74, 1, 0, 0, 0, 201, 202, 3, 57, 28, 0, 202, 203, 7, 6, 0, 0, 203, 204, 3, 65, 32, 0, 204, 205, 3, 65, 32, 0, 205, 76, 1, 0, 0, 0, 206, 207, 3, 57, 28, 0, 207, 208, 5, 117, 0, 0, 208, 209, 3, 65, 32, 0, 209, 210, 3, 65, 32, 0, 210, 211, 3, 65, 32, 0, 211, 212, 3, 65, 32, 0, 212, 225, 1, 0, 0, 0, 213, 214, 3, 57, 28, 0, 214, 215, 5, 85, 0, 0, 215, 216, 3, 65, 32, 0, 216, 217, 3, 65, 32, 0, 217, 218, 3, 65, 32, 0, 218, 219, 3, 65, 32, 0, 219, 220, 3, 65, 32, 0, 220, 221, 3, 65, 32, 0, 221, 222, 3, 65, 32, 0, 222, 223, 3, 65, 32, 0, 223, 225, 1, 0, 0, 0, 224, 206, 1, 0, 0, 0, 224, 213, 1, 0, 0, 0, 225, 78, 1, 0, 0, 0, 226, 228, 7, 7, 0, 0, 227, 226, 1, 0, 0, 0, 228, 229, 1, 0, 0, 0, 229, 227, 1, 0, 0, 0, 229, 230, 1, 0, 0, 0, 230, 231, 1, 0, 0, 0, 231, 232, 6, 39, 0, 0, 232, 80, 1, 0, 0, 0, 233, 234, 5, 47, 0, 0, 234, 235, 5, 47, 0, 0, 235, 239, 1, 0, 0, 0, 236, 238, 8, 8, 0, 0, 237, 236, 1, 0, 0, 0, 238, 241, 1, 0, 0, 0, 239, 237, 1, 0, 0, 0, 239, 240, 1, 0, 0, 0, 240, 242, 1, 0, 0, 0, 241, 239, 1, 0, 0, 0, 242, 243, 6, 40, 0, 0, 243, 82, 1, 0, 0, 0, 244, 246, 3, 61, 30, 0, 245, 244, 1, 0, 0, 0, 246, 247, 1, 0, 0, 0, 247, 245, 1, 0, 0, 0, 247, 248, 1, 0, 0, 0, 248, 249, 1, 0, 0, 0, 249, 251, 5, 46, 0, 0, 250, 252, 3, 61, 30, 0, 251, 250, 1, 0, 0, 0, 252, 253, 1, 0, 0, 0, 253, 251, 1, 0, 0, 0, 253, 254, 1, 0, 0, 0, 254, 256, 1, 0, 0, 0, 255, 257, 3, 63, 31, 0, 256, 255, 1, 0, 0, 0, 256, 257, 1, 0, 0, 0, 257, 275, 1, 0, 0, 0, 258, 260, 3, 61, 30, 0, 259, 258, 1, 0, 0, 0, 260, 261, 1, 0, 0, 0, 261, 259, 1, 0, 0, 0, 261, 262, 1, 0, 0, 0, 262, 263, 1, 0, 0, 0, 263, 264, 3, 63, 31, 0, 264, 275, 1, 0, 0, 0, 265, 267, 5, 46, 0, 0, 266, 268, 3, 61, 30, 0, 267, 266, 1, 0, 0, 0, 268, 269, 1, 0, 0, 0, 269, 267, 1, 0, 0, 0, 269, 270, 1, 0, 0, 0, 270, 272, 1, 0, 0, 0, 271, 273, 3, 63, 31, 0, 272, 271, 1, 0, 0, 0, 272, 273, 1, 0, 0, 0, 273, 275, 1, 0, 0, 0, 274, 245, 1, 0, 0, 0, 274, 259, 1, 0, 0, 0, 274, 265, 1, 0, 0, 0, 275, 84, 1, 0, 0, 0, 276, 278, 3, 61, 30, 0, 277, 276, 1, 0, 0, 0, 278, 279, 1, 0, 0, 0, 279, 277, 1, 0, 0, 0, 279, 280, 1, 0, 0, 0, 280, 290, 1, 0, 0, 0, 281, 282, 5, 48, 0, 0, 282, 283, 5, 120, 0, 0, 283, 285, 1, 0, 0, 0, 284, 286, 3, 65, 32, 0, 285, 284, 1, 0, 0, 0, 286, 287, 1, 0, 0, 0, 287, 285, 1, 0, 0, 0, 287, 288, 1, 0, 0, 0, 288, 290, 1, 0, 0, 0, 289, 277, 1, 0, 0, 0, 289, 281, 1, 0, 0, 0, 290, 86, 1, 0, 0, 0, 291, 293, 3, 61, 30, 0, 292, 291, 1, 0, 0, 0, 293, 294, 1, 0, 0, 0, 294, 292, 
1, 0, 0, 0, 294, 295, 1, 0, 0, 0, 295, 296, 1, 0, 0, 0, 296, 297, 7, 9, 0, 0, 297, 309, 1, 0, 0, 0, 298, 299, 5, 48, 0, 0, 299, 300, 5, 120, 0, 0, 300, 302, 1, 0, 0, 0, 301, 303, 3, 65, 32, 0, 302, 301, 1, 0, 0, 0, 303, 304, 1, 0, 0, 0, 304, 302, 1, 0, 0, 0, 304, 305, 1, 0, 0, 0, 305, 306, 1, 0, 0, 0, 306, 307, 7, 9, 0, 0, 307, 309, 1, 0, 0, 0, 308, 292, 1, 0, 0, 0, 308, 298, 1, 0, 0, 0, 309, 88, 1, 0, 0, 0, 310, 315, 5, 34, 0, 0, 311, 314, 3, 69, 34, 0, 312, 314, 8, 10, 0, 0, 313, 311, 1, 0, 0, 0, 313, 312, 1, 0, 0, 0, 314, 317, 1, 0, 0, 0, 315, 313, 1, 0, 0, 0, 315, 316, 1, 0, 0, 0, 316, 318, 1, 0, 0, 0, 317, 315, 1, 0, 0, 0, 318, 407, 5, 34, 0, 0, 319, 324, 5, 39, 0, 0, 320, 323, 3, 69, 34, 0, 321, 323, 8, 11, 0, 0, 322, 320, 1, 0, 0, 0, 322, 321, 1, 0, 0, 0, 323, 326, 1, 0, 0, 0, 324, 322, 1, 0, 0, 0, 324, 325, 1, 0, 0, 0, 325, 327, 1, 0, 0, 0, 326, 324, 1, 0, 0, 0, 327, 407, 5, 39, 0, 0, 328, 329, 5, 34, 0, 0, 329, 330, 5, 34, 0, 0, 330, 331, 5, 34, 0, 0, 331, 336, 1, 0, 0, 0, 332, 335, 3, 69, 34, 0, 333, 335, 8, 12, 0, 0, 334, 332, 1, 0, 0, 0, 334, 333, 1, 0, 0, 0, 335, 338, 1, 0, 0, 0, 336, 337, 1, 0, 0, 0, 336, 334, 1, 0, 0, 0, 337, 339, 1, 0, 0, 0, 338, 336, 1, 0, 0, 0, 339, 340, 5, 34, 0, 0, 340, 341, 5, 34, 0, 0, 341, 407, 5, 34, 0, 0, 342, 343, 5, 39, 0, 0, 343, 344, 5, 39, 0, 0, 344, 345, 5, 39, 0, 0, 345, 350, 1, 0, 0, 0, 346, 349, 3, 69, 34, 0, 347, 349, 8, 12, 0, 0, 348, 346, 1, 0, 0, 0, 348, 347, 1, 0, 0, 0, 349, 352, 1, 0, 0, 0, 350, 351, 1, 0, 0, 0, 350, 348, 1, 0, 0, 0, 351, 353, 1, 0, 0, 0, 352, 350, 1, 0, 0, 0, 353, 354, 5, 39, 0, 0, 354, 355, 5, 39, 0, 0, 355, 407, 5, 39, 0, 0, 356, 357, 3, 67, 33, 0, 357, 361, 5, 34, 0, 0, 358, 360, 8, 13, 0, 0, 359, 358, 1, 0, 0, 0, 360, 363, 1, 0, 0, 0, 361, 359, 1, 0, 0, 0, 361, 362, 1, 0, 0, 0, 362, 364, 1, 0, 0, 0, 363, 361, 1, 0, 0, 0, 364, 365, 5, 34, 0, 0, 365, 407, 1, 0, 0, 0, 366, 367, 3, 67, 33, 0, 367, 371, 5, 39, 0, 0, 368, 370, 8, 14, 0, 0, 369, 368, 1, 0, 0, 0, 370, 373, 1, 0, 0, 0, 371, 369, 1, 0, 0, 0, 371, 372, 1, 0, 0, 0, 372, 374, 1, 0, 0, 0, 373, 371, 1, 0, 0, 0, 374, 375, 5, 39, 0, 0, 375, 407, 1, 0, 0, 0, 376, 377, 3, 67, 33, 0, 377, 378, 5, 34, 0, 0, 378, 379, 5, 34, 0, 0, 379, 380, 5, 34, 0, 0, 380, 384, 1, 0, 0, 0, 381, 383, 9, 0, 0, 0, 382, 381, 1, 0, 0, 0, 383, 386, 1, 0, 0, 0, 384, 385, 1, 0, 0, 0, 384, 382, 1, 0, 0, 0, 385, 387, 1, 0, 0, 0, 386, 384, 1, 0, 0, 0, 387, 388, 5, 34, 0, 0, 388, 389, 5, 34, 0, 0, 389, 390, 5, 34, 0, 0, 390, 407, 1, 0, 0, 0, 391, 392, 3, 67, 33, 0, 392, 393, 5, 39, 0, 0, 393, 394, 5, 39, 0, 0, 394, 395, 5, 39, 0, 0, 395, 399, 1, 0, 0, 0, 396, 398, 9, 0, 0, 0, 397, 396, 1, 0, 0, 0, 398, 401, 1, 0, 0, 0, 399, 400, 1, 0, 0, 0, 399, 397, 1, 0, 0, 0, 400, 402, 1, 0, 0, 0, 401, 399, 1, 0, 0, 0, 402, 403, 5, 39, 0, 0, 403, 404, 5, 39, 0, 0, 404, 405, 5, 39, 0, 0, 405, 407, 1, 0, 0, 0, 406, 310, 1, 0, 0, 0, 406, 319, 1, 0, 0, 0, 406, 328, 1, 0, 0, 0, 406, 342, 1, 0, 0, 0, 406, 356, 1, 0, 0, 0, 406, 366, 1, 0, 0, 0, 406, 376, 1, 0, 0, 0, 406, 391, 1, 0, 0, 0, 407, 90, 1, 0, 0, 0, 408, 409, 7, 15, 0, 0, 409, 410, 3, 89, 44, 0, 410, 92, 1, 0, 0, 0, 411, 414, 3, 59, 29, 0, 412, 414, 5, 95, 0, 0, 413, 411, 1, 0, 0, 0, 413, 412, 1, 0, 0, 0, 414, 420, 1, 0, 0, 0, 415, 419, 3, 59, 29, 0, 416, 419, 3, 61, 30, 0, 417, 419, 5, 95, 0, 0, 418, 415, 1, 0, 0, 0, 418, 416, 1, 0, 0, 0, 418, 417, 1, 0, 0, 0, 419, 422, 1, 0, 0, 0, 420, 418, 1, 0, 0, 0, 420, 421, 1, 0, 0, 0, 421, 94, 1, 0, 0, 0, 422, 420, 1, 0, 0, 0, 36, 0, 176, 181, 191, 224, 229, 239, 247, 253, 256, 261, 269, 272, 274, 279, 287, 289, 
294, 304, 308, 313, 315, 322, 324, 334, 336, 348, 350, 361, 371, 384, 399, 406, 413, 418, 420, 1, 0, 1, 0]
\ No newline at end of file
diff --git a/vendor/github.com/google/cel-go/parser/gen/CELLexer.tokens b/vendor/github.com/google/cel-go/parser/gen/CELLexer.tokens
new file mode 100644
index 0000000..b305bda
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/CELLexer.tokens
@@ -0,0 +1,64 @@
+EQUALS=1
+NOT_EQUALS=2
+IN=3
+LESS=4
+LESS_EQUALS=5
+GREATER_EQUALS=6
+GREATER=7
+LOGICAL_AND=8
+LOGICAL_OR=9
+LBRACKET=10
+RPRACKET=11
+LBRACE=12
+RBRACE=13
+LPAREN=14
+RPAREN=15
+DOT=16
+COMMA=17
+MINUS=18
+EXCLAM=19
+QUESTIONMARK=20
+COLON=21
+PLUS=22
+STAR=23
+SLASH=24
+PERCENT=25
+CEL_TRUE=26
+CEL_FALSE=27
+NUL=28
+WHITESPACE=29
+COMMENT=30
+NUM_FLOAT=31
+NUM_INT=32
+NUM_UINT=33
+STRING=34
+BYTES=35
+IDENTIFIER=36
+'=='=1
+'!='=2
+'in'=3
+'<'=4
+'<='=5
+'>='=6
+'>'=7
+'&&'=8
+'||'=9
+'['=10
+']'=11
+'{'=12
+'}'=13
+'('=14
+')'=15
+'.'=16
+','=17
+'-'=18
+'!'=19
+'?'=20
+':'=21
+'+'=22
+'*'=23
+'/'=24
+'%'=25
+'true'=26
+'false'=27
+'null'=28
diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go b/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go
new file mode 100644
index 0000000..0247f47
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go
@@ -0,0 +1,219 @@
+// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT.
+
+package gen // CEL
+import "github.com/antlr/antlr4/runtime/Go/antlr/v4"
+
+// BaseCELListener is a complete listener for a parse tree produced by CELParser.
+type BaseCELListener struct{}
+
+var _ CELListener = &BaseCELListener{}
+
+// VisitTerminal is called when a terminal node is visited.
+func (s *BaseCELListener) VisitTerminal(node antlr.TerminalNode) {}
+
+// VisitErrorNode is called when an error node is visited.
+func (s *BaseCELListener) VisitErrorNode(node antlr.ErrorNode) {}
+
+// EnterEveryRule is called when any rule is entered.
+func (s *BaseCELListener) EnterEveryRule(ctx antlr.ParserRuleContext) {}
+
+// ExitEveryRule is called when any rule is exited.
+func (s *BaseCELListener) ExitEveryRule(ctx antlr.ParserRuleContext) {}
+
+// EnterStart is called when production start is entered.
+func (s *BaseCELListener) EnterStart(ctx *StartContext) {}
+
+// ExitStart is called when production start is exited.
+func (s *BaseCELListener) ExitStart(ctx *StartContext) {}
+
+// EnterExpr is called when production expr is entered.
+func (s *BaseCELListener) EnterExpr(ctx *ExprContext) {}
+
+// ExitExpr is called when production expr is exited.
+func (s *BaseCELListener) ExitExpr(ctx *ExprContext) {}
+
+// EnterConditionalOr is called when production conditionalOr is entered.
+func (s *BaseCELListener) EnterConditionalOr(ctx *ConditionalOrContext) {}
+
+// ExitConditionalOr is called when production conditionalOr is exited.
+func (s *BaseCELListener) ExitConditionalOr(ctx *ConditionalOrContext) {}
+
+// EnterConditionalAnd is called when production conditionalAnd is entered.
+func (s *BaseCELListener) EnterConditionalAnd(ctx *ConditionalAndContext) {}
+
+// ExitConditionalAnd is called when production conditionalAnd is exited.
+func (s *BaseCELListener) ExitConditionalAnd(ctx *ConditionalAndContext) {}
+
+// EnterRelation is called when production relation is entered.
+func (s *BaseCELListener) EnterRelation(ctx *RelationContext) {}
+
+// ExitRelation is called when production relation is exited.
+func (s *BaseCELListener) ExitRelation(ctx *RelationContext) {}
+
+// EnterCalc is called when production calc is entered.
+func (s *BaseCELListener) EnterCalc(ctx *CalcContext) {}
+
+// ExitCalc is called when production calc is exited.
+func (s *BaseCELListener) ExitCalc(ctx *CalcContext) {}
+
+// EnterMemberExpr is called when production MemberExpr is entered.
+func (s *BaseCELListener) EnterMemberExpr(ctx *MemberExprContext) {}
+
+// ExitMemberExpr is called when production MemberExpr is exited.
+func (s *BaseCELListener) ExitMemberExpr(ctx *MemberExprContext) {}
+
+// EnterLogicalNot is called when production LogicalNot is entered.
+func (s *BaseCELListener) EnterLogicalNot(ctx *LogicalNotContext) {}
+
+// ExitLogicalNot is called when production LogicalNot is exited.
+func (s *BaseCELListener) ExitLogicalNot(ctx *LogicalNotContext) {}
+
+// EnterNegate is called when production Negate is entered.
+func (s *BaseCELListener) EnterNegate(ctx *NegateContext) {}
+
+// ExitNegate is called when production Negate is exited.
+func (s *BaseCELListener) ExitNegate(ctx *NegateContext) {}
+
+// EnterMemberCall is called when production MemberCall is entered.
+func (s *BaseCELListener) EnterMemberCall(ctx *MemberCallContext) {}
+
+// ExitMemberCall is called when production MemberCall is exited.
+func (s *BaseCELListener) ExitMemberCall(ctx *MemberCallContext) {}
+
+// EnterSelect is called when production Select is entered.
+func (s *BaseCELListener) EnterSelect(ctx *SelectContext) {}
+
+// ExitSelect is called when production Select is exited.
+func (s *BaseCELListener) ExitSelect(ctx *SelectContext) {}
+
+// EnterPrimaryExpr is called when production PrimaryExpr is entered.
+func (s *BaseCELListener) EnterPrimaryExpr(ctx *PrimaryExprContext) {}
+
+// ExitPrimaryExpr is called when production PrimaryExpr is exited.
+func (s *BaseCELListener) ExitPrimaryExpr(ctx *PrimaryExprContext) {}
+
+// EnterIndex is called when production Index is entered.
+func (s *BaseCELListener) EnterIndex(ctx *IndexContext) {}
+
+// ExitIndex is called when production Index is exited.
+func (s *BaseCELListener) ExitIndex(ctx *IndexContext) {}
+
+// EnterIdentOrGlobalCall is called when production IdentOrGlobalCall is entered.
+func (s *BaseCELListener) EnterIdentOrGlobalCall(ctx *IdentOrGlobalCallContext) {}
+
+// ExitIdentOrGlobalCall is called when production IdentOrGlobalCall is exited.
+func (s *BaseCELListener) ExitIdentOrGlobalCall(ctx *IdentOrGlobalCallContext) {}
+
+// EnterNested is called when production Nested is entered.
+func (s *BaseCELListener) EnterNested(ctx *NestedContext) {}
+
+// ExitNested is called when production Nested is exited.
+func (s *BaseCELListener) ExitNested(ctx *NestedContext) {}
+
+// EnterCreateList is called when production CreateList is entered.
+func (s *BaseCELListener) EnterCreateList(ctx *CreateListContext) {}
+
+// ExitCreateList is called when production CreateList is exited.
+func (s *BaseCELListener) ExitCreateList(ctx *CreateListContext) {}
+
+// EnterCreateStruct is called when production CreateStruct is entered.
+func (s *BaseCELListener) EnterCreateStruct(ctx *CreateStructContext) {}
+
+// ExitCreateStruct is called when production CreateStruct is exited.
+func (s *BaseCELListener) ExitCreateStruct(ctx *CreateStructContext) {}
+
+// EnterCreateMessage is called when production CreateMessage is entered.
+func (s *BaseCELListener) EnterCreateMessage(ctx *CreateMessageContext) {}
+
+// ExitCreateMessage is called when production CreateMessage is exited.
+func (s *BaseCELListener) ExitCreateMessage(ctx *CreateMessageContext) {}
+
+// EnterConstantLiteral is called when production ConstantLiteral is entered.
+func (s *BaseCELListener) EnterConstantLiteral(ctx *ConstantLiteralContext) {}
+
+// ExitConstantLiteral is called when production ConstantLiteral is exited.
+func (s *BaseCELListener) ExitConstantLiteral(ctx *ConstantLiteralContext) {}
+
+// EnterExprList is called when production exprList is entered.
+func (s *BaseCELListener) EnterExprList(ctx *ExprListContext) {}
+
+// ExitExprList is called when production exprList is exited.
+func (s *BaseCELListener) ExitExprList(ctx *ExprListContext) {}
+
+// EnterListInit is called when production listInit is entered.
+func (s *BaseCELListener) EnterListInit(ctx *ListInitContext) {}
+
+// ExitListInit is called when production listInit is exited.
+func (s *BaseCELListener) ExitListInit(ctx *ListInitContext) {}
+
+// EnterFieldInitializerList is called when production fieldInitializerList is entered.
+func (s *BaseCELListener) EnterFieldInitializerList(ctx *FieldInitializerListContext) {}
+
+// ExitFieldInitializerList is called when production fieldInitializerList is exited.
+func (s *BaseCELListener) ExitFieldInitializerList(ctx *FieldInitializerListContext) {}
+
+// EnterOptField is called when production optField is entered.
+func (s *BaseCELListener) EnterOptField(ctx *OptFieldContext) {}
+
+// ExitOptField is called when production optField is exited.
+func (s *BaseCELListener) ExitOptField(ctx *OptFieldContext) {}
+
+// EnterMapInitializerList is called when production mapInitializerList is entered.
+func (s *BaseCELListener) EnterMapInitializerList(ctx *MapInitializerListContext) {}
+
+// ExitMapInitializerList is called when production mapInitializerList is exited.
+func (s *BaseCELListener) ExitMapInitializerList(ctx *MapInitializerListContext) {}
+
+// EnterOptExpr is called when production optExpr is entered.
+func (s *BaseCELListener) EnterOptExpr(ctx *OptExprContext) {}
+
+// ExitOptExpr is called when production optExpr is exited.
+func (s *BaseCELListener) ExitOptExpr(ctx *OptExprContext) {}
+
+// EnterInt is called when production Int is entered.
+func (s *BaseCELListener) EnterInt(ctx *IntContext) {}
+
+// ExitInt is called when production Int is exited.
+func (s *BaseCELListener) ExitInt(ctx *IntContext) {}
+
+// EnterUint is called when production Uint is entered.
+func (s *BaseCELListener) EnterUint(ctx *UintContext) {}
+
+// ExitUint is called when production Uint is exited.
+func (s *BaseCELListener) ExitUint(ctx *UintContext) {}
+
+// EnterDouble is called when production Double is entered.
+func (s *BaseCELListener) EnterDouble(ctx *DoubleContext) {}
+
+// ExitDouble is called when production Double is exited.
+func (s *BaseCELListener) ExitDouble(ctx *DoubleContext) {}
+
+// EnterString is called when production String is entered.
+func (s *BaseCELListener) EnterString(ctx *StringContext) {}
+
+// ExitString is called when production String is exited.
+func (s *BaseCELListener) ExitString(ctx *StringContext) {}
+
+// EnterBytes is called when production Bytes is entered.
+func (s *BaseCELListener) EnterBytes(ctx *BytesContext) {}
+
+// ExitBytes is called when production Bytes is exited.
+func (s *BaseCELListener) ExitBytes(ctx *BytesContext) {}
+
+// EnterBoolTrue is called when production BoolTrue is entered.
+func (s *BaseCELListener) EnterBoolTrue(ctx *BoolTrueContext) {}
+
+// ExitBoolTrue is called when production BoolTrue is exited.
+func (s *BaseCELListener) ExitBoolTrue(ctx *BoolTrueContext) {}
+
+// EnterBoolFalse is called when production BoolFalse is entered.
+func (s *BaseCELListener) EnterBoolFalse(ctx *BoolFalseContext) {}
+
+// ExitBoolFalse is called when production BoolFalse is exited.
+func (s *BaseCELListener) ExitBoolFalse(ctx *BoolFalseContext) {}
+
+// EnterNull is called when production Null is entered.
+func (s *BaseCELListener) EnterNull(ctx *NullContext) {}
+
+// ExitNull is called when production Null is exited.
+func (s *BaseCELListener) ExitNull(ctx *NullContext) {}
diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go b/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go
new file mode 100644
index 0000000..52a7f4d
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go
@@ -0,0 +1,140 @@
+// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT.
+
+package gen // CEL
+import "github.com/antlr/antlr4/runtime/Go/antlr/v4"
+
+type BaseCELVisitor struct {
+ *antlr.BaseParseTreeVisitor
+}
+
+func (v *BaseCELVisitor) VisitStart(ctx *StartContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitExpr(ctx *ExprContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitConditionalOr(ctx *ConditionalOrContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitConditionalAnd(ctx *ConditionalAndContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitRelation(ctx *RelationContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitCalc(ctx *CalcContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitMemberExpr(ctx *MemberExprContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitLogicalNot(ctx *LogicalNotContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitNegate(ctx *NegateContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitMemberCall(ctx *MemberCallContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitSelect(ctx *SelectContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitPrimaryExpr(ctx *PrimaryExprContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitIndex(ctx *IndexContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitIdentOrGlobalCall(ctx *IdentOrGlobalCallContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitNested(ctx *NestedContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitCreateList(ctx *CreateListContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitCreateStruct(ctx *CreateStructContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitCreateMessage(ctx *CreateMessageContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitConstantLiteral(ctx *ConstantLiteralContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitExprList(ctx *ExprListContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitListInit(ctx *ListInitContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitFieldInitializerList(ctx *FieldInitializerListContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitOptField(ctx *OptFieldContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitMapInitializerList(ctx *MapInitializerListContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitOptExpr(ctx *OptExprContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitInt(ctx *IntContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitUint(ctx *UintContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitDouble(ctx *DoubleContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitString(ctx *StringContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitBytes(ctx *BytesContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitBoolTrue(ctx *BoolTrueContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitBoolFalse(ctx *BoolFalseContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitNull(ctx *NullContext) interface{} {
+ return v.VisitChildren(ctx)
+}
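
One caveat worth knowing: in the Go ANTLR runtime, BaseParseTreeVisitor.VisitChildren is a stub that returns nil rather than recursing, so a visitor built on BaseCELVisitor has to drive its own traversal. A hedged sketch under that assumption; countingVisitor and the input are hypothetical:

package main

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr/v4"
	"github.com/google/cel-go/parser/gen"
)

// countingVisitor tallies string literals in a parse tree.
type countingVisitor struct {
	gen.BaseCELVisitor
	strings int
}

func (v *countingVisitor) VisitString(ctx *gen.StringContext) interface{} {
	v.strings++
	return nil
}

// walk recurses by hand, since the base visitor's VisitChildren does not.
func (v *countingVisitor) walk(tree antlr.Tree) {
	if s, ok := tree.(*gen.StringContext); ok {
		v.VisitString(s)
	}
	for _, child := range tree.GetChildren() {
		v.walk(child)
	}
}

func main() {
	lexer := gen.NewCELLexer(antlr.NewInputStream(`"a" == "b" || "c" == name`))
	parser := gen.NewCELParser(antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel))
	v := &countingVisitor{}
	v.walk(parser.Start())
	fmt.Println("string literals:", v.strings) // 3
}
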
diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go b/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go
new file mode 100644
index 0000000..98ddc06
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go
@@ -0,0 +1,345 @@
+// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT.
+
+package gen
+
+import (
+ "fmt"
+ "sync"
+ "unicode"
+
+ "github.com/antlr/antlr4/runtime/Go/antlr/v4"
+)
+
+// Suppress unused import error
+var _ = fmt.Printf
+var _ = sync.Once{}
+var _ = unicode.IsLetter
+
+type CELLexer struct {
+ *antlr.BaseLexer
+ channelNames []string
+ modeNames []string
+ // TODO: EOF string
+}
+
+var cellexerLexerStaticData struct {
+ once sync.Once
+ serializedATN []int32
+ channelNames []string
+ modeNames []string
+ literalNames []string
+ symbolicNames []string
+ ruleNames []string
+ predictionContextCache *antlr.PredictionContextCache
+ atn *antlr.ATN
+ decisionToDFA []*antlr.DFA
+}
+
+func cellexerLexerInit() {
+ staticData := &cellexerLexerStaticData
+ staticData.channelNames = []string{
+ "DEFAULT_TOKEN_CHANNEL", "HIDDEN",
+ }
+ staticData.modeNames = []string{
+ "DEFAULT_MODE",
+ }
+ staticData.literalNames = []string{
+ "", "'=='", "'!='", "'in'", "'<'", "'<='", "'>='", "'>'", "'&&'", "'||'",
+ "'['", "']'", "'{'", "'}'", "'('", "')'", "'.'", "','", "'-'", "'!'",
+ "'?'", "':'", "'+'", "'*'", "'/'", "'%'", "'true'", "'false'", "'null'",
+ }
+ staticData.symbolicNames = []string{
+ "", "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS",
+ "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE",
+ "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK",
+ "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE",
+ "NUL", "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT",
+ "STRING", "BYTES", "IDENTIFIER",
+ }
+ staticData.ruleNames = []string{
+ "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS",
+ "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE",
+ "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK",
+ "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE",
+ "NUL", "BACKSLASH", "LETTER", "DIGIT", "EXPONENT", "HEXDIGIT", "RAW",
+ "ESC_SEQ", "ESC_CHAR_SEQ", "ESC_OCT_SEQ", "ESC_BYTE_SEQ", "ESC_UNI_SEQ",
+ "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT", "STRING",
+ "BYTES", "IDENTIFIER",
+ }
+ staticData.predictionContextCache = antlr.NewPredictionContextCache()
+ staticData.serializedATN = []int32{
+ 4, 0, 36, 423, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2,
+ 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2,
+ 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15,
+ 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7,
+ 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25,
+ 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2,
+ 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36,
+ 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7,
+ 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46,
+ 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4,
+ 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8,
+ 1, 8, 1, 9, 1, 9, 1, 10, 1, 10, 1, 11, 1, 11, 1, 12, 1, 12, 1, 13, 1, 13,
+ 1, 14, 1, 14, 1, 15, 1, 15, 1, 16, 1, 16, 1, 17, 1, 17, 1, 18, 1, 18, 1,
+ 19, 1, 19, 1, 20, 1, 20, 1, 21, 1, 21, 1, 22, 1, 22, 1, 23, 1, 23, 1, 24,
+ 1, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1,
+ 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 29, 1, 29,
+ 1, 30, 1, 30, 1, 31, 1, 31, 3, 31, 177, 8, 31, 1, 31, 4, 31, 180, 8, 31,
+ 11, 31, 12, 31, 181, 1, 32, 1, 32, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 1,
+ 34, 3, 34, 192, 8, 34, 1, 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 36, 1, 36,
+ 1, 36, 1, 37, 1, 37, 1, 37, 1, 37, 1, 37, 1, 38, 1, 38, 1, 38, 1, 38, 1,
+ 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38,
+ 1, 38, 1, 38, 1, 38, 3, 38, 225, 8, 38, 1, 39, 4, 39, 228, 8, 39, 11, 39,
+ 12, 39, 229, 1, 39, 1, 39, 1, 40, 1, 40, 1, 40, 1, 40, 5, 40, 238, 8, 40,
+ 10, 40, 12, 40, 241, 9, 40, 1, 40, 1, 40, 1, 41, 4, 41, 246, 8, 41, 11,
+ 41, 12, 41, 247, 1, 41, 1, 41, 4, 41, 252, 8, 41, 11, 41, 12, 41, 253,
+ 1, 41, 3, 41, 257, 8, 41, 1, 41, 4, 41, 260, 8, 41, 11, 41, 12, 41, 261,
+ 1, 41, 1, 41, 1, 41, 1, 41, 4, 41, 268, 8, 41, 11, 41, 12, 41, 269, 1,
+ 41, 3, 41, 273, 8, 41, 3, 41, 275, 8, 41, 1, 42, 4, 42, 278, 8, 42, 11,
+ 42, 12, 42, 279, 1, 42, 1, 42, 1, 42, 1, 42, 4, 42, 286, 8, 42, 11, 42,
+ 12, 42, 287, 3, 42, 290, 8, 42, 1, 43, 4, 43, 293, 8, 43, 11, 43, 12, 43,
+ 294, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 4, 43, 303, 8, 43, 11, 43,
+ 12, 43, 304, 1, 43, 1, 43, 3, 43, 309, 8, 43, 1, 44, 1, 44, 1, 44, 5, 44,
+ 314, 8, 44, 10, 44, 12, 44, 317, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5,
+ 44, 323, 8, 44, 10, 44, 12, 44, 326, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44,
+ 1, 44, 1, 44, 1, 44, 5, 44, 335, 8, 44, 10, 44, 12, 44, 338, 9, 44, 1,
+ 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 349,
+ 8, 44, 10, 44, 12, 44, 352, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1,
+ 44, 5, 44, 360, 8, 44, 10, 44, 12, 44, 363, 9, 44, 1, 44, 1, 44, 1, 44,
+ 1, 44, 1, 44, 5, 44, 370, 8, 44, 10, 44, 12, 44, 373, 9, 44, 1, 44, 1,
+ 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 383, 8, 44, 10, 44,
+ 12, 44, 386, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1,
+ 44, 1, 44, 1, 44, 5, 44, 398, 8, 44, 10, 44, 12, 44, 401, 9, 44, 1, 44,
+ 1, 44, 1, 44, 1, 44, 3, 44, 407, 8, 44, 1, 45, 1, 45, 1, 45, 1, 46, 1,
+ 46, 3, 46, 414, 8, 46, 1, 46, 1, 46, 1, 46, 5, 46, 419, 8, 46, 10, 46,
+ 12, 46, 422, 9, 46, 4, 336, 350, 384, 399, 0, 47, 1, 1, 3, 2, 5, 3, 7,
+ 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27,
+ 14, 29, 15, 31, 16, 33, 17, 35, 18, 37, 19, 39, 20, 41, 21, 43, 22, 45,
+ 23, 47, 24, 49, 25, 51, 26, 53, 27, 55, 28, 57, 0, 59, 0, 61, 0, 63, 0,
+ 65, 0, 67, 0, 69, 0, 71, 0, 73, 0, 75, 0, 77, 0, 79, 29, 81, 30, 83, 31,
+ 85, 32, 87, 33, 89, 34, 91, 35, 93, 36, 1, 0, 16, 2, 0, 65, 90, 97, 122,
+ 2, 0, 69, 69, 101, 101, 2, 0, 43, 43, 45, 45, 3, 0, 48, 57, 65, 70, 97,
+ 102, 2, 0, 82, 82, 114, 114, 10, 0, 34, 34, 39, 39, 63, 63, 92, 92, 96,
+ 98, 102, 102, 110, 110, 114, 114, 116, 116, 118, 118, 2, 0, 88, 88, 120,
+ 120, 3, 0, 9, 10, 12, 13, 32, 32, 1, 0, 10, 10, 2, 0, 85, 85, 117, 117,
+ 4, 0, 10, 10, 13, 13, 34, 34, 92, 92, 4, 0, 10, 10, 13, 13, 39, 39, 92,
+ 92, 1, 0, 92, 92, 3, 0, 10, 10, 13, 13, 34, 34, 3, 0, 10, 10, 13, 13, 39,
+ 39, 2, 0, 66, 66, 98, 98, 456, 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5,
+ 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13,
+ 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0,
+ 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0,
+ 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0,
+ 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0,
+ 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1,
+ 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55, 1, 0, 0, 0, 0, 79, 1, 0, 0, 0, 0, 81,
+ 1, 0, 0, 0, 0, 83, 1, 0, 0, 0, 0, 85, 1, 0, 0, 0, 0, 87, 1, 0, 0, 0, 0,
+ 89, 1, 0, 0, 0, 0, 91, 1, 0, 0, 0, 0, 93, 1, 0, 0, 0, 1, 95, 1, 0, 0, 0,
+ 3, 98, 1, 0, 0, 0, 5, 101, 1, 0, 0, 0, 7, 104, 1, 0, 0, 0, 9, 106, 1, 0,
+ 0, 0, 11, 109, 1, 0, 0, 0, 13, 112, 1, 0, 0, 0, 15, 114, 1, 0, 0, 0, 17,
+ 117, 1, 0, 0, 0, 19, 120, 1, 0, 0, 0, 21, 122, 1, 0, 0, 0, 23, 124, 1,
+ 0, 0, 0, 25, 126, 1, 0, 0, 0, 27, 128, 1, 0, 0, 0, 29, 130, 1, 0, 0, 0,
+ 31, 132, 1, 0, 0, 0, 33, 134, 1, 0, 0, 0, 35, 136, 1, 0, 0, 0, 37, 138,
+ 1, 0, 0, 0, 39, 140, 1, 0, 0, 0, 41, 142, 1, 0, 0, 0, 43, 144, 1, 0, 0,
+ 0, 45, 146, 1, 0, 0, 0, 47, 148, 1, 0, 0, 0, 49, 150, 1, 0, 0, 0, 51, 152,
+ 1, 0, 0, 0, 53, 157, 1, 0, 0, 0, 55, 163, 1, 0, 0, 0, 57, 168, 1, 0, 0,
+ 0, 59, 170, 1, 0, 0, 0, 61, 172, 1, 0, 0, 0, 63, 174, 1, 0, 0, 0, 65, 183,
+ 1, 0, 0, 0, 67, 185, 1, 0, 0, 0, 69, 191, 1, 0, 0, 0, 71, 193, 1, 0, 0,
+ 0, 73, 196, 1, 0, 0, 0, 75, 201, 1, 0, 0, 0, 77, 224, 1, 0, 0, 0, 79, 227,
+ 1, 0, 0, 0, 81, 233, 1, 0, 0, 0, 83, 274, 1, 0, 0, 0, 85, 289, 1, 0, 0,
+ 0, 87, 308, 1, 0, 0, 0, 89, 406, 1, 0, 0, 0, 91, 408, 1, 0, 0, 0, 93, 413,
+ 1, 0, 0, 0, 95, 96, 5, 61, 0, 0, 96, 97, 5, 61, 0, 0, 97, 2, 1, 0, 0, 0,
+ 98, 99, 5, 33, 0, 0, 99, 100, 5, 61, 0, 0, 100, 4, 1, 0, 0, 0, 101, 102,
+ 5, 105, 0, 0, 102, 103, 5, 110, 0, 0, 103, 6, 1, 0, 0, 0, 104, 105, 5,
+ 60, 0, 0, 105, 8, 1, 0, 0, 0, 106, 107, 5, 60, 0, 0, 107, 108, 5, 61, 0,
+ 0, 108, 10, 1, 0, 0, 0, 109, 110, 5, 62, 0, 0, 110, 111, 5, 61, 0, 0, 111,
+ 12, 1, 0, 0, 0, 112, 113, 5, 62, 0, 0, 113, 14, 1, 0, 0, 0, 114, 115, 5,
+ 38, 0, 0, 115, 116, 5, 38, 0, 0, 116, 16, 1, 0, 0, 0, 117, 118, 5, 124,
+ 0, 0, 118, 119, 5, 124, 0, 0, 119, 18, 1, 0, 0, 0, 120, 121, 5, 91, 0,
+ 0, 121, 20, 1, 0, 0, 0, 122, 123, 5, 93, 0, 0, 123, 22, 1, 0, 0, 0, 124,
+ 125, 5, 123, 0, 0, 125, 24, 1, 0, 0, 0, 126, 127, 5, 125, 0, 0, 127, 26,
+ 1, 0, 0, 0, 128, 129, 5, 40, 0, 0, 129, 28, 1, 0, 0, 0, 130, 131, 5, 41,
+ 0, 0, 131, 30, 1, 0, 0, 0, 132, 133, 5, 46, 0, 0, 133, 32, 1, 0, 0, 0,
+ 134, 135, 5, 44, 0, 0, 135, 34, 1, 0, 0, 0, 136, 137, 5, 45, 0, 0, 137,
+ 36, 1, 0, 0, 0, 138, 139, 5, 33, 0, 0, 139, 38, 1, 0, 0, 0, 140, 141, 5,
+ 63, 0, 0, 141, 40, 1, 0, 0, 0, 142, 143, 5, 58, 0, 0, 143, 42, 1, 0, 0,
+ 0, 144, 145, 5, 43, 0, 0, 145, 44, 1, 0, 0, 0, 146, 147, 5, 42, 0, 0, 147,
+ 46, 1, 0, 0, 0, 148, 149, 5, 47, 0, 0, 149, 48, 1, 0, 0, 0, 150, 151, 5,
+ 37, 0, 0, 151, 50, 1, 0, 0, 0, 152, 153, 5, 116, 0, 0, 153, 154, 5, 114,
+ 0, 0, 154, 155, 5, 117, 0, 0, 155, 156, 5, 101, 0, 0, 156, 52, 1, 0, 0,
+ 0, 157, 158, 5, 102, 0, 0, 158, 159, 5, 97, 0, 0, 159, 160, 5, 108, 0,
+ 0, 160, 161, 5, 115, 0, 0, 161, 162, 5, 101, 0, 0, 162, 54, 1, 0, 0, 0,
+ 163, 164, 5, 110, 0, 0, 164, 165, 5, 117, 0, 0, 165, 166, 5, 108, 0, 0,
+ 166, 167, 5, 108, 0, 0, 167, 56, 1, 0, 0, 0, 168, 169, 5, 92, 0, 0, 169,
+ 58, 1, 0, 0, 0, 170, 171, 7, 0, 0, 0, 171, 60, 1, 0, 0, 0, 172, 173, 2,
+ 48, 57, 0, 173, 62, 1, 0, 0, 0, 174, 176, 7, 1, 0, 0, 175, 177, 7, 2, 0,
+ 0, 176, 175, 1, 0, 0, 0, 176, 177, 1, 0, 0, 0, 177, 179, 1, 0, 0, 0, 178,
+ 180, 3, 61, 30, 0, 179, 178, 1, 0, 0, 0, 180, 181, 1, 0, 0, 0, 181, 179,
+ 1, 0, 0, 0, 181, 182, 1, 0, 0, 0, 182, 64, 1, 0, 0, 0, 183, 184, 7, 3,
+ 0, 0, 184, 66, 1, 0, 0, 0, 185, 186, 7, 4, 0, 0, 186, 68, 1, 0, 0, 0, 187,
+ 192, 3, 71, 35, 0, 188, 192, 3, 75, 37, 0, 189, 192, 3, 77, 38, 0, 190,
+ 192, 3, 73, 36, 0, 191, 187, 1, 0, 0, 0, 191, 188, 1, 0, 0, 0, 191, 189,
+ 1, 0, 0, 0, 191, 190, 1, 0, 0, 0, 192, 70, 1, 0, 0, 0, 193, 194, 3, 57,
+ 28, 0, 194, 195, 7, 5, 0, 0, 195, 72, 1, 0, 0, 0, 196, 197, 3, 57, 28,
+ 0, 197, 198, 2, 48, 51, 0, 198, 199, 2, 48, 55, 0, 199, 200, 2, 48, 55,
+ 0, 200, 74, 1, 0, 0, 0, 201, 202, 3, 57, 28, 0, 202, 203, 7, 6, 0, 0, 203,
+ 204, 3, 65, 32, 0, 204, 205, 3, 65, 32, 0, 205, 76, 1, 0, 0, 0, 206, 207,
+ 3, 57, 28, 0, 207, 208, 5, 117, 0, 0, 208, 209, 3, 65, 32, 0, 209, 210,
+ 3, 65, 32, 0, 210, 211, 3, 65, 32, 0, 211, 212, 3, 65, 32, 0, 212, 225,
+ 1, 0, 0, 0, 213, 214, 3, 57, 28, 0, 214, 215, 5, 85, 0, 0, 215, 216, 3,
+ 65, 32, 0, 216, 217, 3, 65, 32, 0, 217, 218, 3, 65, 32, 0, 218, 219, 3,
+ 65, 32, 0, 219, 220, 3, 65, 32, 0, 220, 221, 3, 65, 32, 0, 221, 222, 3,
+ 65, 32, 0, 222, 223, 3, 65, 32, 0, 223, 225, 1, 0, 0, 0, 224, 206, 1, 0,
+ 0, 0, 224, 213, 1, 0, 0, 0, 225, 78, 1, 0, 0, 0, 226, 228, 7, 7, 0, 0,
+ 227, 226, 1, 0, 0, 0, 228, 229, 1, 0, 0, 0, 229, 227, 1, 0, 0, 0, 229,
+ 230, 1, 0, 0, 0, 230, 231, 1, 0, 0, 0, 231, 232, 6, 39, 0, 0, 232, 80,
+ 1, 0, 0, 0, 233, 234, 5, 47, 0, 0, 234, 235, 5, 47, 0, 0, 235, 239, 1,
+ 0, 0, 0, 236, 238, 8, 8, 0, 0, 237, 236, 1, 0, 0, 0, 238, 241, 1, 0, 0,
+ 0, 239, 237, 1, 0, 0, 0, 239, 240, 1, 0, 0, 0, 240, 242, 1, 0, 0, 0, 241,
+ 239, 1, 0, 0, 0, 242, 243, 6, 40, 0, 0, 243, 82, 1, 0, 0, 0, 244, 246,
+ 3, 61, 30, 0, 245, 244, 1, 0, 0, 0, 246, 247, 1, 0, 0, 0, 247, 245, 1,
+ 0, 0, 0, 247, 248, 1, 0, 0, 0, 248, 249, 1, 0, 0, 0, 249, 251, 5, 46, 0,
+ 0, 250, 252, 3, 61, 30, 0, 251, 250, 1, 0, 0, 0, 252, 253, 1, 0, 0, 0,
+ 253, 251, 1, 0, 0, 0, 253, 254, 1, 0, 0, 0, 254, 256, 1, 0, 0, 0, 255,
+ 257, 3, 63, 31, 0, 256, 255, 1, 0, 0, 0, 256, 257, 1, 0, 0, 0, 257, 275,
+ 1, 0, 0, 0, 258, 260, 3, 61, 30, 0, 259, 258, 1, 0, 0, 0, 260, 261, 1,
+ 0, 0, 0, 261, 259, 1, 0, 0, 0, 261, 262, 1, 0, 0, 0, 262, 263, 1, 0, 0,
+ 0, 263, 264, 3, 63, 31, 0, 264, 275, 1, 0, 0, 0, 265, 267, 5, 46, 0, 0,
+ 266, 268, 3, 61, 30, 0, 267, 266, 1, 0, 0, 0, 268, 269, 1, 0, 0, 0, 269,
+ 267, 1, 0, 0, 0, 269, 270, 1, 0, 0, 0, 270, 272, 1, 0, 0, 0, 271, 273,
+ 3, 63, 31, 0, 272, 271, 1, 0, 0, 0, 272, 273, 1, 0, 0, 0, 273, 275, 1,
+ 0, 0, 0, 274, 245, 1, 0, 0, 0, 274, 259, 1, 0, 0, 0, 274, 265, 1, 0, 0,
+ 0, 275, 84, 1, 0, 0, 0, 276, 278, 3, 61, 30, 0, 277, 276, 1, 0, 0, 0, 278,
+ 279, 1, 0, 0, 0, 279, 277, 1, 0, 0, 0, 279, 280, 1, 0, 0, 0, 280, 290,
+ 1, 0, 0, 0, 281, 282, 5, 48, 0, 0, 282, 283, 5, 120, 0, 0, 283, 285, 1,
+ 0, 0, 0, 284, 286, 3, 65, 32, 0, 285, 284, 1, 0, 0, 0, 286, 287, 1, 0,
+ 0, 0, 287, 285, 1, 0, 0, 0, 287, 288, 1, 0, 0, 0, 288, 290, 1, 0, 0, 0,
+ 289, 277, 1, 0, 0, 0, 289, 281, 1, 0, 0, 0, 290, 86, 1, 0, 0, 0, 291, 293,
+ 3, 61, 30, 0, 292, 291, 1, 0, 0, 0, 293, 294, 1, 0, 0, 0, 294, 292, 1,
+ 0, 0, 0, 294, 295, 1, 0, 0, 0, 295, 296, 1, 0, 0, 0, 296, 297, 7, 9, 0,
+ 0, 297, 309, 1, 0, 0, 0, 298, 299, 5, 48, 0, 0, 299, 300, 5, 120, 0, 0,
+ 300, 302, 1, 0, 0, 0, 301, 303, 3, 65, 32, 0, 302, 301, 1, 0, 0, 0, 303,
+ 304, 1, 0, 0, 0, 304, 302, 1, 0, 0, 0, 304, 305, 1, 0, 0, 0, 305, 306,
+ 1, 0, 0, 0, 306, 307, 7, 9, 0, 0, 307, 309, 1, 0, 0, 0, 308, 292, 1, 0,
+ 0, 0, 308, 298, 1, 0, 0, 0, 309, 88, 1, 0, 0, 0, 310, 315, 5, 34, 0, 0,
+ 311, 314, 3, 69, 34, 0, 312, 314, 8, 10, 0, 0, 313, 311, 1, 0, 0, 0, 313,
+ 312, 1, 0, 0, 0, 314, 317, 1, 0, 0, 0, 315, 313, 1, 0, 0, 0, 315, 316,
+ 1, 0, 0, 0, 316, 318, 1, 0, 0, 0, 317, 315, 1, 0, 0, 0, 318, 407, 5, 34,
+ 0, 0, 319, 324, 5, 39, 0, 0, 320, 323, 3, 69, 34, 0, 321, 323, 8, 11, 0,
+ 0, 322, 320, 1, 0, 0, 0, 322, 321, 1, 0, 0, 0, 323, 326, 1, 0, 0, 0, 324,
+ 322, 1, 0, 0, 0, 324, 325, 1, 0, 0, 0, 325, 327, 1, 0, 0, 0, 326, 324,
+ 1, 0, 0, 0, 327, 407, 5, 39, 0, 0, 328, 329, 5, 34, 0, 0, 329, 330, 5,
+ 34, 0, 0, 330, 331, 5, 34, 0, 0, 331, 336, 1, 0, 0, 0, 332, 335, 3, 69,
+ 34, 0, 333, 335, 8, 12, 0, 0, 334, 332, 1, 0, 0, 0, 334, 333, 1, 0, 0,
+ 0, 335, 338, 1, 0, 0, 0, 336, 337, 1, 0, 0, 0, 336, 334, 1, 0, 0, 0, 337,
+ 339, 1, 0, 0, 0, 338, 336, 1, 0, 0, 0, 339, 340, 5, 34, 0, 0, 340, 341,
+ 5, 34, 0, 0, 341, 407, 5, 34, 0, 0, 342, 343, 5, 39, 0, 0, 343, 344, 5,
+ 39, 0, 0, 344, 345, 5, 39, 0, 0, 345, 350, 1, 0, 0, 0, 346, 349, 3, 69,
+ 34, 0, 347, 349, 8, 12, 0, 0, 348, 346, 1, 0, 0, 0, 348, 347, 1, 0, 0,
+ 0, 349, 352, 1, 0, 0, 0, 350, 351, 1, 0, 0, 0, 350, 348, 1, 0, 0, 0, 351,
+ 353, 1, 0, 0, 0, 352, 350, 1, 0, 0, 0, 353, 354, 5, 39, 0, 0, 354, 355,
+ 5, 39, 0, 0, 355, 407, 5, 39, 0, 0, 356, 357, 3, 67, 33, 0, 357, 361, 5,
+ 34, 0, 0, 358, 360, 8, 13, 0, 0, 359, 358, 1, 0, 0, 0, 360, 363, 1, 0,
+ 0, 0, 361, 359, 1, 0, 0, 0, 361, 362, 1, 0, 0, 0, 362, 364, 1, 0, 0, 0,
+ 363, 361, 1, 0, 0, 0, 364, 365, 5, 34, 0, 0, 365, 407, 1, 0, 0, 0, 366,
+ 367, 3, 67, 33, 0, 367, 371, 5, 39, 0, 0, 368, 370, 8, 14, 0, 0, 369, 368,
+ 1, 0, 0, 0, 370, 373, 1, 0, 0, 0, 371, 369, 1, 0, 0, 0, 371, 372, 1, 0,
+ 0, 0, 372, 374, 1, 0, 0, 0, 373, 371, 1, 0, 0, 0, 374, 375, 5, 39, 0, 0,
+ 375, 407, 1, 0, 0, 0, 376, 377, 3, 67, 33, 0, 377, 378, 5, 34, 0, 0, 378,
+ 379, 5, 34, 0, 0, 379, 380, 5, 34, 0, 0, 380, 384, 1, 0, 0, 0, 381, 383,
+ 9, 0, 0, 0, 382, 381, 1, 0, 0, 0, 383, 386, 1, 0, 0, 0, 384, 385, 1, 0,
+ 0, 0, 384, 382, 1, 0, 0, 0, 385, 387, 1, 0, 0, 0, 386, 384, 1, 0, 0, 0,
+ 387, 388, 5, 34, 0, 0, 388, 389, 5, 34, 0, 0, 389, 390, 5, 34, 0, 0, 390,
+ 407, 1, 0, 0, 0, 391, 392, 3, 67, 33, 0, 392, 393, 5, 39, 0, 0, 393, 394,
+ 5, 39, 0, 0, 394, 395, 5, 39, 0, 0, 395, 399, 1, 0, 0, 0, 396, 398, 9,
+ 0, 0, 0, 397, 396, 1, 0, 0, 0, 398, 401, 1, 0, 0, 0, 399, 400, 1, 0, 0,
+ 0, 399, 397, 1, 0, 0, 0, 400, 402, 1, 0, 0, 0, 401, 399, 1, 0, 0, 0, 402,
+ 403, 5, 39, 0, 0, 403, 404, 5, 39, 0, 0, 404, 405, 5, 39, 0, 0, 405, 407,
+ 1, 0, 0, 0, 406, 310, 1, 0, 0, 0, 406, 319, 1, 0, 0, 0, 406, 328, 1, 0,
+ 0, 0, 406, 342, 1, 0, 0, 0, 406, 356, 1, 0, 0, 0, 406, 366, 1, 0, 0, 0,
+ 406, 376, 1, 0, 0, 0, 406, 391, 1, 0, 0, 0, 407, 90, 1, 0, 0, 0, 408, 409,
+ 7, 15, 0, 0, 409, 410, 3, 89, 44, 0, 410, 92, 1, 0, 0, 0, 411, 414, 3,
+ 59, 29, 0, 412, 414, 5, 95, 0, 0, 413, 411, 1, 0, 0, 0, 413, 412, 1, 0,
+ 0, 0, 414, 420, 1, 0, 0, 0, 415, 419, 3, 59, 29, 0, 416, 419, 3, 61, 30,
+ 0, 417, 419, 5, 95, 0, 0, 418, 415, 1, 0, 0, 0, 418, 416, 1, 0, 0, 0, 418,
+ 417, 1, 0, 0, 0, 419, 422, 1, 0, 0, 0, 420, 418, 1, 0, 0, 0, 420, 421,
+ 1, 0, 0, 0, 421, 94, 1, 0, 0, 0, 422, 420, 1, 0, 0, 0, 36, 0, 176, 181,
+ 191, 224, 229, 239, 247, 253, 256, 261, 269, 272, 274, 279, 287, 289, 294,
+ 304, 308, 313, 315, 322, 324, 334, 336, 348, 350, 361, 371, 384, 399, 406,
+ 413, 418, 420, 1, 0, 1, 0,
+ }
+ deserializer := antlr.NewATNDeserializer(nil)
+ staticData.atn = deserializer.Deserialize(staticData.serializedATN)
+ atn := staticData.atn
+ staticData.decisionToDFA = make([]*antlr.DFA, len(atn.DecisionToState))
+ decisionToDFA := staticData.decisionToDFA
+ for index, state := range atn.DecisionToState {
+ decisionToDFA[index] = antlr.NewDFA(state, index)
+ }
+}
+
+// CELLexerInit initializes any static state used to implement CELLexer. By default the
+// static state used to implement the lexer is lazily initialized during the first call to
+// NewCELLexer(). You can call this function if you wish to initialize the static state ahead
+// of time.
+func CELLexerInit() {
+ staticData := &cellexerLexerStaticData
+ staticData.once.Do(cellexerLexerInit)
+}
+
+// NewCELLexer produces a new lexer instance for the optional input antlr.CharStream.
+func NewCELLexer(input antlr.CharStream) *CELLexer {
+ CELLexerInit()
+ l := new(CELLexer)
+ l.BaseLexer = antlr.NewBaseLexer(input)
+ staticData := &cellexerLexerStaticData
+ l.Interpreter = antlr.NewLexerATNSimulator(l, staticData.atn, staticData.decisionToDFA, staticData.predictionContextCache)
+ l.channelNames = staticData.channelNames
+ l.modeNames = staticData.modeNames
+ l.RuleNames = staticData.ruleNames
+ l.LiteralNames = staticData.literalNames
+ l.SymbolicNames = staticData.symbolicNames
+ l.GrammarFileName = "CEL.g4"
+ // TODO: l.EOF = antlr.TokenEOF
+
+ return l
+}
+
+// CELLexer tokens.
+const (
+ CELLexerEQUALS = 1
+ CELLexerNOT_EQUALS = 2
+ CELLexerIN = 3
+ CELLexerLESS = 4
+ CELLexerLESS_EQUALS = 5
+ CELLexerGREATER_EQUALS = 6
+ CELLexerGREATER = 7
+ CELLexerLOGICAL_AND = 8
+ CELLexerLOGICAL_OR = 9
+ CELLexerLBRACKET = 10
+ CELLexerRPRACKET = 11
+ CELLexerLBRACE = 12
+ CELLexerRBRACE = 13
+ CELLexerLPAREN = 14
+ CELLexerRPAREN = 15
+ CELLexerDOT = 16
+ CELLexerCOMMA = 17
+ CELLexerMINUS = 18
+ CELLexerEXCLAM = 19
+ CELLexerQUESTIONMARK = 20
+ CELLexerCOLON = 21
+ CELLexerPLUS = 22
+ CELLexerSTAR = 23
+ CELLexerSLASH = 24
+ CELLexerPERCENT = 25
+ CELLexerCEL_TRUE = 26
+ CELLexerCEL_FALSE = 27
+ CELLexerNUL = 28
+ CELLexerWHITESPACE = 29
+ CELLexerCOMMENT = 30
+ CELLexerNUM_FLOAT = 31
+ CELLexerNUM_INT = 32
+ CELLexerNUM_UINT = 33
+ CELLexerSTRING = 34
+ CELLexerBYTES = 35
+ CELLexerIDENTIFIER = 36
+)
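
Since NewCELLexer lazily initializes the static ATN state shown above, the lexer can also be driven standalone, without a parser. A small sketch that tokenizes an expression and prints each token's symbolic name; the input is illustrative, and WHITESPACE/COMMENT never surface because the grammar marks them as skipped:

package main

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr/v4"
	"github.com/google/cel-go/parser/gen"
)

func main() {
	lexer := gen.NewCELLexer(antlr.NewInputStream(`account.balance >= 100u`))
	// NextToken yields tokens until EOF; rules the grammar marks as
	// skipped are consumed inside the lexer and never returned.
	for tok := lexer.NextToken(); tok.GetTokenType() != antlr.TokenEOF; tok = lexer.NextToken() {
		fmt.Printf("%-16s %q\n", lexer.SymbolicNames[tok.GetTokenType()], tok.GetText())
	}
}
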
diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_listener.go b/vendor/github.com/google/cel-go/parser/gen/cel_listener.go
new file mode 100644
index 0000000..73b7f1d
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/cel_listener.go
@@ -0,0 +1,207 @@
+// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT.
+
+package gen // CEL
+import "github.com/antlr/antlr4/runtime/Go/antlr/v4"
+
+// CELListener is a complete listener for a parse tree produced by CELParser.
+type CELListener interface {
+ antlr.ParseTreeListener
+
+ // EnterStart is called when entering the start production.
+ EnterStart(c *StartContext)
+
+ // EnterExpr is called when entering the expr production.
+ EnterExpr(c *ExprContext)
+
+ // EnterConditionalOr is called when entering the conditionalOr production.
+ EnterConditionalOr(c *ConditionalOrContext)
+
+ // EnterConditionalAnd is called when entering the conditionalAnd production.
+ EnterConditionalAnd(c *ConditionalAndContext)
+
+ // EnterRelation is called when entering the relation production.
+ EnterRelation(c *RelationContext)
+
+ // EnterCalc is called when entering the calc production.
+ EnterCalc(c *CalcContext)
+
+ // EnterMemberExpr is called when entering the MemberExpr production.
+ EnterMemberExpr(c *MemberExprContext)
+
+ // EnterLogicalNot is called when entering the LogicalNot production.
+ EnterLogicalNot(c *LogicalNotContext)
+
+ // EnterNegate is called when entering the Negate production.
+ EnterNegate(c *NegateContext)
+
+ // EnterMemberCall is called when entering the MemberCall production.
+ EnterMemberCall(c *MemberCallContext)
+
+ // EnterSelect is called when entering the Select production.
+ EnterSelect(c *SelectContext)
+
+ // EnterPrimaryExpr is called when entering the PrimaryExpr production.
+ EnterPrimaryExpr(c *PrimaryExprContext)
+
+ // EnterIndex is called when entering the Index production.
+ EnterIndex(c *IndexContext)
+
+ // EnterIdentOrGlobalCall is called when entering the IdentOrGlobalCall production.
+ EnterIdentOrGlobalCall(c *IdentOrGlobalCallContext)
+
+ // EnterNested is called when entering the Nested production.
+ EnterNested(c *NestedContext)
+
+ // EnterCreateList is called when entering the CreateList production.
+ EnterCreateList(c *CreateListContext)
+
+ // EnterCreateStruct is called when entering the CreateStruct production.
+ EnterCreateStruct(c *CreateStructContext)
+
+ // EnterCreateMessage is called when entering the CreateMessage production.
+ EnterCreateMessage(c *CreateMessageContext)
+
+ // EnterConstantLiteral is called when entering the ConstantLiteral production.
+ EnterConstantLiteral(c *ConstantLiteralContext)
+
+ // EnterExprList is called when entering the exprList production.
+ EnterExprList(c *ExprListContext)
+
+ // EnterListInit is called when entering the listInit production.
+ EnterListInit(c *ListInitContext)
+
+ // EnterFieldInitializerList is called when entering the fieldInitializerList production.
+ EnterFieldInitializerList(c *FieldInitializerListContext)
+
+ // EnterOptField is called when entering the optField production.
+ EnterOptField(c *OptFieldContext)
+
+ // EnterMapInitializerList is called when entering the mapInitializerList production.
+ EnterMapInitializerList(c *MapInitializerListContext)
+
+ // EnterOptExpr is called when entering the optExpr production.
+ EnterOptExpr(c *OptExprContext)
+
+ // EnterInt is called when entering the Int production.
+ EnterInt(c *IntContext)
+
+ // EnterUint is called when entering the Uint production.
+ EnterUint(c *UintContext)
+
+ // EnterDouble is called when entering the Double production.
+ EnterDouble(c *DoubleContext)
+
+ // EnterString is called when entering the String production.
+ EnterString(c *StringContext)
+
+ // EnterBytes is called when entering the Bytes production.
+ EnterBytes(c *BytesContext)
+
+ // EnterBoolTrue is called when entering the BoolTrue production.
+ EnterBoolTrue(c *BoolTrueContext)
+
+ // EnterBoolFalse is called when entering the BoolFalse production.
+ EnterBoolFalse(c *BoolFalseContext)
+
+ // EnterNull is called when entering the Null production.
+ EnterNull(c *NullContext)
+
+ // ExitStart is called when exiting the start production.
+ ExitStart(c *StartContext)
+
+ // ExitExpr is called when exiting the expr production.
+ ExitExpr(c *ExprContext)
+
+ // ExitConditionalOr is called when exiting the conditionalOr production.
+ ExitConditionalOr(c *ConditionalOrContext)
+
+ // ExitConditionalAnd is called when exiting the conditionalAnd production.
+ ExitConditionalAnd(c *ConditionalAndContext)
+
+ // ExitRelation is called when exiting the relation production.
+ ExitRelation(c *RelationContext)
+
+ // ExitCalc is called when exiting the calc production.
+ ExitCalc(c *CalcContext)
+
+ // ExitMemberExpr is called when exiting the MemberExpr production.
+ ExitMemberExpr(c *MemberExprContext)
+
+ // ExitLogicalNot is called when exiting the LogicalNot production.
+ ExitLogicalNot(c *LogicalNotContext)
+
+ // ExitNegate is called when exiting the Negate production.
+ ExitNegate(c *NegateContext)
+
+ // ExitMemberCall is called when exiting the MemberCall production.
+ ExitMemberCall(c *MemberCallContext)
+
+ // ExitSelect is called when exiting the Select production.
+ ExitSelect(c *SelectContext)
+
+ // ExitPrimaryExpr is called when exiting the PrimaryExpr production.
+ ExitPrimaryExpr(c *PrimaryExprContext)
+
+ // ExitIndex is called when exiting the Index production.
+ ExitIndex(c *IndexContext)
+
+ // ExitIdentOrGlobalCall is called when exiting the IdentOrGlobalCall production.
+ ExitIdentOrGlobalCall(c *IdentOrGlobalCallContext)
+
+ // ExitNested is called when exiting the Nested production.
+ ExitNested(c *NestedContext)
+
+ // ExitCreateList is called when exiting the CreateList production.
+ ExitCreateList(c *CreateListContext)
+
+ // ExitCreateStruct is called when exiting the CreateStruct production.
+ ExitCreateStruct(c *CreateStructContext)
+
+ // ExitCreateMessage is called when exiting the CreateMessage production.
+ ExitCreateMessage(c *CreateMessageContext)
+
+ // ExitConstantLiteral is called when exiting the ConstantLiteral production.
+ ExitConstantLiteral(c *ConstantLiteralContext)
+
+ // ExitExprList is called when exiting the exprList production.
+ ExitExprList(c *ExprListContext)
+
+ // ExitListInit is called when exiting the listInit production.
+ ExitListInit(c *ListInitContext)
+
+ // ExitFieldInitializerList is called when exiting the fieldInitializerList production.
+ ExitFieldInitializerList(c *FieldInitializerListContext)
+
+ // ExitOptField is called when exiting the optField production.
+ ExitOptField(c *OptFieldContext)
+
+ // ExitMapInitializerList is called when exiting the mapInitializerList production.
+ ExitMapInitializerList(c *MapInitializerListContext)
+
+ // ExitOptExpr is called when exiting the optExpr production.
+ ExitOptExpr(c *OptExprContext)
+
+ // ExitInt is called when exiting the Int production.
+ ExitInt(c *IntContext)
+
+ // ExitUint is called when exiting the Uint production.
+ ExitUint(c *UintContext)
+
+ // ExitDouble is called when exiting the Double production.
+ ExitDouble(c *DoubleContext)
+
+ // ExitString is called when exiting the String production.
+ ExitString(c *StringContext)
+
+ // ExitBytes is called when exiting the Bytes production.
+ ExitBytes(c *BytesContext)
+
+ // ExitBoolTrue is called when exiting the BoolTrue production.
+ ExitBoolTrue(c *BoolTrueContext)
+
+ // ExitBoolFalse is called when exiting the BoolFalse production.
+ ExitBoolFalse(c *BoolFalseContext)
+
+ // ExitNull is called when exiting the Null production.
+ ExitNull(c *NullContext)
+}
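
The listener interface above covers successful parses; syntax errors instead flow through ANTLR's ErrorListener chain on the recognizer. A hedged sketch of collecting those errors rather than letting the default listener print to the console; errCollector and the malformed input are hypothetical:

package main

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr/v4"
	"github.com/google/cel-go/parser/gen"
)

// errCollector records syntax errors instead of writing them to stderr.
type errCollector struct {
	*antlr.DefaultErrorListener
	errs []string
}

func (c *errCollector) SyntaxError(_ antlr.Recognizer, _ interface{}, line, col int, msg string, _ antlr.RecognitionException) {
	c.errs = append(c.errs, fmt.Sprintf("%d:%d %s", line, col, msg))
}

func main() {
	lexer := gen.NewCELLexer(antlr.NewInputStream(`a && (b ||`)) // deliberately malformed
	parser := gen.NewCELParser(antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel))
	c := &errCollector{DefaultErrorListener: antlr.NewDefaultErrorListener()}
	parser.RemoveErrorListeners() // drop the console listener
	parser.AddErrorListener(c)
	parser.Start()
	fmt.Println(c.errs)
}
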
diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_parser.go b/vendor/github.com/google/cel-go/parser/gen/cel_parser.go
new file mode 100644
index 0000000..0cb6c8e
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/cel_parser.go
@@ -0,0 +1,5532 @@
+// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT.
+
+package gen // CEL
+import (
+ "fmt"
+ "strconv"
+ "sync"
+
+ "github.com/antlr/antlr4/runtime/Go/antlr/v4"
+)
+
+// Suppress unused import errors
+var _ = fmt.Printf
+var _ = strconv.Itoa
+var _ = sync.Once{}
+
+type CELParser struct {
+ *antlr.BaseParser
+}
+
+var celParserStaticData struct {
+ once sync.Once
+ serializedATN []int32
+ literalNames []string
+ symbolicNames []string
+ ruleNames []string
+ predictionContextCache *antlr.PredictionContextCache
+ atn *antlr.ATN
+ decisionToDFA []*antlr.DFA
+}
+
+func celParserInit() {
+ staticData := &celParserStaticData
+ staticData.literalNames = []string{
+ "", "'=='", "'!='", "'in'", "'<'", "'<='", "'>='", "'>'", "'&&'", "'||'",
+ "'['", "']'", "'{'", "'}'", "'('", "')'", "'.'", "','", "'-'", "'!'",
+ "'?'", "':'", "'+'", "'*'", "'/'", "'%'", "'true'", "'false'", "'null'",
+ }
+ staticData.symbolicNames = []string{
+ "", "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS",
+ "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE",
+ "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK",
+ "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE",
+ "NUL", "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT",
+ "STRING", "BYTES", "IDENTIFIER",
+ }
+ staticData.ruleNames = []string{
+ "start", "expr", "conditionalOr", "conditionalAnd", "relation", "calc",
+ "unary", "member", "primary", "exprList", "listInit", "fieldInitializerList",
+ "optField", "mapInitializerList", "optExpr", "literal",
+ }
+ staticData.predictionContextCache = antlr.NewPredictionContextCache()
+ staticData.serializedATN = []int32{
+ 4, 1, 36, 251, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7,
+ 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7,
+ 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15,
+ 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 42, 8, 1, 1,
+ 2, 1, 2, 1, 2, 5, 2, 47, 8, 2, 10, 2, 12, 2, 50, 9, 2, 1, 3, 1, 3, 1, 3,
+ 5, 3, 55, 8, 3, 10, 3, 12, 3, 58, 9, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1,
+ 4, 5, 4, 66, 8, 4, 10, 4, 12, 4, 69, 9, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5,
+ 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 80, 8, 5, 10, 5, 12, 5, 83, 9, 5, 1, 6, 1,
+ 6, 4, 6, 87, 8, 6, 11, 6, 12, 6, 88, 1, 6, 1, 6, 4, 6, 93, 8, 6, 11, 6,
+ 12, 6, 94, 1, 6, 3, 6, 98, 8, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3,
+ 7, 106, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 114, 8, 7, 1, 7,
+ 1, 7, 1, 7, 1, 7, 3, 7, 120, 8, 7, 1, 7, 1, 7, 1, 7, 5, 7, 125, 8, 7, 10,
+ 7, 12, 7, 128, 9, 7, 1, 8, 3, 8, 131, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 136,
+ 8, 8, 1, 8, 3, 8, 139, 8, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 3, 8,
+ 147, 8, 8, 1, 8, 3, 8, 150, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 155, 8, 8, 1,
+ 8, 3, 8, 158, 8, 8, 1, 8, 1, 8, 3, 8, 162, 8, 8, 1, 8, 1, 8, 1, 8, 5, 8,
+ 167, 8, 8, 10, 8, 12, 8, 170, 9, 8, 1, 8, 1, 8, 3, 8, 174, 8, 8, 1, 8,
+ 3, 8, 177, 8, 8, 1, 8, 1, 8, 3, 8, 181, 8, 8, 1, 9, 1, 9, 1, 9, 5, 9, 186,
+ 8, 9, 10, 9, 12, 9, 189, 9, 9, 1, 10, 1, 10, 1, 10, 5, 10, 194, 8, 10,
+ 10, 10, 12, 10, 197, 9, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1,
+ 11, 1, 11, 5, 11, 207, 8, 11, 10, 11, 12, 11, 210, 9, 11, 1, 12, 3, 12,
+ 213, 8, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1,
+ 13, 1, 13, 5, 13, 225, 8, 13, 10, 13, 12, 13, 228, 9, 13, 1, 14, 3, 14,
+ 231, 8, 14, 1, 14, 1, 14, 1, 15, 3, 15, 236, 8, 15, 1, 15, 1, 15, 1, 15,
+ 3, 15, 241, 8, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 3, 15, 249,
+ 8, 15, 1, 15, 0, 3, 8, 10, 14, 16, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20,
+ 22, 24, 26, 28, 30, 0, 3, 1, 0, 1, 7, 1, 0, 23, 25, 2, 0, 18, 18, 22, 22,
+ 281, 0, 32, 1, 0, 0, 0, 2, 35, 1, 0, 0, 0, 4, 43, 1, 0, 0, 0, 6, 51, 1,
+ 0, 0, 0, 8, 59, 1, 0, 0, 0, 10, 70, 1, 0, 0, 0, 12, 97, 1, 0, 0, 0, 14,
+ 99, 1, 0, 0, 0, 16, 180, 1, 0, 0, 0, 18, 182, 1, 0, 0, 0, 20, 190, 1, 0,
+ 0, 0, 22, 198, 1, 0, 0, 0, 24, 212, 1, 0, 0, 0, 26, 216, 1, 0, 0, 0, 28,
+ 230, 1, 0, 0, 0, 30, 248, 1, 0, 0, 0, 32, 33, 3, 2, 1, 0, 33, 34, 5, 0,
+ 0, 1, 34, 1, 1, 0, 0, 0, 35, 41, 3, 4, 2, 0, 36, 37, 5, 20, 0, 0, 37, 38,
+ 3, 4, 2, 0, 38, 39, 5, 21, 0, 0, 39, 40, 3, 2, 1, 0, 40, 42, 1, 0, 0, 0,
+ 41, 36, 1, 0, 0, 0, 41, 42, 1, 0, 0, 0, 42, 3, 1, 0, 0, 0, 43, 48, 3, 6,
+ 3, 0, 44, 45, 5, 9, 0, 0, 45, 47, 3, 6, 3, 0, 46, 44, 1, 0, 0, 0, 47, 50,
+ 1, 0, 0, 0, 48, 46, 1, 0, 0, 0, 48, 49, 1, 0, 0, 0, 49, 5, 1, 0, 0, 0,
+ 50, 48, 1, 0, 0, 0, 51, 56, 3, 8, 4, 0, 52, 53, 5, 8, 0, 0, 53, 55, 3,
+ 8, 4, 0, 54, 52, 1, 0, 0, 0, 55, 58, 1, 0, 0, 0, 56, 54, 1, 0, 0, 0, 56,
+ 57, 1, 0, 0, 0, 57, 7, 1, 0, 0, 0, 58, 56, 1, 0, 0, 0, 59, 60, 6, 4, -1,
+ 0, 60, 61, 3, 10, 5, 0, 61, 67, 1, 0, 0, 0, 62, 63, 10, 1, 0, 0, 63, 64,
+ 7, 0, 0, 0, 64, 66, 3, 8, 4, 2, 65, 62, 1, 0, 0, 0, 66, 69, 1, 0, 0, 0,
+ 67, 65, 1, 0, 0, 0, 67, 68, 1, 0, 0, 0, 68, 9, 1, 0, 0, 0, 69, 67, 1, 0,
+ 0, 0, 70, 71, 6, 5, -1, 0, 71, 72, 3, 12, 6, 0, 72, 81, 1, 0, 0, 0, 73,
+ 74, 10, 2, 0, 0, 74, 75, 7, 1, 0, 0, 75, 80, 3, 10, 5, 3, 76, 77, 10, 1,
+ 0, 0, 77, 78, 7, 2, 0, 0, 78, 80, 3, 10, 5, 2, 79, 73, 1, 0, 0, 0, 79,
+ 76, 1, 0, 0, 0, 80, 83, 1, 0, 0, 0, 81, 79, 1, 0, 0, 0, 81, 82, 1, 0, 0,
+ 0, 82, 11, 1, 0, 0, 0, 83, 81, 1, 0, 0, 0, 84, 98, 3, 14, 7, 0, 85, 87,
+ 5, 19, 0, 0, 86, 85, 1, 0, 0, 0, 87, 88, 1, 0, 0, 0, 88, 86, 1, 0, 0, 0,
+ 88, 89, 1, 0, 0, 0, 89, 90, 1, 0, 0, 0, 90, 98, 3, 14, 7, 0, 91, 93, 5,
+ 18, 0, 0, 92, 91, 1, 0, 0, 0, 93, 94, 1, 0, 0, 0, 94, 92, 1, 0, 0, 0, 94,
+ 95, 1, 0, 0, 0, 95, 96, 1, 0, 0, 0, 96, 98, 3, 14, 7, 0, 97, 84, 1, 0,
+ 0, 0, 97, 86, 1, 0, 0, 0, 97, 92, 1, 0, 0, 0, 98, 13, 1, 0, 0, 0, 99, 100,
+ 6, 7, -1, 0, 100, 101, 3, 16, 8, 0, 101, 126, 1, 0, 0, 0, 102, 103, 10,
+ 3, 0, 0, 103, 105, 5, 16, 0, 0, 104, 106, 5, 20, 0, 0, 105, 104, 1, 0,
+ 0, 0, 105, 106, 1, 0, 0, 0, 106, 107, 1, 0, 0, 0, 107, 125, 5, 36, 0, 0,
+ 108, 109, 10, 2, 0, 0, 109, 110, 5, 16, 0, 0, 110, 111, 5, 36, 0, 0, 111,
+ 113, 5, 14, 0, 0, 112, 114, 3, 18, 9, 0, 113, 112, 1, 0, 0, 0, 113, 114,
+ 1, 0, 0, 0, 114, 115, 1, 0, 0, 0, 115, 125, 5, 15, 0, 0, 116, 117, 10,
+ 1, 0, 0, 117, 119, 5, 10, 0, 0, 118, 120, 5, 20, 0, 0, 119, 118, 1, 0,
+ 0, 0, 119, 120, 1, 0, 0, 0, 120, 121, 1, 0, 0, 0, 121, 122, 3, 2, 1, 0,
+ 122, 123, 5, 11, 0, 0, 123, 125, 1, 0, 0, 0, 124, 102, 1, 0, 0, 0, 124,
+ 108, 1, 0, 0, 0, 124, 116, 1, 0, 0, 0, 125, 128, 1, 0, 0, 0, 126, 124,
+ 1, 0, 0, 0, 126, 127, 1, 0, 0, 0, 127, 15, 1, 0, 0, 0, 128, 126, 1, 0,
+ 0, 0, 129, 131, 5, 16, 0, 0, 130, 129, 1, 0, 0, 0, 130, 131, 1, 0, 0, 0,
+ 131, 132, 1, 0, 0, 0, 132, 138, 5, 36, 0, 0, 133, 135, 5, 14, 0, 0, 134,
+ 136, 3, 18, 9, 0, 135, 134, 1, 0, 0, 0, 135, 136, 1, 0, 0, 0, 136, 137,
+ 1, 0, 0, 0, 137, 139, 5, 15, 0, 0, 138, 133, 1, 0, 0, 0, 138, 139, 1, 0,
+ 0, 0, 139, 181, 1, 0, 0, 0, 140, 141, 5, 14, 0, 0, 141, 142, 3, 2, 1, 0,
+ 142, 143, 5, 15, 0, 0, 143, 181, 1, 0, 0, 0, 144, 146, 5, 10, 0, 0, 145,
+ 147, 3, 20, 10, 0, 146, 145, 1, 0, 0, 0, 146, 147, 1, 0, 0, 0, 147, 149,
+ 1, 0, 0, 0, 148, 150, 5, 17, 0, 0, 149, 148, 1, 0, 0, 0, 149, 150, 1, 0,
+ 0, 0, 150, 151, 1, 0, 0, 0, 151, 181, 5, 11, 0, 0, 152, 154, 5, 12, 0,
+ 0, 153, 155, 3, 26, 13, 0, 154, 153, 1, 0, 0, 0, 154, 155, 1, 0, 0, 0,
+ 155, 157, 1, 0, 0, 0, 156, 158, 5, 17, 0, 0, 157, 156, 1, 0, 0, 0, 157,
+ 158, 1, 0, 0, 0, 158, 159, 1, 0, 0, 0, 159, 181, 5, 13, 0, 0, 160, 162,
+ 5, 16, 0, 0, 161, 160, 1, 0, 0, 0, 161, 162, 1, 0, 0, 0, 162, 163, 1, 0,
+ 0, 0, 163, 168, 5, 36, 0, 0, 164, 165, 5, 16, 0, 0, 165, 167, 5, 36, 0,
+ 0, 166, 164, 1, 0, 0, 0, 167, 170, 1, 0, 0, 0, 168, 166, 1, 0, 0, 0, 168,
+ 169, 1, 0, 0, 0, 169, 171, 1, 0, 0, 0, 170, 168, 1, 0, 0, 0, 171, 173,
+ 5, 12, 0, 0, 172, 174, 3, 22, 11, 0, 173, 172, 1, 0, 0, 0, 173, 174, 1,
+ 0, 0, 0, 174, 176, 1, 0, 0, 0, 175, 177, 5, 17, 0, 0, 176, 175, 1, 0, 0,
+ 0, 176, 177, 1, 0, 0, 0, 177, 178, 1, 0, 0, 0, 178, 181, 5, 13, 0, 0, 179,
+ 181, 3, 30, 15, 0, 180, 130, 1, 0, 0, 0, 180, 140, 1, 0, 0, 0, 180, 144,
+ 1, 0, 0, 0, 180, 152, 1, 0, 0, 0, 180, 161, 1, 0, 0, 0, 180, 179, 1, 0,
+ 0, 0, 181, 17, 1, 0, 0, 0, 182, 187, 3, 2, 1, 0, 183, 184, 5, 17, 0, 0,
+ 184, 186, 3, 2, 1, 0, 185, 183, 1, 0, 0, 0, 186, 189, 1, 0, 0, 0, 187,
+ 185, 1, 0, 0, 0, 187, 188, 1, 0, 0, 0, 188, 19, 1, 0, 0, 0, 189, 187, 1,
+ 0, 0, 0, 190, 195, 3, 28, 14, 0, 191, 192, 5, 17, 0, 0, 192, 194, 3, 28,
+ 14, 0, 193, 191, 1, 0, 0, 0, 194, 197, 1, 0, 0, 0, 195, 193, 1, 0, 0, 0,
+ 195, 196, 1, 0, 0, 0, 196, 21, 1, 0, 0, 0, 197, 195, 1, 0, 0, 0, 198, 199,
+ 3, 24, 12, 0, 199, 200, 5, 21, 0, 0, 200, 208, 3, 2, 1, 0, 201, 202, 5,
+ 17, 0, 0, 202, 203, 3, 24, 12, 0, 203, 204, 5, 21, 0, 0, 204, 205, 3, 2,
+ 1, 0, 205, 207, 1, 0, 0, 0, 206, 201, 1, 0, 0, 0, 207, 210, 1, 0, 0, 0,
+ 208, 206, 1, 0, 0, 0, 208, 209, 1, 0, 0, 0, 209, 23, 1, 0, 0, 0, 210, 208,
+ 1, 0, 0, 0, 211, 213, 5, 20, 0, 0, 212, 211, 1, 0, 0, 0, 212, 213, 1, 0,
+ 0, 0, 213, 214, 1, 0, 0, 0, 214, 215, 5, 36, 0, 0, 215, 25, 1, 0, 0, 0,
+ 216, 217, 3, 28, 14, 0, 217, 218, 5, 21, 0, 0, 218, 226, 3, 2, 1, 0, 219,
+ 220, 5, 17, 0, 0, 220, 221, 3, 28, 14, 0, 221, 222, 5, 21, 0, 0, 222, 223,
+ 3, 2, 1, 0, 223, 225, 1, 0, 0, 0, 224, 219, 1, 0, 0, 0, 225, 228, 1, 0,
+ 0, 0, 226, 224, 1, 0, 0, 0, 226, 227, 1, 0, 0, 0, 227, 27, 1, 0, 0, 0,
+ 228, 226, 1, 0, 0, 0, 229, 231, 5, 20, 0, 0, 230, 229, 1, 0, 0, 0, 230,
+ 231, 1, 0, 0, 0, 231, 232, 1, 0, 0, 0, 232, 233, 3, 2, 1, 0, 233, 29, 1,
+ 0, 0, 0, 234, 236, 5, 18, 0, 0, 235, 234, 1, 0, 0, 0, 235, 236, 1, 0, 0,
+ 0, 236, 237, 1, 0, 0, 0, 237, 249, 5, 32, 0, 0, 238, 249, 5, 33, 0, 0,
+ 239, 241, 5, 18, 0, 0, 240, 239, 1, 0, 0, 0, 240, 241, 1, 0, 0, 0, 241,
+ 242, 1, 0, 0, 0, 242, 249, 5, 31, 0, 0, 243, 249, 5, 34, 0, 0, 244, 249,
+ 5, 35, 0, 0, 245, 249, 5, 26, 0, 0, 246, 249, 5, 27, 0, 0, 247, 249, 5,
+ 28, 0, 0, 248, 235, 1, 0, 0, 0, 248, 238, 1, 0, 0, 0, 248, 240, 1, 0, 0,
+ 0, 248, 243, 1, 0, 0, 0, 248, 244, 1, 0, 0, 0, 248, 245, 1, 0, 0, 0, 248,
+ 246, 1, 0, 0, 0, 248, 247, 1, 0, 0, 0, 249, 31, 1, 0, 0, 0, 35, 41, 48,
+ 56, 67, 79, 81, 88, 94, 97, 105, 113, 119, 124, 126, 130, 135, 138, 146,
+ 149, 154, 157, 161, 168, 173, 176, 180, 187, 195, 208, 212, 226, 230, 235,
+ 240, 248,
+ }
+ deserializer := antlr.NewATNDeserializer(nil)
+ staticData.atn = deserializer.Deserialize(staticData.serializedATN)
+ atn := staticData.atn
+ staticData.decisionToDFA = make([]*antlr.DFA, len(atn.DecisionToState))
+ decisionToDFA := staticData.decisionToDFA
+ for index, state := range atn.DecisionToState {
+ decisionToDFA[index] = antlr.NewDFA(state, index)
+ }
+}
+
+// CELParserInit initializes any static state used to implement CELParser. By default the
+// static state used to implement the parser is lazily initialized during the first call to
+// NewCELParser(). You can call this function if you wish to initialize the static state ahead
+// of time.
+func CELParserInit() {
+ staticData := &celParserStaticData
+ staticData.once.Do(celParserInit)
+}
+
+// NewCELParser produces a new parser instance for the optional input antlr.TokenStream.
+func NewCELParser(input antlr.TokenStream) *CELParser {
+ CELParserInit()
+ this := new(CELParser)
+ this.BaseParser = antlr.NewBaseParser(input)
+ staticData := &celParserStaticData
+ this.Interpreter = antlr.NewParserATNSimulator(this, staticData.atn, staticData.decisionToDFA, staticData.predictionContextCache)
+ this.RuleNames = staticData.ruleNames
+ this.LiteralNames = staticData.literalNames
+ this.SymbolicNames = staticData.symbolicNames
+ this.GrammarFileName = "CEL.g4"
+
+ return this
+}
+
+// CELParser tokens.
+const (
+ CELParserEOF = antlr.TokenEOF
+ CELParserEQUALS = 1
+ CELParserNOT_EQUALS = 2
+ CELParserIN = 3
+ CELParserLESS = 4
+ CELParserLESS_EQUALS = 5
+ CELParserGREATER_EQUALS = 6
+ CELParserGREATER = 7
+ CELParserLOGICAL_AND = 8
+ CELParserLOGICAL_OR = 9
+ CELParserLBRACKET = 10
+ CELParserRPRACKET = 11
+ CELParserLBRACE = 12
+ CELParserRBRACE = 13
+ CELParserLPAREN = 14
+ CELParserRPAREN = 15
+ CELParserDOT = 16
+ CELParserCOMMA = 17
+ CELParserMINUS = 18
+ CELParserEXCLAM = 19
+ CELParserQUESTIONMARK = 20
+ CELParserCOLON = 21
+ CELParserPLUS = 22
+ CELParserSTAR = 23
+ CELParserSLASH = 24
+ CELParserPERCENT = 25
+ CELParserCEL_TRUE = 26
+ CELParserCEL_FALSE = 27
+ CELParserNUL = 28
+ CELParserWHITESPACE = 29
+ CELParserCOMMENT = 30
+ CELParserNUM_FLOAT = 31
+ CELParserNUM_INT = 32
+ CELParserNUM_UINT = 33
+ CELParserSTRING = 34
+ CELParserBYTES = 35
+ CELParserIDENTIFIER = 36
+)
+
+// CELParser rules.
+const (
+ CELParserRULE_start = 0
+ CELParserRULE_expr = 1
+ CELParserRULE_conditionalOr = 2
+ CELParserRULE_conditionalAnd = 3
+ CELParserRULE_relation = 4
+ CELParserRULE_calc = 5
+ CELParserRULE_unary = 6
+ CELParserRULE_member = 7
+ CELParserRULE_primary = 8
+ CELParserRULE_exprList = 9
+ CELParserRULE_listInit = 10
+ CELParserRULE_fieldInitializerList = 11
+ CELParserRULE_optField = 12
+ CELParserRULE_mapInitializerList = 13
+ CELParserRULE_optExpr = 14
+ CELParserRULE_literal = 15
+)
+
+// IStartContext is an interface to support dynamic dispatch.
+type IStartContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+
+ // GetE returns the e rule contexts.
+ GetE() IExprContext
+
+ // SetE sets the e rule contexts.
+ SetE(IExprContext)
+
+ // Getter signatures
+ EOF() antlr.TerminalNode
+ Expr() IExprContext
+
+ // IsStartContext differentiates from other interfaces.
+ IsStartContext()
+}
+
+type StartContext struct {
+ *antlr.BaseParserRuleContext
+ parser antlr.Parser
+ e IExprContext
+}
+
+func NewEmptyStartContext() *StartContext {
+ var p = new(StartContext)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+ p.RuleIndex = CELParserRULE_start
+ return p
+}
+
+func (*StartContext) IsStartContext() {}
+
+func NewStartContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *StartContext {
+ var p = new(StartContext)
+
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_start
+
+ return p
+}
+
+func (s *StartContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *StartContext) GetE() IExprContext { return s.e }
+
+func (s *StartContext) SetE(v IExprContext) { s.e = v }
+
+func (s *StartContext) EOF() antlr.TerminalNode {
+ return s.GetToken(CELParserEOF, 0)
+}
+
+func (s *StartContext) Expr() IExprContext {
+ var t antlr.RuleContext
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IExprContext); ok {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExprContext)
+}
+
+func (s *StartContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *StartContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *StartContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterStart(s)
+ }
+}
+
+func (s *StartContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitStart(s)
+ }
+}
+
+func (s *StartContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitStart(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
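+// Start parses the entry rule: an expression followed by EOF. The parsed
+// expression is stored in the context's e field.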
+func (p *CELParser) Start() (localctx IStartContext) {
+ this := p
+ _ = this
+
+ localctx = NewStartContext(p, p.GetParserRuleContext(), p.GetState())
+ p.EnterRule(localctx, 0, CELParserRULE_start)
+
+ defer func() {
+ p.ExitRule()
+ }()
+
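+ // Generated recovery wrapper: a RecognitionException raised while
+ // parsing is attached to the context and handed to the error strategy;
+ // any other panic is re-raised.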
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
+ p.EnterOuterAlt(localctx, 1)
+ {
+ p.SetState(32)
+
+ var _x = p.Expr()
+
+ localctx.(*StartContext).e = _x
+ }
+ {
+ p.SetState(33)
+ p.Match(CELParserEOF)
+ }
+
+ return localctx
+}
+
+// IExprContext is an interface to support dynamic dispatch.
+type IExprContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+
+ // GetOp returns the op token.
+ GetOp() antlr.Token
+
+ // SetOp sets the op token.
+ SetOp(antlr.Token)
+
+ // GetE returns the e rule context.
+ GetE() IConditionalOrContext
+
+ // GetE1 returns the e1 rule context.
+ GetE1() IConditionalOrContext
+
+ // GetE2 returns the e2 rule context.
+ GetE2() IExprContext
+
+ // SetE sets the e rule context.
+ SetE(IConditionalOrContext)
+
+ // SetE1 sets the e1 rule context.
+ SetE1(IConditionalOrContext)
+
+ // SetE2 sets the e2 rule context.
+ SetE2(IExprContext)
+
+ // Getter signatures
+ AllConditionalOr() []IConditionalOrContext
+ ConditionalOr(i int) IConditionalOrContext
+ COLON() antlr.TerminalNode
+ QUESTIONMARK() antlr.TerminalNode
+ Expr() IExprContext
+
+ // IsExprContext differentiates from other interfaces.
+ IsExprContext()
+}
+
+type ExprContext struct {
+ *antlr.BaseParserRuleContext
+ parser antlr.Parser
+ e IConditionalOrContext
+ op antlr.Token
+ e1 IConditionalOrContext
+ e2 IExprContext
+}
+
+func NewEmptyExprContext() *ExprContext {
+ var p = new(ExprContext)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+ p.RuleIndex = CELParserRULE_expr
+ return p
+}
+
+func (*ExprContext) IsExprContext() {}
+
+func NewExprContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ExprContext {
+ var p = new(ExprContext)
+
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_expr
+
+ return p
+}
+
+func (s *ExprContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *ExprContext) GetOp() antlr.Token { return s.op }
+
+func (s *ExprContext) SetOp(v antlr.Token) { s.op = v }
+
+func (s *ExprContext) GetE() IConditionalOrContext { return s.e }
+
+func (s *ExprContext) GetE1() IConditionalOrContext { return s.e1 }
+
+func (s *ExprContext) GetE2() IExprContext { return s.e2 }
+
+func (s *ExprContext) SetE(v IConditionalOrContext) { s.e = v }
+
+func (s *ExprContext) SetE1(v IConditionalOrContext) { s.e1 = v }
+
+func (s *ExprContext) SetE2(v IExprContext) { s.e2 = v }
+
+func (s *ExprContext) AllConditionalOr() []IConditionalOrContext {
+ children := s.GetChildren()
+ n := 0
+ for _, ctx := range children {
+ if _, ok := ctx.(IConditionalOrContext); ok {
+ n++
+ }
+ }
+
+ tst := make([]IConditionalOrContext, n)
+ i := 0
+ for _, ctx := range children {
+ if t, ok := ctx.(IConditionalOrContext); ok {
+ tst[i] = t
+ i++
+ }
+ }
+
+ return tst
+}
+
+func (s *ExprContext) ConditionalOr(i int) IConditionalOrContext {
+ var t antlr.RuleContext
+ j := 0
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IConditionalOrContext); ok {
+ if j == i {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ j++
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IConditionalOrContext)
+}
+
+func (s *ExprContext) COLON() antlr.TerminalNode {
+ return s.GetToken(CELParserCOLON, 0)
+}
+
+func (s *ExprContext) QUESTIONMARK() antlr.TerminalNode {
+ return s.GetToken(CELParserQUESTIONMARK, 0)
+}
+
+func (s *ExprContext) Expr() IExprContext {
+ var t antlr.RuleContext
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IExprContext); ok {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExprContext)
+}
+
+func (s *ExprContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *ExprContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *ExprContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterExpr(s)
+ }
+}
+
+func (s *ExprContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitExpr(s)
+ }
+}
+
+func (s *ExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitExpr(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
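+// Expr parses the conditional (ternary) rule: a conditionalOr, optionally
+// followed by '?' conditionalOr ':' expr, captured in e, e1 and e2.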
+func (p *CELParser) Expr() (localctx IExprContext) {
+ this := p
+ _ = this
+
+ localctx = NewExprContext(p, p.GetParserRuleContext(), p.GetState())
+ p.EnterRule(localctx, 2, CELParserRULE_expr)
+ var _la int
+
+ defer func() {
+ p.ExitRule()
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
+ p.EnterOuterAlt(localctx, 1)
+ {
+ p.SetState(35)
+
+ var _x = p.ConditionalOr()
+
+ localctx.(*ExprContext).e = _x
+ }
+ p.SetState(41)
+ p.GetErrorHandler().Sync(p)
+ _la = p.GetTokenStream().LA(1)
+
+ if _la == CELParserQUESTIONMARK {
+ {
+ p.SetState(36)
+
+ var _m = p.Match(CELParserQUESTIONMARK)
+
+ localctx.(*ExprContext).op = _m
+ }
+ {
+ p.SetState(37)
+
+ var _x = p.ConditionalOr()
+
+ localctx.(*ExprContext).e1 = _x
+ }
+ {
+ p.SetState(38)
+ p.Match(CELParserCOLON)
+ }
+ {
+ p.SetState(39)
+
+ var _x = p.Expr()
+
+ localctx.(*ExprContext).e2 = _x
+ }
+
+ }
+
+ return localctx
+}
+
+// IConditionalOrContext is an interface to support dynamic dispatch.
+type IConditionalOrContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+
+ // GetS9 returns the s9 token.
+ GetS9() antlr.Token
+
+ // SetS9 sets the s9 token.
+ SetS9(antlr.Token)
+
+ // GetOps returns the ops token list.
+ GetOps() []antlr.Token
+
+ // SetOps sets the ops token list.
+ SetOps([]antlr.Token)
+
+ // GetE returns the e rule context.
+ GetE() IConditionalAndContext
+
+ // Get_conditionalAnd returns the _conditionalAnd rule context.
+ Get_conditionalAnd() IConditionalAndContext
+
+ // SetE sets the e rule context.
+ SetE(IConditionalAndContext)
+
+ // Set_conditionalAnd sets the _conditionalAnd rule context.
+ Set_conditionalAnd(IConditionalAndContext)
+
+ // GetE1 returns the e1 rule context list.
+ GetE1() []IConditionalAndContext
+
+ // SetE1 sets the e1 rule context list.
+ SetE1([]IConditionalAndContext)
+
+ // Getter signatures
+ AllConditionalAnd() []IConditionalAndContext
+ ConditionalAnd(i int) IConditionalAndContext
+ AllLOGICAL_OR() []antlr.TerminalNode
+ LOGICAL_OR(i int) antlr.TerminalNode
+
+ // IsConditionalOrContext differentiates from other interfaces.
+ IsConditionalOrContext()
+}
+
+type ConditionalOrContext struct {
+ *antlr.BaseParserRuleContext
+ parser antlr.Parser
+ e IConditionalAndContext
+ s9 antlr.Token
+ ops []antlr.Token
+ _conditionalAnd IConditionalAndContext
+ e1 []IConditionalAndContext
+}
+
+func NewEmptyConditionalOrContext() *ConditionalOrContext {
+ var p = new(ConditionalOrContext)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+ p.RuleIndex = CELParserRULE_conditionalOr
+ return p
+}
+
+func (*ConditionalOrContext) IsConditionalOrContext() {}
+
+func NewConditionalOrContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ConditionalOrContext {
+ var p = new(ConditionalOrContext)
+
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_conditionalOr
+
+ return p
+}
+
+func (s *ConditionalOrContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *ConditionalOrContext) GetS9() antlr.Token { return s.s9 }
+
+func (s *ConditionalOrContext) SetS9(v antlr.Token) { s.s9 = v }
+
+func (s *ConditionalOrContext) GetOps() []antlr.Token { return s.ops }
+
+func (s *ConditionalOrContext) SetOps(v []antlr.Token) { s.ops = v }
+
+func (s *ConditionalOrContext) GetE() IConditionalAndContext { return s.e }
+
+func (s *ConditionalOrContext) Get_conditionalAnd() IConditionalAndContext { return s._conditionalAnd }
+
+func (s *ConditionalOrContext) SetE(v IConditionalAndContext) { s.e = v }
+
+func (s *ConditionalOrContext) Set_conditionalAnd(v IConditionalAndContext) { s._conditionalAnd = v }
+
+func (s *ConditionalOrContext) GetE1() []IConditionalAndContext { return s.e1 }
+
+func (s *ConditionalOrContext) SetE1(v []IConditionalAndContext) { s.e1 = v }
+
+func (s *ConditionalOrContext) AllConditionalAnd() []IConditionalAndContext {
+ children := s.GetChildren()
+ n := 0
+ for _, ctx := range children {
+ if _, ok := ctx.(IConditionalAndContext); ok {
+ n++
+ }
+ }
+
+ tst := make([]IConditionalAndContext, n)
+ i := 0
+ for _, ctx := range children {
+ if t, ok := ctx.(IConditionalAndContext); ok {
+ tst[i] = t
+ i++
+ }
+ }
+
+ return tst
+}
+
+func (s *ConditionalOrContext) ConditionalAnd(i int) IConditionalAndContext {
+ var t antlr.RuleContext
+ j := 0
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IConditionalAndContext); ok {
+ if j == i {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ j++
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IConditionalAndContext)
+}
+
+func (s *ConditionalOrContext) AllLOGICAL_OR() []antlr.TerminalNode {
+ return s.GetTokens(CELParserLOGICAL_OR)
+}
+
+func (s *ConditionalOrContext) LOGICAL_OR(i int) antlr.TerminalNode {
+ return s.GetToken(CELParserLOGICAL_OR, i)
+}
+
+func (s *ConditionalOrContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *ConditionalOrContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *ConditionalOrContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterConditionalOr(s)
+ }
+}
+
+func (s *ConditionalOrContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitConditionalOr(s)
+ }
+}
+
+func (s *ConditionalOrContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitConditionalOr(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
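+// ConditionalOr parses conditionalAnd ('||' conditionalAnd)*, accumulating
+// the operator tokens in ops and the right-hand operands in e1.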
+func (p *CELParser) ConditionalOr() (localctx IConditionalOrContext) {
+ this := p
+ _ = this
+
+ localctx = NewConditionalOrContext(p, p.GetParserRuleContext(), p.GetState())
+ p.EnterRule(localctx, 4, CELParserRULE_conditionalOr)
+ var _la int
+
+ defer func() {
+ p.ExitRule()
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
+ p.EnterOuterAlt(localctx, 1)
+ {
+ p.SetState(43)
+
+ var _x = p.ConditionalAnd()
+
+ localctx.(*ConditionalOrContext).e = _x
+ }
+ p.SetState(48)
+ p.GetErrorHandler().Sync(p)
+ _la = p.GetTokenStream().LA(1)
+
+ for _la == CELParserLOGICAL_OR {
+ {
+ p.SetState(44)
+
+ var _m = p.Match(CELParserLOGICAL_OR)
+
+ localctx.(*ConditionalOrContext).s9 = _m
+ }
+ localctx.(*ConditionalOrContext).ops = append(localctx.(*ConditionalOrContext).ops, localctx.(*ConditionalOrContext).s9)
+ {
+ p.SetState(45)
+
+ var _x = p.ConditionalAnd()
+
+ localctx.(*ConditionalOrContext)._conditionalAnd = _x
+ }
+ localctx.(*ConditionalOrContext).e1 = append(localctx.(*ConditionalOrContext).e1, localctx.(*ConditionalOrContext)._conditionalAnd)
+
+ p.SetState(50)
+ p.GetErrorHandler().Sync(p)
+ _la = p.GetTokenStream().LA(1)
+ }
+
+ return localctx
+}
+
+// IConditionalAndContext is an interface to support dynamic dispatch.
+type IConditionalAndContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+
+ // GetS8 returns the s8 token.
+ GetS8() antlr.Token
+
+ // SetS8 sets the s8 token.
+ SetS8(antlr.Token)
+
+ // GetOps returns the ops token list.
+ GetOps() []antlr.Token
+
+ // SetOps sets the ops token list.
+ SetOps([]antlr.Token)
+
+ // GetE returns the e rule context.
+ GetE() IRelationContext
+
+ // Get_relation returns the _relation rule context.
+ Get_relation() IRelationContext
+
+ // SetE sets the e rule context.
+ SetE(IRelationContext)
+
+ // Set_relation sets the _relation rule context.
+ Set_relation(IRelationContext)
+
+ // GetE1 returns the e1 rule context list.
+ GetE1() []IRelationContext
+
+ // SetE1 sets the e1 rule context list.
+ SetE1([]IRelationContext)
+
+ // Getter signatures
+ AllRelation() []IRelationContext
+ Relation(i int) IRelationContext
+ AllLOGICAL_AND() []antlr.TerminalNode
+ LOGICAL_AND(i int) antlr.TerminalNode
+
+ // IsConditionalAndContext differentiates from other interfaces.
+ IsConditionalAndContext()
+}
+
+type ConditionalAndContext struct {
+ *antlr.BaseParserRuleContext
+ parser antlr.Parser
+ e IRelationContext
+ s8 antlr.Token
+ ops []antlr.Token
+ _relation IRelationContext
+ e1 []IRelationContext
+}
+
+func NewEmptyConditionalAndContext() *ConditionalAndContext {
+ var p = new(ConditionalAndContext)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+ p.RuleIndex = CELParserRULE_conditionalAnd
+ return p
+}
+
+func (*ConditionalAndContext) IsConditionalAndContext() {}
+
+func NewConditionalAndContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ConditionalAndContext {
+ var p = new(ConditionalAndContext)
+
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_conditionalAnd
+
+ return p
+}
+
+func (s *ConditionalAndContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *ConditionalAndContext) GetS8() antlr.Token { return s.s8 }
+
+func (s *ConditionalAndContext) SetS8(v antlr.Token) { s.s8 = v }
+
+func (s *ConditionalAndContext) GetOps() []antlr.Token { return s.ops }
+
+func (s *ConditionalAndContext) SetOps(v []antlr.Token) { s.ops = v }
+
+func (s *ConditionalAndContext) GetE() IRelationContext { return s.e }
+
+func (s *ConditionalAndContext) Get_relation() IRelationContext { return s._relation }
+
+func (s *ConditionalAndContext) SetE(v IRelationContext) { s.e = v }
+
+func (s *ConditionalAndContext) Set_relation(v IRelationContext) { s._relation = v }
+
+func (s *ConditionalAndContext) GetE1() []IRelationContext { return s.e1 }
+
+func (s *ConditionalAndContext) SetE1(v []IRelationContext) { s.e1 = v }
+
+func (s *ConditionalAndContext) AllRelation() []IRelationContext {
+ children := s.GetChildren()
+ n := 0
+ for _, ctx := range children {
+ if _, ok := ctx.(IRelationContext); ok {
+ n++
+ }
+ }
+
+ tst := make([]IRelationContext, n)
+ i := 0
+ for _, ctx := range children {
+ if t, ok := ctx.(IRelationContext); ok {
+ tst[i] = t
+ i++
+ }
+ }
+
+ return tst
+}
+
+func (s *ConditionalAndContext) Relation(i int) IRelationContext {
+ var t antlr.RuleContext
+ j := 0
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IRelationContext); ok {
+ if j == i {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ j++
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IRelationContext)
+}
+
+func (s *ConditionalAndContext) AllLOGICAL_AND() []antlr.TerminalNode {
+ return s.GetTokens(CELParserLOGICAL_AND)
+}
+
+func (s *ConditionalAndContext) LOGICAL_AND(i int) antlr.TerminalNode {
+ return s.GetToken(CELParserLOGICAL_AND, i)
+}
+
+func (s *ConditionalAndContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *ConditionalAndContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *ConditionalAndContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterConditionalAnd(s)
+ }
+}
+
+func (s *ConditionalAndContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitConditionalAnd(s)
+ }
+}
+
+func (s *ConditionalAndContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitConditionalAnd(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
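+// ConditionalAnd parses relation ('&&' relation)*, accumulating the
+// operator tokens in ops and the right-hand operands in e1.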
+func (p *CELParser) ConditionalAnd() (localctx IConditionalAndContext) {
+ this := p
+ _ = this
+
+ localctx = NewConditionalAndContext(p, p.GetParserRuleContext(), p.GetState())
+ p.EnterRule(localctx, 6, CELParserRULE_conditionalAnd)
+ var _la int
+
+ defer func() {
+ p.ExitRule()
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
+ p.EnterOuterAlt(localctx, 1)
+ {
+ p.SetState(51)
+
+ var _x = p.relation(0)
+
+ localctx.(*ConditionalAndContext).e = _x
+ }
+ p.SetState(56)
+ p.GetErrorHandler().Sync(p)
+ _la = p.GetTokenStream().LA(1)
+
+ for _la == CELParserLOGICAL_AND {
+ {
+ p.SetState(52)
+
+ var _m = p.Match(CELParserLOGICAL_AND)
+
+ localctx.(*ConditionalAndContext).s8 = _m
+ }
+ localctx.(*ConditionalAndContext).ops = append(localctx.(*ConditionalAndContext).ops, localctx.(*ConditionalAndContext).s8)
+ {
+ p.SetState(53)
+
+ var _x = p.relation(0)
+
+ localctx.(*ConditionalAndContext)._relation = _x
+ }
+ localctx.(*ConditionalAndContext).e1 = append(localctx.(*ConditionalAndContext).e1, localctx.(*ConditionalAndContext)._relation)
+
+ p.SetState(58)
+ p.GetErrorHandler().Sync(p)
+ _la = p.GetTokenStream().LA(1)
+ }
+
+ return localctx
+}
+
+// IRelationContext is an interface to support dynamic dispatch.
+type IRelationContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+
+ // GetOp returns the op token.
+ GetOp() antlr.Token
+
+ // SetOp sets the op token.
+ SetOp(antlr.Token)
+
+ // Getter signatures
+ Calc() ICalcContext
+ AllRelation() []IRelationContext
+ Relation(i int) IRelationContext
+ LESS() antlr.TerminalNode
+ LESS_EQUALS() antlr.TerminalNode
+ GREATER_EQUALS() antlr.TerminalNode
+ GREATER() antlr.TerminalNode
+ EQUALS() antlr.TerminalNode
+ NOT_EQUALS() antlr.TerminalNode
+ IN() antlr.TerminalNode
+
+ // IsRelationContext differentiates from other interfaces.
+ IsRelationContext()
+}
+
+type RelationContext struct {
+ *antlr.BaseParserRuleContext
+ parser antlr.Parser
+ op antlr.Token
+}
+
+func NewEmptyRelationContext() *RelationContext {
+ var p = new(RelationContext)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+ p.RuleIndex = CELParserRULE_relation
+ return p
+}
+
+func (*RelationContext) IsRelationContext() {}
+
+func NewRelationContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *RelationContext {
+ var p = new(RelationContext)
+
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_relation
+
+ return p
+}
+
+func (s *RelationContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *RelationContext) GetOp() antlr.Token { return s.op }
+
+func (s *RelationContext) SetOp(v antlr.Token) { s.op = v }
+
+func (s *RelationContext) Calc() ICalcContext {
+ var t antlr.RuleContext
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(ICalcContext); ok {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(ICalcContext)
+}
+
+func (s *RelationContext) AllRelation() []IRelationContext {
+ children := s.GetChildren()
+ n := 0
+ for _, ctx := range children {
+ if _, ok := ctx.(IRelationContext); ok {
+ n++
+ }
+ }
+
+ tst := make([]IRelationContext, n)
+ i := 0
+ for _, ctx := range children {
+ if t, ok := ctx.(IRelationContext); ok {
+ tst[i] = t
+ i++
+ }
+ }
+
+ return tst
+}
+
+func (s *RelationContext) Relation(i int) IRelationContext {
+ var t antlr.RuleContext
+ j := 0
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IRelationContext); ok {
+ if j == i {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ j++
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IRelationContext)
+}
+
+func (s *RelationContext) LESS() antlr.TerminalNode {
+ return s.GetToken(CELParserLESS, 0)
+}
+
+func (s *RelationContext) LESS_EQUALS() antlr.TerminalNode {
+ return s.GetToken(CELParserLESS_EQUALS, 0)
+}
+
+func (s *RelationContext) GREATER_EQUALS() antlr.TerminalNode {
+ return s.GetToken(CELParserGREATER_EQUALS, 0)
+}
+
+func (s *RelationContext) GREATER() antlr.TerminalNode {
+ return s.GetToken(CELParserGREATER, 0)
+}
+
+func (s *RelationContext) EQUALS() antlr.TerminalNode {
+ return s.GetToken(CELParserEQUALS, 0)
+}
+
+func (s *RelationContext) NOT_EQUALS() antlr.TerminalNode {
+ return s.GetToken(CELParserNOT_EQUALS, 0)
+}
+
+func (s *RelationContext) IN() antlr.TerminalNode {
+ return s.GetToken(CELParserIN, 0)
+}
+
+func (s *RelationContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *RelationContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *RelationContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterRelation(s)
+ }
+}
+
+func (s *RelationContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitRelation(s)
+ }
+}
+
+func (s *RelationContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitRelation(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+func (p *CELParser) Relation() (localctx IRelationContext) {
+ return p.relation(0)
+}
+
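+// relation implements the left-recursive relation rule via ANTLR's
+// precedence-climbing scheme: parse a calc operand, then, while the
+// precedence predicate holds, consume a relational operator and recurse.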
+func (p *CELParser) relation(_p int) (localctx IRelationContext) {
+ this := p
+ _ = this
+
+ var _parentctx antlr.ParserRuleContext = p.GetParserRuleContext()
+ _parentState := p.GetState()
+ localctx = NewRelationContext(p, p.GetParserRuleContext(), _parentState)
+ var _prevctx IRelationContext = localctx
+ var _ antlr.ParserRuleContext = _prevctx // referenced to avoid an unused-variable error
+ _startState := 8
+ p.EnterRecursionRule(localctx, 8, CELParserRULE_relation, _p)
+ var _la int
+
+ defer func() {
+ p.UnrollRecursionContexts(_parentctx)
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
+ var _alt int
+
+ p.EnterOuterAlt(localctx, 1)
+ {
+ p.SetState(60)
+ p.calc(0)
+ }
+
+ p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1))
+ p.SetState(67)
+ p.GetErrorHandler().Sync(p)
+ _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 3, p.GetParserRuleContext())
+
+ for _alt != 2 && _alt != antlr.ATNInvalidAltNumber {
+ if _alt == 1 {
+ if p.GetParseListeners() != nil {
+ p.TriggerExitRuleEvent()
+ }
+ _prevctx = localctx
+ localctx = NewRelationContext(p, _parentctx, _parentState)
+ p.PushNewRecursionContext(localctx, _startState, CELParserRULE_relation)
+ p.SetState(62)
+
+ if !(p.Precpred(p.GetParserRuleContext(), 1)) {
+ panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", ""))
+ }
+ {
+ p.SetState(63)
+
+ var _lt = p.GetTokenStream().LT(1)
+
+ localctx.(*RelationContext).op = _lt
+
+ _la = p.GetTokenStream().LA(1)
+
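+ // 254 sets bits 1-7: EQUALS, NOT_EQUALS, IN, LESS, LESS_EQUALS,
+ // GREATER_EQUALS and GREATER, i.e. the relational operator tokens.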
+ if !((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&254) != 0) {
+ var _ri = p.GetErrorHandler().RecoverInline(p)
+
+ localctx.(*RelationContext).op = _ri
+ } else {
+ p.GetErrorHandler().ReportMatch(p)
+ p.Consume()
+ }
+ }
+ {
+ p.SetState(64)
+ p.relation(2)
+ }
+
+ }
+ p.SetState(69)
+ p.GetErrorHandler().Sync(p)
+ _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 3, p.GetParserRuleContext())
+ }
+
+ return localctx
+}
+
+// ICalcContext is an interface to support dynamic dispatch.
+type ICalcContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+
+ // GetOp returns the op token.
+ GetOp() antlr.Token
+
+ // SetOp sets the op token.
+ SetOp(antlr.Token)
+
+ // Getter signatures
+ Unary() IUnaryContext
+ AllCalc() []ICalcContext
+ Calc(i int) ICalcContext
+ STAR() antlr.TerminalNode
+ SLASH() antlr.TerminalNode
+ PERCENT() antlr.TerminalNode
+ PLUS() antlr.TerminalNode
+ MINUS() antlr.TerminalNode
+
+ // IsCalcContext differentiates from other interfaces.
+ IsCalcContext()
+}
+
+type CalcContext struct {
+ *antlr.BaseParserRuleContext
+ parser antlr.Parser
+ op antlr.Token
+}
+
+func NewEmptyCalcContext() *CalcContext {
+ var p = new(CalcContext)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+ p.RuleIndex = CELParserRULE_calc
+ return p
+}
+
+func (*CalcContext) IsCalcContext() {}
+
+func NewCalcContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *CalcContext {
+ var p = new(CalcContext)
+
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_calc
+
+ return p
+}
+
+func (s *CalcContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *CalcContext) GetOp() antlr.Token { return s.op }
+
+func (s *CalcContext) SetOp(v antlr.Token) { s.op = v }
+
+func (s *CalcContext) Unary() IUnaryContext {
+ var t antlr.RuleContext
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IUnaryContext); ok {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IUnaryContext)
+}
+
+func (s *CalcContext) AllCalc() []ICalcContext {
+ children := s.GetChildren()
+ n := 0
+ for _, ctx := range children {
+ if _, ok := ctx.(ICalcContext); ok {
+ n++
+ }
+ }
+
+ tst := make([]ICalcContext, n)
+ i := 0
+ for _, ctx := range children {
+ if t, ok := ctx.(ICalcContext); ok {
+ tst[i] = t
+ i++
+ }
+ }
+
+ return tst
+}
+
+func (s *CalcContext) Calc(i int) ICalcContext {
+ var t antlr.RuleContext
+ j := 0
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(ICalcContext); ok {
+ if j == i {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ j++
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(ICalcContext)
+}
+
+func (s *CalcContext) STAR() antlr.TerminalNode {
+ return s.GetToken(CELParserSTAR, 0)
+}
+
+func (s *CalcContext) SLASH() antlr.TerminalNode {
+ return s.GetToken(CELParserSLASH, 0)
+}
+
+func (s *CalcContext) PERCENT() antlr.TerminalNode {
+ return s.GetToken(CELParserPERCENT, 0)
+}
+
+func (s *CalcContext) PLUS() antlr.TerminalNode {
+ return s.GetToken(CELParserPLUS, 0)
+}
+
+func (s *CalcContext) MINUS() antlr.TerminalNode {
+ return s.GetToken(CELParserMINUS, 0)
+}
+
+func (s *CalcContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *CalcContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *CalcContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterCalc(s)
+ }
+}
+
+func (s *CalcContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitCalc(s)
+ }
+}
+
+func (s *CalcContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitCalc(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+func (p *CELParser) Calc() (localctx ICalcContext) {
+ return p.calc(0)
+}
+
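+// calc implements the left-recursive arithmetic rule: a unary operand
+// followed by multiplicative ('*', '/', '%', precedence 2) or additive
+// ('+', '-', precedence 1) operator suffixes.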
+func (p *CELParser) calc(_p int) (localctx ICalcContext) {
+ this := p
+ _ = this
+
+ var _parentctx antlr.ParserRuleContext = p.GetParserRuleContext()
+ _parentState := p.GetState()
+ localctx = NewCalcContext(p, p.GetParserRuleContext(), _parentState)
+ var _prevctx ICalcContext = localctx
+ var _ antlr.ParserRuleContext = _prevctx // referenced to avoid an unused-variable error
+ _startState := 10
+ p.EnterRecursionRule(localctx, 10, CELParserRULE_calc, _p)
+ var _la int
+
+ defer func() {
+ p.UnrollRecursionContexts(_parentctx)
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
+ var _alt int
+
+ p.EnterOuterAlt(localctx, 1)
+ {
+ p.SetState(71)
+ p.Unary()
+ }
+
+ p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1))
+ p.SetState(81)
+ p.GetErrorHandler().Sync(p)
+ _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 5, p.GetParserRuleContext())
+
+ for _alt != 2 && _alt != antlr.ATNInvalidAltNumber {
+ if _alt == 1 {
+ if p.GetParseListeners() != nil {
+ p.TriggerExitRuleEvent()
+ }
+ _prevctx = localctx
+ p.SetState(79)
+ p.GetErrorHandler().Sync(p)
+ switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 4, p.GetParserRuleContext()) {
+ case 1:
+ localctx = NewCalcContext(p, _parentctx, _parentState)
+ p.PushNewRecursionContext(localctx, _startState, CELParserRULE_calc)
+ p.SetState(73)
+
+ if !(p.Precpred(p.GetParserRuleContext(), 2)) {
+ panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 2)", ""))
+ }
+ {
+ p.SetState(74)
+
+ var _lt = p.GetTokenStream().LT(1)
+
+ localctx.(*CalcContext).op = _lt
+
+ _la = p.GetTokenStream().LA(1)
+
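+ // 58720256 sets bits 23-25: STAR, SLASH and PERCENT.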
+ if !((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&58720256) != 0) {
+ var _ri = p.GetErrorHandler().RecoverInline(p)
+
+ localctx.(*CalcContext).op = _ri
+ } else {
+ p.GetErrorHandler().ReportMatch(p)
+ p.Consume()
+ }
+ }
+ {
+ p.SetState(75)
+ p.calc(3)
+ }
+
+ case 2:
+ localctx = NewCalcContext(p, _parentctx, _parentState)
+ p.PushNewRecursionContext(localctx, _startState, CELParserRULE_calc)
+ p.SetState(76)
+
+ if !(p.Precpred(p.GetParserRuleContext(), 1)) {
+ panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", ""))
+ }
+ {
+ p.SetState(77)
+
+ var _lt = p.GetTokenStream().LT(1)
+
+ localctx.(*CalcContext).op = _lt
+
+ _la = p.GetTokenStream().LA(1)
+
+ if !(_la == CELParserMINUS || _la == CELParserPLUS) {
+ var _ri = p.GetErrorHandler().RecoverInline(p)
+
+ localctx.(*CalcContext).op = _ri
+ } else {
+ p.GetErrorHandler().ReportMatch(p)
+ p.Consume()
+ }
+ }
+ {
+ p.SetState(78)
+ p.calc(2)
+ }
+
+ }
+
+ }
+ p.SetState(83)
+ p.GetErrorHandler().Sync(p)
+ _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 5, p.GetParserRuleContext())
+ }
+
+ return localctx
+}
+
+// IUnaryContext is an interface to support dynamic dispatch.
+type IUnaryContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+ // IsUnaryContext differentiates from other interfaces.
+ IsUnaryContext()
+}
+
+type UnaryContext struct {
+ *antlr.BaseParserRuleContext
+ parser antlr.Parser
+}
+
+func NewEmptyUnaryContext() *UnaryContext {
+ var p = new(UnaryContext)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+ p.RuleIndex = CELParserRULE_unary
+ return p
+}
+
+func (*UnaryContext) IsUnaryContext() {}
+
+func NewUnaryContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *UnaryContext {
+ var p = new(UnaryContext)
+
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_unary
+
+ return p
+}
+
+func (s *UnaryContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *UnaryContext) CopyFrom(ctx *UnaryContext) {
+ s.BaseParserRuleContext.CopyFrom(ctx.BaseParserRuleContext)
+}
+
+func (s *UnaryContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *UnaryContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+type LogicalNotContext struct {
+ *UnaryContext
+ s19 antlr.Token
+ ops []antlr.Token
+}
+
+func NewLogicalNotContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *LogicalNotContext {
+ var p = new(LogicalNotContext)
+
+ p.UnaryContext = NewEmptyUnaryContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*UnaryContext))
+
+ return p
+}
+
+func (s *LogicalNotContext) GetS19() antlr.Token { return s.s19 }
+
+func (s *LogicalNotContext) SetS19(v antlr.Token) { s.s19 = v }
+
+func (s *LogicalNotContext) GetOps() []antlr.Token { return s.ops }
+
+func (s *LogicalNotContext) SetOps(v []antlr.Token) { s.ops = v }
+
+func (s *LogicalNotContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *LogicalNotContext) Member() IMemberContext {
+ var t antlr.RuleContext
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IMemberContext); ok {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IMemberContext)
+}
+
+func (s *LogicalNotContext) AllEXCLAM() []antlr.TerminalNode {
+ return s.GetTokens(CELParserEXCLAM)
+}
+
+func (s *LogicalNotContext) EXCLAM(i int) antlr.TerminalNode {
+ return s.GetToken(CELParserEXCLAM, i)
+}
+
+func (s *LogicalNotContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterLogicalNot(s)
+ }
+}
+
+func (s *LogicalNotContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitLogicalNot(s)
+ }
+}
+
+func (s *LogicalNotContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitLogicalNot(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type MemberExprContext struct {
+ *UnaryContext
+}
+
+func NewMemberExprContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *MemberExprContext {
+ var p = new(MemberExprContext)
+
+ p.UnaryContext = NewEmptyUnaryContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*UnaryContext))
+
+ return p
+}
+
+func (s *MemberExprContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *MemberExprContext) Member() IMemberContext {
+ var t antlr.RuleContext
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IMemberContext); ok {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IMemberContext)
+}
+
+func (s *MemberExprContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterMemberExpr(s)
+ }
+}
+
+func (s *MemberExprContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitMemberExpr(s)
+ }
+}
+
+func (s *MemberExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitMemberExpr(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type NegateContext struct {
+ *UnaryContext
+ s18 antlr.Token
+ ops []antlr.Token
+}
+
+func NewNegateContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *NegateContext {
+ var p = new(NegateContext)
+
+ p.UnaryContext = NewEmptyUnaryContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*UnaryContext))
+
+ return p
+}
+
+func (s *NegateContext) GetS18() antlr.Token { return s.s18 }
+
+func (s *NegateContext) SetS18(v antlr.Token) { s.s18 = v }
+
+func (s *NegateContext) GetOps() []antlr.Token { return s.ops }
+
+func (s *NegateContext) SetOps(v []antlr.Token) { s.ops = v }
+
+func (s *NegateContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *NegateContext) Member() IMemberContext {
+ var t antlr.RuleContext
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IMemberContext); ok {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IMemberContext)
+}
+
+func (s *NegateContext) AllMINUS() []antlr.TerminalNode {
+ return s.GetTokens(CELParserMINUS)
+}
+
+func (s *NegateContext) MINUS(i int) antlr.TerminalNode {
+ return s.GetToken(CELParserMINUS, i)
+}
+
+func (s *NegateContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterNegate(s)
+ }
+}
+
+func (s *NegateContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitNegate(s)
+ }
+}
+
+func (s *NegateContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitNegate(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
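+// Unary dispatches between the three unary alternatives: a plain member
+// expression, one or more '!' before a member (LogicalNot), or one or
+// more '-' before a member (Negate).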
+func (p *CELParser) Unary() (localctx IUnaryContext) {
+ this := p
+ _ = this
+
+ localctx = NewUnaryContext(p, p.GetParserRuleContext(), p.GetState())
+ p.EnterRule(localctx, 12, CELParserRULE_unary)
+ var _la int
+
+ defer func() {
+ p.ExitRule()
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
+ var _alt int
+
+ p.SetState(97)
+ p.GetErrorHandler().Sync(p)
+ switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 8, p.GetParserRuleContext()) {
+ case 1:
+ localctx = NewMemberExprContext(p, localctx)
+ p.EnterOuterAlt(localctx, 1)
+ {
+ p.SetState(84)
+ p.member(0)
+ }
+
+ case 2:
+ localctx = NewLogicalNotContext(p, localctx)
+ p.EnterOuterAlt(localctx, 2)
+ p.SetState(86)
+ p.GetErrorHandler().Sync(p)
+ _la = p.GetTokenStream().LA(1)
+
+ for ok := true; ok; ok = _la == CELParserEXCLAM {
+ {
+ p.SetState(85)
+
+ var _m = p.Match(CELParserEXCLAM)
+
+ localctx.(*LogicalNotContext).s19 = _m
+ }
+ localctx.(*LogicalNotContext).ops = append(localctx.(*LogicalNotContext).ops, localctx.(*LogicalNotContext).s19)
+
+ p.SetState(88)
+ p.GetErrorHandler().Sync(p)
+ _la = p.GetTokenStream().LA(1)
+ }
+ {
+ p.SetState(90)
+ p.member(0)
+ }
+
+ case 3:
+ localctx = NewNegateContext(p, localctx)
+ p.EnterOuterAlt(localctx, 3)
+ p.SetState(92)
+ p.GetErrorHandler().Sync(p)
+ _alt = 1
+ for ok := true; ok; ok = _alt != 2 && _alt != antlr.ATNInvalidAltNumber {
+ switch _alt {
+ case 1:
+ {
+ p.SetState(91)
+
+ var _m = p.Match(CELParserMINUS)
+
+ localctx.(*NegateContext).s18 = _m
+ }
+ localctx.(*NegateContext).ops = append(localctx.(*NegateContext).ops, localctx.(*NegateContext).s18)
+
+ default:
+ panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil))
+ }
+
+ p.SetState(94)
+ p.GetErrorHandler().Sync(p)
+ _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 7, p.GetParserRuleContext())
+ }
+ {
+ p.SetState(96)
+ p.member(0)
+ }
+
+ }
+
+ return localctx
+}
+
+// IMemberContext is an interface to support dynamic dispatch.
+type IMemberContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+ // IsMemberContext differentiates from other interfaces.
+ IsMemberContext()
+}
+
+type MemberContext struct {
+ *antlr.BaseParserRuleContext
+ parser antlr.Parser
+}
+
+func NewEmptyMemberContext() *MemberContext {
+ var p = new(MemberContext)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+ p.RuleIndex = CELParserRULE_member
+ return p
+}
+
+func (*MemberContext) IsMemberContext() {}
+
+func NewMemberContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *MemberContext {
+ var p = new(MemberContext)
+
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_member
+
+ return p
+}
+
+func (s *MemberContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *MemberContext) CopyFrom(ctx *MemberContext) {
+ s.BaseParserRuleContext.CopyFrom(ctx.BaseParserRuleContext)
+}
+
+func (s *MemberContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *MemberContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+type MemberCallContext struct {
+ *MemberContext
+ op antlr.Token
+ id antlr.Token
+ open antlr.Token
+ args IExprListContext
+}
+
+func NewMemberCallContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *MemberCallContext {
+ var p = new(MemberCallContext)
+
+ p.MemberContext = NewEmptyMemberContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*MemberContext))
+
+ return p
+}
+
+func (s *MemberCallContext) GetOp() antlr.Token { return s.op }
+
+func (s *MemberCallContext) GetId() antlr.Token { return s.id }
+
+func (s *MemberCallContext) GetOpen() antlr.Token { return s.open }
+
+func (s *MemberCallContext) SetOp(v antlr.Token) { s.op = v }
+
+func (s *MemberCallContext) SetId(v antlr.Token) { s.id = v }
+
+func (s *MemberCallContext) SetOpen(v antlr.Token) { s.open = v }
+
+func (s *MemberCallContext) GetArgs() IExprListContext { return s.args }
+
+func (s *MemberCallContext) SetArgs(v IExprListContext) { s.args = v }
+
+func (s *MemberCallContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *MemberCallContext) Member() IMemberContext {
+ var t antlr.RuleContext
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IMemberContext); ok {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IMemberContext)
+}
+
+func (s *MemberCallContext) RPAREN() antlr.TerminalNode {
+ return s.GetToken(CELParserRPAREN, 0)
+}
+
+func (s *MemberCallContext) DOT() antlr.TerminalNode {
+ return s.GetToken(CELParserDOT, 0)
+}
+
+func (s *MemberCallContext) IDENTIFIER() antlr.TerminalNode {
+ return s.GetToken(CELParserIDENTIFIER, 0)
+}
+
+func (s *MemberCallContext) LPAREN() antlr.TerminalNode {
+ return s.GetToken(CELParserLPAREN, 0)
+}
+
+func (s *MemberCallContext) ExprList() IExprListContext {
+ var t antlr.RuleContext
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IExprListContext); ok {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExprListContext)
+}
+
+func (s *MemberCallContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterMemberCall(s)
+ }
+}
+
+func (s *MemberCallContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitMemberCall(s)
+ }
+}
+
+func (s *MemberCallContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitMemberCall(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type SelectContext struct {
+ *MemberContext
+ op antlr.Token
+ opt antlr.Token
+ id antlr.Token
+}
+
+func NewSelectContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *SelectContext {
+ var p = new(SelectContext)
+
+ p.MemberContext = NewEmptyMemberContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*MemberContext))
+
+ return p
+}
+
+func (s *SelectContext) GetOp() antlr.Token { return s.op }
+
+func (s *SelectContext) GetOpt() antlr.Token { return s.opt }
+
+func (s *SelectContext) GetId() antlr.Token { return s.id }
+
+func (s *SelectContext) SetOp(v antlr.Token) { s.op = v }
+
+func (s *SelectContext) SetOpt(v antlr.Token) { s.opt = v }
+
+func (s *SelectContext) SetId(v antlr.Token) { s.id = v }
+
+func (s *SelectContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *SelectContext) Member() IMemberContext {
+ var t antlr.RuleContext
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IMemberContext); ok {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IMemberContext)
+}
+
+func (s *SelectContext) DOT() antlr.TerminalNode {
+ return s.GetToken(CELParserDOT, 0)
+}
+
+func (s *SelectContext) IDENTIFIER() antlr.TerminalNode {
+ return s.GetToken(CELParserIDENTIFIER, 0)
+}
+
+func (s *SelectContext) QUESTIONMARK() antlr.TerminalNode {
+ return s.GetToken(CELParserQUESTIONMARK, 0)
+}
+
+func (s *SelectContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterSelect(s)
+ }
+}
+
+func (s *SelectContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitSelect(s)
+ }
+}
+
+func (s *SelectContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitSelect(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type PrimaryExprContext struct {
+ *MemberContext
+}
+
+func NewPrimaryExprContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *PrimaryExprContext {
+ var p = new(PrimaryExprContext)
+
+ p.MemberContext = NewEmptyMemberContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*MemberContext))
+
+ return p
+}
+
+func (s *PrimaryExprContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *PrimaryExprContext) Primary() IPrimaryContext {
+ var t antlr.RuleContext
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IPrimaryContext); ok {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IPrimaryContext)
+}
+
+func (s *PrimaryExprContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterPrimaryExpr(s)
+ }
+}
+
+func (s *PrimaryExprContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitPrimaryExpr(s)
+ }
+}
+
+func (s *PrimaryExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitPrimaryExpr(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type IndexContext struct {
+ *MemberContext
+ op antlr.Token
+ opt antlr.Token
+ index IExprContext
+}
+
+func NewIndexContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *IndexContext {
+ var p = new(IndexContext)
+
+ p.MemberContext = NewEmptyMemberContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*MemberContext))
+
+ return p
+}
+
+func (s *IndexContext) GetOp() antlr.Token { return s.op }
+
+func (s *IndexContext) GetOpt() antlr.Token { return s.opt }
+
+func (s *IndexContext) SetOp(v antlr.Token) { s.op = v }
+
+func (s *IndexContext) SetOpt(v antlr.Token) { s.opt = v }
+
+func (s *IndexContext) GetIndex() IExprContext { return s.index }
+
+func (s *IndexContext) SetIndex(v IExprContext) { s.index = v }
+
+func (s *IndexContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *IndexContext) Member() IMemberContext {
+ var t antlr.RuleContext
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IMemberContext); ok {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IMemberContext)
+}
+
+func (s *IndexContext) RPRACKET() antlr.TerminalNode {
+ return s.GetToken(CELParserRPRACKET, 0)
+}
+
+func (s *IndexContext) LBRACKET() antlr.TerminalNode {
+ return s.GetToken(CELParserLBRACKET, 0)
+}
+
+func (s *IndexContext) Expr() IExprContext {
+ var t antlr.RuleContext
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IExprContext); ok {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExprContext)
+}
+
+func (s *IndexContext) QUESTIONMARK() antlr.TerminalNode {
+ return s.GetToken(CELParserQUESTIONMARK, 0)
+}
+
+func (s *IndexContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterIndex(s)
+ }
+}
+
+func (s *IndexContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitIndex(s)
+ }
+}
+
+func (s *IndexContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitIndex(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+func (p *CELParser) Member() (localctx IMemberContext) {
+ return p.member(0)
+}
+
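+// member implements the left-recursive member rule: a primary followed by
+// any number of field selections ('.'), method calls ('.' name '(...)')
+// and index operations ('[...]'), each with an optional '?' marker for
+// CEL's optional-entry syntax.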
+func (p *CELParser) member(_p int) (localctx IMemberContext) {
+ this := p
+ _ = this
+
+ var _parentctx antlr.ParserRuleContext = p.GetParserRuleContext()
+ _parentState := p.GetState()
+ localctx = NewMemberContext(p, p.GetParserRuleContext(), _parentState)
+ var _prevctx IMemberContext = localctx
+ var _ antlr.ParserRuleContext = _prevctx // referenced to avoid an unused-variable error
+ _startState := 14
+ p.EnterRecursionRule(localctx, 14, CELParserRULE_member, _p)
+ var _la int
+
+ defer func() {
+ p.UnrollRecursionContexts(_parentctx)
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
+ var _alt int
+
+ p.EnterOuterAlt(localctx, 1)
+ localctx = NewPrimaryExprContext(p, localctx)
+ p.SetParserRuleContext(localctx)
+ _prevctx = localctx
+
+ {
+ p.SetState(100)
+ p.Primary()
+ }
+
+ p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1))
+ p.SetState(126)
+ p.GetErrorHandler().Sync(p)
+ _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 13, p.GetParserRuleContext())
+
+ for _alt != 2 && _alt != antlr.ATNInvalidAltNumber {
+ if _alt == 1 {
+ if p.GetParseListeners() != nil {
+ p.TriggerExitRuleEvent()
+ }
+ _prevctx = localctx
+ p.SetState(124)
+ p.GetErrorHandler().Sync(p)
+ switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 12, p.GetParserRuleContext()) {
+ case 1:
+ localctx = NewSelectContext(p, NewMemberContext(p, _parentctx, _parentState))
+ p.PushNewRecursionContext(localctx, _startState, CELParserRULE_member)
+ p.SetState(102)
+
+ if !(p.Precpred(p.GetParserRuleContext(), 3)) {
+ panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 3)", ""))
+ }
+ {
+ p.SetState(103)
+
+ var _m = p.Match(CELParserDOT)
+
+ localctx.(*SelectContext).op = _m
+ }
+ p.SetState(105)
+ p.GetErrorHandler().Sync(p)
+ _la = p.GetTokenStream().LA(1)
+
+ if _la == CELParserQUESTIONMARK {
+ {
+ p.SetState(104)
+
+ var _m = p.Match(CELParserQUESTIONMARK)
+
+ localctx.(*SelectContext).opt = _m
+ }
+
+ }
+ {
+ p.SetState(107)
+
+ var _m = p.Match(CELParserIDENTIFIER)
+
+ localctx.(*SelectContext).id = _m
+ }
+
+ case 2:
+ localctx = NewMemberCallContext(p, NewMemberContext(p, _parentctx, _parentState))
+ p.PushNewRecursionContext(localctx, _startState, CELParserRULE_member)
+ p.SetState(108)
+
+ if !(p.Precpred(p.GetParserRuleContext(), 2)) {
+ panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 2)", ""))
+ }
+ {
+ p.SetState(109)
+
+ var _m = p.Match(CELParserDOT)
+
+ localctx.(*MemberCallContext).op = _m
+ }
+ {
+ p.SetState(110)
+
+ var _m = p.Match(CELParserIDENTIFIER)
+
+ localctx.(*MemberCallContext).id = _m
+ }
+ {
+ p.SetState(111)
+
+ var _m = p.Match(CELParserLPAREN)
+
+ localctx.(*MemberCallContext).open = _m
+ }
+ p.SetState(113)
+ p.GetErrorHandler().Sync(p)
+ _la = p.GetTokenStream().LA(1)
+
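+ // The mask encodes the tokens that can begin an expression (literals,
+ // identifiers, '(', '[', '{', '.', '-' and '!'), so an argument list is
+ // parsed only when such a token is next.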
+ if (int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&135762105344) != 0 {
+ {
+ p.SetState(112)
+
+ var _x = p.ExprList()
+
+ localctx.(*MemberCallContext).args = _x
+ }
+
+ }
+ {
+ p.SetState(115)
+ p.Match(CELParserRPAREN)
+ }
+
+ case 3:
+ localctx = NewIndexContext(p, NewMemberContext(p, _parentctx, _parentState))
+ p.PushNewRecursionContext(localctx, _startState, CELParserRULE_member)
+ p.SetState(116)
+
+ if !(p.Precpred(p.GetParserRuleContext(), 1)) {
+ panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", ""))
+ }
+ {
+ p.SetState(117)
+
+ var _m = p.Match(CELParserLBRACKET)
+
+ localctx.(*IndexContext).op = _m
+ }
+ p.SetState(119)
+ p.GetErrorHandler().Sync(p)
+ _la = p.GetTokenStream().LA(1)
+
+ if _la == CELParserQUESTIONMARK {
+ {
+ p.SetState(118)
+
+ var _m = p.Match(CELParserQUESTIONMARK)
+
+ localctx.(*IndexContext).opt = _m
+ }
+
+ }
+ {
+ p.SetState(121)
+
+ var _x = p.Expr()
+
+ localctx.(*IndexContext).index = _x
+ }
+ {
+ p.SetState(122)
+ p.Match(CELParserRPRACKET)
+ }
+
+ }
+
+ }
+ p.SetState(128)
+ p.GetErrorHandler().Sync(p)
+ _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 13, p.GetParserRuleContext())
+ }
+
+ return localctx
+}
+
+// IPrimaryContext is an interface to support dynamic dispatch.
+type IPrimaryContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+ // IsPrimaryContext differentiates from other interfaces.
+ IsPrimaryContext()
+}
+
+type PrimaryContext struct {
+ *antlr.BaseParserRuleContext
+ parser antlr.Parser
+}
+
+func NewEmptyPrimaryContext() *PrimaryContext {
+ var p = new(PrimaryContext)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+ p.RuleIndex = CELParserRULE_primary
+ return p
+}
+
+func (*PrimaryContext) IsPrimaryContext() {}
+
+func NewPrimaryContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *PrimaryContext {
+ var p = new(PrimaryContext)
+
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_primary
+
+ return p
+}
+
+func (s *PrimaryContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *PrimaryContext) CopyFrom(ctx *PrimaryContext) {
+ s.BaseParserRuleContext.CopyFrom(ctx.BaseParserRuleContext)
+}
+
+func (s *PrimaryContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *PrimaryContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+type CreateListContext struct {
+ *PrimaryContext
+ op antlr.Token
+ elems IListInitContext
+}
+
+func NewCreateListContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *CreateListContext {
+ var p = new(CreateListContext)
+
+ p.PrimaryContext = NewEmptyPrimaryContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*PrimaryContext))
+
+ return p
+}
+
+func (s *CreateListContext) GetOp() antlr.Token { return s.op }
+
+func (s *CreateListContext) SetOp(v antlr.Token) { s.op = v }
+
+func (s *CreateListContext) GetElems() IListInitContext { return s.elems }
+
+func (s *CreateListContext) SetElems(v IListInitContext) { s.elems = v }
+
+func (s *CreateListContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *CreateListContext) RPRACKET() antlr.TerminalNode {
+ return s.GetToken(CELParserRPRACKET, 0)
+}
+
+func (s *CreateListContext) LBRACKET() antlr.TerminalNode {
+ return s.GetToken(CELParserLBRACKET, 0)
+}
+
+func (s *CreateListContext) COMMA() antlr.TerminalNode {
+ return s.GetToken(CELParserCOMMA, 0)
+}
+
+func (s *CreateListContext) ListInit() IListInitContext {
+ var t antlr.RuleContext
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IListInitContext); ok {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IListInitContext)
+}
+
+func (s *CreateListContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterCreateList(s)
+ }
+}
+
+func (s *CreateListContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitCreateList(s)
+ }
+}
+
+func (s *CreateListContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitCreateList(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type CreateStructContext struct {
+ *PrimaryContext
+ op antlr.Token
+ entries IMapInitializerListContext
+}
+
+func NewCreateStructContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *CreateStructContext {
+ var p = new(CreateStructContext)
+
+ p.PrimaryContext = NewEmptyPrimaryContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*PrimaryContext))
+
+ return p
+}
+
+func (s *CreateStructContext) GetOp() antlr.Token { return s.op }
+
+func (s *CreateStructContext) SetOp(v antlr.Token) { s.op = v }
+
+func (s *CreateStructContext) GetEntries() IMapInitializerListContext { return s.entries }
+
+func (s *CreateStructContext) SetEntries(v IMapInitializerListContext) { s.entries = v }
+
+func (s *CreateStructContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *CreateStructContext) RBRACE() antlr.TerminalNode {
+ return s.GetToken(CELParserRBRACE, 0)
+}
+
+func (s *CreateStructContext) LBRACE() antlr.TerminalNode {
+ return s.GetToken(CELParserLBRACE, 0)
+}
+
+func (s *CreateStructContext) COMMA() antlr.TerminalNode {
+ return s.GetToken(CELParserCOMMA, 0)
+}
+
+func (s *CreateStructContext) MapInitializerList() IMapInitializerListContext {
+ var t antlr.RuleContext
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IMapInitializerListContext); ok {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IMapInitializerListContext)
+}
+
+func (s *CreateStructContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterCreateStruct(s)
+ }
+}
+
+func (s *CreateStructContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitCreateStruct(s)
+ }
+}
+
+func (s *CreateStructContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitCreateStruct(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type ConstantLiteralContext struct {
+ *PrimaryContext
+}
+
+func NewConstantLiteralContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *ConstantLiteralContext {
+ var p = new(ConstantLiteralContext)
+
+ p.PrimaryContext = NewEmptyPrimaryContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*PrimaryContext))
+
+ return p
+}
+
+func (s *ConstantLiteralContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *ConstantLiteralContext) Literal() ILiteralContext {
+ var t antlr.RuleContext
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(ILiteralContext); ok {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(ILiteralContext)
+}
+
+func (s *ConstantLiteralContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterConstantLiteral(s)
+ }
+}
+
+func (s *ConstantLiteralContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitConstantLiteral(s)
+ }
+}
+
+func (s *ConstantLiteralContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitConstantLiteral(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type NestedContext struct {
+ *PrimaryContext
+ e IExprContext
+}
+
+func NewNestedContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *NestedContext {
+ var p = new(NestedContext)
+
+ p.PrimaryContext = NewEmptyPrimaryContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*PrimaryContext))
+
+ return p
+}
+
+func (s *NestedContext) GetE() IExprContext { return s.e }
+
+func (s *NestedContext) SetE(v IExprContext) { s.e = v }
+
+func (s *NestedContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *NestedContext) LPAREN() antlr.TerminalNode {
+ return s.GetToken(CELParserLPAREN, 0)
+}
+
+func (s *NestedContext) RPAREN() antlr.TerminalNode {
+ return s.GetToken(CELParserRPAREN, 0)
+}
+
+func (s *NestedContext) Expr() IExprContext {
+ var t antlr.RuleContext
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IExprContext); ok {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExprContext)
+}
+
+func (s *NestedContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterNested(s)
+ }
+}
+
+func (s *NestedContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitNested(s)
+ }
+}
+
+func (s *NestedContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitNested(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type CreateMessageContext struct {
+ *PrimaryContext
+ leadingDot antlr.Token
+ _IDENTIFIER antlr.Token
+ ids []antlr.Token
+ s16 antlr.Token
+ ops []antlr.Token
+ op antlr.Token
+ entries IFieldInitializerListContext
+}
+
+func NewCreateMessageContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *CreateMessageContext {
+ var p = new(CreateMessageContext)
+
+ p.PrimaryContext = NewEmptyPrimaryContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*PrimaryContext))
+
+ return p
+}
+
+func (s *CreateMessageContext) GetLeadingDot() antlr.Token { return s.leadingDot }
+
+func (s *CreateMessageContext) Get_IDENTIFIER() antlr.Token { return s._IDENTIFIER }
+
+func (s *CreateMessageContext) GetS16() antlr.Token { return s.s16 }
+
+func (s *CreateMessageContext) GetOp() antlr.Token { return s.op }
+
+func (s *CreateMessageContext) SetLeadingDot(v antlr.Token) { s.leadingDot = v }
+
+func (s *CreateMessageContext) Set_IDENTIFIER(v antlr.Token) { s._IDENTIFIER = v }
+
+func (s *CreateMessageContext) SetS16(v antlr.Token) { s.s16 = v }
+
+func (s *CreateMessageContext) SetOp(v antlr.Token) { s.op = v }
+
+func (s *CreateMessageContext) GetIds() []antlr.Token { return s.ids }
+
+func (s *CreateMessageContext) GetOps() []antlr.Token { return s.ops }
+
+func (s *CreateMessageContext) SetIds(v []antlr.Token) { s.ids = v }
+
+func (s *CreateMessageContext) SetOps(v []antlr.Token) { s.ops = v }
+
+func (s *CreateMessageContext) GetEntries() IFieldInitializerListContext { return s.entries }
+
+func (s *CreateMessageContext) SetEntries(v IFieldInitializerListContext) { s.entries = v }
+
+func (s *CreateMessageContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *CreateMessageContext) RBRACE() antlr.TerminalNode {
+ return s.GetToken(CELParserRBRACE, 0)
+}
+
+func (s *CreateMessageContext) AllIDENTIFIER() []antlr.TerminalNode {
+ return s.GetTokens(CELParserIDENTIFIER)
+}
+
+func (s *CreateMessageContext) IDENTIFIER(i int) antlr.TerminalNode {
+ return s.GetToken(CELParserIDENTIFIER, i)
+}
+
+func (s *CreateMessageContext) LBRACE() antlr.TerminalNode {
+ return s.GetToken(CELParserLBRACE, 0)
+}
+
+func (s *CreateMessageContext) COMMA() antlr.TerminalNode {
+ return s.GetToken(CELParserCOMMA, 0)
+}
+
+func (s *CreateMessageContext) AllDOT() []antlr.TerminalNode {
+ return s.GetTokens(CELParserDOT)
+}
+
+func (s *CreateMessageContext) DOT(i int) antlr.TerminalNode {
+ return s.GetToken(CELParserDOT, i)
+}
+
+func (s *CreateMessageContext) FieldInitializerList() IFieldInitializerListContext {
+ var t antlr.RuleContext
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IFieldInitializerListContext); ok {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IFieldInitializerListContext)
+}
+
+func (s *CreateMessageContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterCreateMessage(s)
+ }
+}
+
+func (s *CreateMessageContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitCreateMessage(s)
+ }
+}
+
+func (s *CreateMessageContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitCreateMessage(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type IdentOrGlobalCallContext struct {
+ *PrimaryContext
+ leadingDot antlr.Token
+ id antlr.Token
+ op antlr.Token
+ args IExprListContext
+}
+
+func NewIdentOrGlobalCallContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *IdentOrGlobalCallContext {
+ var p = new(IdentOrGlobalCallContext)
+
+ p.PrimaryContext = NewEmptyPrimaryContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*PrimaryContext))
+
+ return p
+}
+
+func (s *IdentOrGlobalCallContext) GetLeadingDot() antlr.Token { return s.leadingDot }
+
+func (s *IdentOrGlobalCallContext) GetId() antlr.Token { return s.id }
+
+func (s *IdentOrGlobalCallContext) GetOp() antlr.Token { return s.op }
+
+func (s *IdentOrGlobalCallContext) SetLeadingDot(v antlr.Token) { s.leadingDot = v }
+
+func (s *IdentOrGlobalCallContext) SetId(v antlr.Token) { s.id = v }
+
+func (s *IdentOrGlobalCallContext) SetOp(v antlr.Token) { s.op = v }
+
+func (s *IdentOrGlobalCallContext) GetArgs() IExprListContext { return s.args }
+
+func (s *IdentOrGlobalCallContext) SetArgs(v IExprListContext) { s.args = v }
+
+func (s *IdentOrGlobalCallContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *IdentOrGlobalCallContext) IDENTIFIER() antlr.TerminalNode {
+ return s.GetToken(CELParserIDENTIFIER, 0)
+}
+
+func (s *IdentOrGlobalCallContext) RPAREN() antlr.TerminalNode {
+ return s.GetToken(CELParserRPAREN, 0)
+}
+
+func (s *IdentOrGlobalCallContext) DOT() antlr.TerminalNode {
+ return s.GetToken(CELParserDOT, 0)
+}
+
+func (s *IdentOrGlobalCallContext) LPAREN() antlr.TerminalNode {
+ return s.GetToken(CELParserLPAREN, 0)
+}
+
+func (s *IdentOrGlobalCallContext) ExprList() IExprListContext {
+ var t antlr.RuleContext
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IExprListContext); ok {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExprListContext)
+}
+
+func (s *IdentOrGlobalCallContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterIdentOrGlobalCall(s)
+ }
+}
+
+func (s *IdentOrGlobalCallContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitIdentOrGlobalCall(s)
+ }
+}
+
+func (s *IdentOrGlobalCallContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitIdentOrGlobalCall(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
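+// Primary parses the `primary` rule (rule index 16). A single adaptive
+// prediction over the token stream selects one of six labeled alternatives:
+// IdentOrGlobalCall, Nested, CreateList, CreateStruct, CreateMessage, or
+// ConstantLiteral. Recognition errors raised while parsing are recovered in
+// the deferred handler and reported through the parser's error handler.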
+func (p *CELParser) Primary() (localctx IPrimaryContext) {
+ this := p
+ _ = this
+
+ localctx = NewPrimaryContext(p, p.GetParserRuleContext(), p.GetState())
+ p.EnterRule(localctx, 16, CELParserRULE_primary)
+ var _la int
+
+ defer func() {
+ p.ExitRule()
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
+ p.SetState(180)
+ p.GetErrorHandler().Sync(p)
+ switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 25, p.GetParserRuleContext()) {
+ case 1:
+ localctx = NewIdentOrGlobalCallContext(p, localctx)
+ p.EnterOuterAlt(localctx, 1)
+ p.SetState(130)
+ p.GetErrorHandler().Sync(p)
+ _la = p.GetTokenStream().LA(1)
+
+ if _la == CELParserDOT {
+ {
+ p.SetState(129)
+
+ var _m = p.Match(CELParserDOT)
+
+ localctx.(*IdentOrGlobalCallContext).leadingDot = _m
+ }
+
+ }
+ {
+ p.SetState(132)
+
+ var _m = p.Match(CELParserIDENTIFIER)
+
+ localctx.(*IdentOrGlobalCallContext).id = _m
+ }
+ p.SetState(138)
+ p.GetErrorHandler().Sync(p)
+
+ if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 16, p.GetParserRuleContext()) == 1 {
+ {
+ p.SetState(133)
+
+ var _m = p.Match(CELParserLPAREN)
+
+ localctx.(*IdentOrGlobalCallContext).op = _m
+ }
+ p.SetState(135)
+ p.GetErrorHandler().Sync(p)
+ _la = p.GetTokenStream().LA(1)
+
+ if (int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&135762105344) != 0 {
+ {
+ p.SetState(134)
+
+ var _x = p.ExprList()
+
+ localctx.(*IdentOrGlobalCallContext).args = _x
+ }
+
+ }
+ {
+ p.SetState(137)
+ p.Match(CELParserRPAREN)
+ }
+
+ }
+
+ case 2:
+ localctx = NewNestedContext(p, localctx)
+ p.EnterOuterAlt(localctx, 2)
+ {
+ p.SetState(140)
+ p.Match(CELParserLPAREN)
+ }
+ {
+ p.SetState(141)
+
+ var _x = p.Expr()
+
+ localctx.(*NestedContext).e = _x
+ }
+ {
+ p.SetState(142)
+ p.Match(CELParserRPAREN)
+ }
+
+ case 3:
+ localctx = NewCreateListContext(p, localctx)
+ p.EnterOuterAlt(localctx, 3)
+ {
+ p.SetState(144)
+
+ var _m = p.Match(CELParserLBRACKET)
+
+ localctx.(*CreateListContext).op = _m
+ }
+ p.SetState(146)
+ p.GetErrorHandler().Sync(p)
+ _la = p.GetTokenStream().LA(1)
+
+ if (int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&135763153920) != 0 {
+ {
+ p.SetState(145)
+
+ var _x = p.ListInit()
+
+ localctx.(*CreateListContext).elems = _x
+ }
+
+ }
+ p.SetState(149)
+ p.GetErrorHandler().Sync(p)
+ _la = p.GetTokenStream().LA(1)
+
+ if _la == CELParserCOMMA {
+ {
+ p.SetState(148)
+ p.Match(CELParserCOMMA)
+ }
+
+ }
+ {
+ p.SetState(151)
+ p.Match(CELParserRPRACKET)
+ }
+
+ case 4:
+ localctx = NewCreateStructContext(p, localctx)
+ p.EnterOuterAlt(localctx, 4)
+ {
+ p.SetState(152)
+
+ var _m = p.Match(CELParserLBRACE)
+
+ localctx.(*CreateStructContext).op = _m
+ }
+ p.SetState(154)
+ p.GetErrorHandler().Sync(p)
+ _la = p.GetTokenStream().LA(1)
+
+ if (int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&135763153920) != 0 {
+ {
+ p.SetState(153)
+
+ var _x = p.MapInitializerList()
+
+ localctx.(*CreateStructContext).entries = _x
+ }
+
+ }
+ p.SetState(157)
+ p.GetErrorHandler().Sync(p)
+ _la = p.GetTokenStream().LA(1)
+
+ if _la == CELParserCOMMA {
+ {
+ p.SetState(156)
+ p.Match(CELParserCOMMA)
+ }
+
+ }
+ {
+ p.SetState(159)
+ p.Match(CELParserRBRACE)
+ }
+
+ case 5:
+ localctx = NewCreateMessageContext(p, localctx)
+ p.EnterOuterAlt(localctx, 5)
+ p.SetState(161)
+ p.GetErrorHandler().Sync(p)
+ _la = p.GetTokenStream().LA(1)
+
+ if _la == CELParserDOT {
+ {
+ p.SetState(160)
+
+ var _m = p.Match(CELParserDOT)
+
+ localctx.(*CreateMessageContext).leadingDot = _m
+ }
+
+ }
+ {
+ p.SetState(163)
+
+ var _m = p.Match(CELParserIDENTIFIER)
+
+ localctx.(*CreateMessageContext)._IDENTIFIER = _m
+ }
+ localctx.(*CreateMessageContext).ids = append(localctx.(*CreateMessageContext).ids, localctx.(*CreateMessageContext)._IDENTIFIER)
+ p.SetState(168)
+ p.GetErrorHandler().Sync(p)
+ _la = p.GetTokenStream().LA(1)
+
+ for _la == CELParserDOT {
+ {
+ p.SetState(164)
+
+ var _m = p.Match(CELParserDOT)
+
+ localctx.(*CreateMessageContext).s16 = _m
+ }
+ localctx.(*CreateMessageContext).ops = append(localctx.(*CreateMessageContext).ops, localctx.(*CreateMessageContext).s16)
+ {
+ p.SetState(165)
+
+ var _m = p.Match(CELParserIDENTIFIER)
+
+ localctx.(*CreateMessageContext)._IDENTIFIER = _m
+ }
+ localctx.(*CreateMessageContext).ids = append(localctx.(*CreateMessageContext).ids, localctx.(*CreateMessageContext)._IDENTIFIER)
+
+ p.SetState(170)
+ p.GetErrorHandler().Sync(p)
+ _la = p.GetTokenStream().LA(1)
+ }
+ {
+ p.SetState(171)
+
+ var _m = p.Match(CELParserLBRACE)
+
+ localctx.(*CreateMessageContext).op = _m
+ }
+ p.SetState(173)
+ p.GetErrorHandler().Sync(p)
+ _la = p.GetTokenStream().LA(1)
+
+ if _la == CELParserQUESTIONMARK || _la == CELParserIDENTIFIER {
+ {
+ p.SetState(172)
+
+ var _x = p.FieldInitializerList()
+
+ localctx.(*CreateMessageContext).entries = _x
+ }
+
+ }
+ p.SetState(176)
+ p.GetErrorHandler().Sync(p)
+ _la = p.GetTokenStream().LA(1)
+
+ if _la == CELParserCOMMA {
+ {
+ p.SetState(175)
+ p.Match(CELParserCOMMA)
+ }
+
+ }
+ {
+ p.SetState(178)
+ p.Match(CELParserRBRACE)
+ }
+
+ case 6:
+ localctx = NewConstantLiteralContext(p, localctx)
+ p.EnterOuterAlt(localctx, 6)
+ {
+ p.SetState(179)
+ p.Literal()
+ }
+
+ }
+
+ return localctx
+}
+
+// IExprListContext is an interface to support dynamic dispatch.
+type IExprListContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+
+ // Get_expr returns the _expr rule context.
+ Get_expr() IExprContext
+
+ // Set_expr sets the _expr rule context.
+ Set_expr(IExprContext)
+
+ // GetE returns the e rule context list.
+ GetE() []IExprContext
+
+ // SetE sets the e rule context list.
+ SetE([]IExprContext)
+
+ // Getter signatures
+ AllExpr() []IExprContext
+ Expr(i int) IExprContext
+ AllCOMMA() []antlr.TerminalNode
+ COMMA(i int) antlr.TerminalNode
+
+ // IsExprListContext differentiates from other interfaces.
+ IsExprListContext()
+}
+
+type ExprListContext struct {
+ *antlr.BaseParserRuleContext
+ parser antlr.Parser
+ _expr IExprContext
+ e []IExprContext
+}
+
+func NewEmptyExprListContext() *ExprListContext {
+ var p = new(ExprListContext)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+ p.RuleIndex = CELParserRULE_exprList
+ return p
+}
+
+func (*ExprListContext) IsExprListContext() {}
+
+func NewExprListContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ExprListContext {
+ var p = new(ExprListContext)
+
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_exprList
+
+ return p
+}
+
+func (s *ExprListContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *ExprListContext) Get_expr() IExprContext { return s._expr }
+
+func (s *ExprListContext) Set_expr(v IExprContext) { s._expr = v }
+
+func (s *ExprListContext) GetE() []IExprContext { return s.e }
+
+func (s *ExprListContext) SetE(v []IExprContext) { s.e = v }
+
+func (s *ExprListContext) AllExpr() []IExprContext {
+ children := s.GetChildren()
+ len := 0
+ for _, ctx := range children {
+ if _, ok := ctx.(IExprContext); ok {
+ len++
+ }
+ }
+
+ tst := make([]IExprContext, len)
+ i := 0
+ for _, ctx := range children {
+ if t, ok := ctx.(IExprContext); ok {
+ tst[i] = t.(IExprContext)
+ i++
+ }
+ }
+
+ return tst
+}
+
+func (s *ExprListContext) Expr(i int) IExprContext {
+ var t antlr.RuleContext
+ j := 0
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IExprContext); ok {
+ if j == i {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ j++
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExprContext)
+}
+
+func (s *ExprListContext) AllCOMMA() []antlr.TerminalNode {
+ return s.GetTokens(CELParserCOMMA)
+}
+
+func (s *ExprListContext) COMMA(i int) antlr.TerminalNode {
+ return s.GetToken(CELParserCOMMA, i)
+}
+
+func (s *ExprListContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *ExprListContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *ExprListContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterExprList(s)
+ }
+}
+
+func (s *ExprListContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitExprList(s)
+ }
+}
+
+func (s *ExprListContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitExprList(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
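+// ExprList parses the `exprList` rule: one expression followed by zero or
+// more `,`-separated expressions. Each parsed expression is appended to the
+// `e` list on the returned context.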
+func (p *CELParser) ExprList() (localctx IExprListContext) {
+ this := p
+ _ = this
+
+ localctx = NewExprListContext(p, p.GetParserRuleContext(), p.GetState())
+ p.EnterRule(localctx, 18, CELParserRULE_exprList)
+ var _la int
+
+ defer func() {
+ p.ExitRule()
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
+ p.EnterOuterAlt(localctx, 1)
+ {
+ p.SetState(182)
+
+ var _x = p.Expr()
+
+ localctx.(*ExprListContext)._expr = _x
+ }
+ localctx.(*ExprListContext).e = append(localctx.(*ExprListContext).e, localctx.(*ExprListContext)._expr)
+ p.SetState(187)
+ p.GetErrorHandler().Sync(p)
+ _la = p.GetTokenStream().LA(1)
+
+ for _la == CELParserCOMMA {
+ {
+ p.SetState(183)
+ p.Match(CELParserCOMMA)
+ }
+ {
+ p.SetState(184)
+
+ var _x = p.Expr()
+
+ localctx.(*ExprListContext)._expr = _x
+ }
+ localctx.(*ExprListContext).e = append(localctx.(*ExprListContext).e, localctx.(*ExprListContext)._expr)
+
+ p.SetState(189)
+ p.GetErrorHandler().Sync(p)
+ _la = p.GetTokenStream().LA(1)
+ }
+
+ return localctx
+}
+
+// IListInitContext is an interface to support dynamic dispatch.
+type IListInitContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+
+ // Get_optExpr returns the _optExpr rule context.
+ Get_optExpr() IOptExprContext
+
+ // Set_optExpr sets the _optExpr rule context.
+ Set_optExpr(IOptExprContext)
+
+ // GetElems returns the elems rule context list.
+ GetElems() []IOptExprContext
+
+ // SetElems sets the elems rule context list.
+ SetElems([]IOptExprContext)
+
+ // Getter signatures
+ AllOptExpr() []IOptExprContext
+ OptExpr(i int) IOptExprContext
+ AllCOMMA() []antlr.TerminalNode
+ COMMA(i int) antlr.TerminalNode
+
+ // IsListInitContext differentiates from other interfaces.
+ IsListInitContext()
+}
+
+type ListInitContext struct {
+ *antlr.BaseParserRuleContext
+ parser antlr.Parser
+ _optExpr IOptExprContext
+ elems []IOptExprContext
+}
+
+func NewEmptyListInitContext() *ListInitContext {
+ var p = new(ListInitContext)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+ p.RuleIndex = CELParserRULE_listInit
+ return p
+}
+
+func (*ListInitContext) IsListInitContext() {}
+
+func NewListInitContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ListInitContext {
+ var p = new(ListInitContext)
+
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_listInit
+
+ return p
+}
+
+func (s *ListInitContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *ListInitContext) Get_optExpr() IOptExprContext { return s._optExpr }
+
+func (s *ListInitContext) Set_optExpr(v IOptExprContext) { s._optExpr = v }
+
+func (s *ListInitContext) GetElems() []IOptExprContext { return s.elems }
+
+func (s *ListInitContext) SetElems(v []IOptExprContext) { s.elems = v }
+
+func (s *ListInitContext) AllOptExpr() []IOptExprContext {
+ children := s.GetChildren()
+ len := 0
+ for _, ctx := range children {
+ if _, ok := ctx.(IOptExprContext); ok {
+ len++
+ }
+ }
+
+ tst := make([]IOptExprContext, len)
+ i := 0
+ for _, ctx := range children {
+ if t, ok := ctx.(IOptExprContext); ok {
+ tst[i] = t.(IOptExprContext)
+ i++
+ }
+ }
+
+ return tst
+}
+
+func (s *ListInitContext) OptExpr(i int) IOptExprContext {
+ var t antlr.RuleContext
+ j := 0
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IOptExprContext); ok {
+ if j == i {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ j++
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IOptExprContext)
+}
+
+func (s *ListInitContext) AllCOMMA() []antlr.TerminalNode {
+ return s.GetTokens(CELParserCOMMA)
+}
+
+func (s *ListInitContext) COMMA(i int) antlr.TerminalNode {
+ return s.GetToken(CELParserCOMMA, i)
+}
+
+func (s *ListInitContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *ListInitContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *ListInitContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterListInit(s)
+ }
+}
+
+func (s *ListInitContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitListInit(s)
+ }
+}
+
+func (s *ListInitContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitListInit(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
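+// ListInit parses the `listInit` rule: one or more `,`-separated optExpr
+// elements, appended to the `elems` list on the returned context. The loop is
+// driven by adaptive prediction, so a trailing comma is left for the
+// enclosing rule to consume.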
+func (p *CELParser) ListInit() (localctx IListInitContext) {
+ this := p
+ _ = this
+
+ localctx = NewListInitContext(p, p.GetParserRuleContext(), p.GetState())
+ p.EnterRule(localctx, 20, CELParserRULE_listInit)
+
+ defer func() {
+ p.ExitRule()
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
+ var _alt int
+
+ p.EnterOuterAlt(localctx, 1)
+ {
+ p.SetState(190)
+
+ var _x = p.OptExpr()
+
+ localctx.(*ListInitContext)._optExpr = _x
+ }
+ localctx.(*ListInitContext).elems = append(localctx.(*ListInitContext).elems, localctx.(*ListInitContext)._optExpr)
+ p.SetState(195)
+ p.GetErrorHandler().Sync(p)
+ _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 27, p.GetParserRuleContext())
+
+ for _alt != 2 && _alt != antlr.ATNInvalidAltNumber {
+ if _alt == 1 {
+ {
+ p.SetState(191)
+ p.Match(CELParserCOMMA)
+ }
+ {
+ p.SetState(192)
+
+ var _x = p.OptExpr()
+
+ localctx.(*ListInitContext)._optExpr = _x
+ }
+ localctx.(*ListInitContext).elems = append(localctx.(*ListInitContext).elems, localctx.(*ListInitContext)._optExpr)
+
+ }
+ p.SetState(197)
+ p.GetErrorHandler().Sync(p)
+ _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 27, p.GetParserRuleContext())
+ }
+
+ return localctx
+}
+
+// IFieldInitializerListContext is an interface to support dynamic dispatch.
+type IFieldInitializerListContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+
+ // GetS21 returns the s21 token.
+ GetS21() antlr.Token
+
+ // SetS21 sets the s21 token.
+ SetS21(antlr.Token)
+
+ // GetCols returns the cols token list.
+ GetCols() []antlr.Token
+
+ // SetCols sets the cols token list.
+ SetCols([]antlr.Token)
+
+ // Get_optField returns the _optField rule context.
+ Get_optField() IOptFieldContext
+
+ // Get_expr returns the _expr rule context.
+ Get_expr() IExprContext
+
+ // Set_optField sets the _optField rule context.
+ Set_optField(IOptFieldContext)
+
+ // Set_expr sets the _expr rule context.
+ Set_expr(IExprContext)
+
+ // GetFields returns the fields rule context list.
+ GetFields() []IOptFieldContext
+
+ // GetValues returns the values rule context list.
+ GetValues() []IExprContext
+
+ // SetFields sets the fields rule context list.
+ SetFields([]IOptFieldContext)
+
+ // SetValues sets the values rule context list.
+ SetValues([]IExprContext)
+
+ // Getter signatures
+ AllOptField() []IOptFieldContext
+ OptField(i int) IOptFieldContext
+ AllCOLON() []antlr.TerminalNode
+ COLON(i int) antlr.TerminalNode
+ AllExpr() []IExprContext
+ Expr(i int) IExprContext
+ AllCOMMA() []antlr.TerminalNode
+ COMMA(i int) antlr.TerminalNode
+
+ // IsFieldInitializerListContext differentiates from other interfaces.
+ IsFieldInitializerListContext()
+}
+
+type FieldInitializerListContext struct {
+ *antlr.BaseParserRuleContext
+ parser antlr.Parser
+ _optField IOptFieldContext
+ fields []IOptFieldContext
+ s21 antlr.Token
+ cols []antlr.Token
+ _expr IExprContext
+ values []IExprContext
+}
+
+func NewEmptyFieldInitializerListContext() *FieldInitializerListContext {
+ var p = new(FieldInitializerListContext)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+ p.RuleIndex = CELParserRULE_fieldInitializerList
+ return p
+}
+
+func (*FieldInitializerListContext) IsFieldInitializerListContext() {}
+
+func NewFieldInitializerListContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *FieldInitializerListContext {
+ var p = new(FieldInitializerListContext)
+
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_fieldInitializerList
+
+ return p
+}
+
+func (s *FieldInitializerListContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *FieldInitializerListContext) GetS21() antlr.Token { return s.s21 }
+
+func (s *FieldInitializerListContext) SetS21(v antlr.Token) { s.s21 = v }
+
+func (s *FieldInitializerListContext) GetCols() []antlr.Token { return s.cols }
+
+func (s *FieldInitializerListContext) SetCols(v []antlr.Token) { s.cols = v }
+
+func (s *FieldInitializerListContext) Get_optField() IOptFieldContext { return s._optField }
+
+func (s *FieldInitializerListContext) Get_expr() IExprContext { return s._expr }
+
+func (s *FieldInitializerListContext) Set_optField(v IOptFieldContext) { s._optField = v }
+
+func (s *FieldInitializerListContext) Set_expr(v IExprContext) { s._expr = v }
+
+func (s *FieldInitializerListContext) GetFields() []IOptFieldContext { return s.fields }
+
+func (s *FieldInitializerListContext) GetValues() []IExprContext { return s.values }
+
+func (s *FieldInitializerListContext) SetFields(v []IOptFieldContext) { s.fields = v }
+
+func (s *FieldInitializerListContext) SetValues(v []IExprContext) { s.values = v }
+
+func (s *FieldInitializerListContext) AllOptField() []IOptFieldContext {
+ children := s.GetChildren()
+ len := 0
+ for _, ctx := range children {
+ if _, ok := ctx.(IOptFieldContext); ok {
+ len++
+ }
+ }
+
+ tst := make([]IOptFieldContext, len)
+ i := 0
+ for _, ctx := range children {
+ if t, ok := ctx.(IOptFieldContext); ok {
+ tst[i] = t.(IOptFieldContext)
+ i++
+ }
+ }
+
+ return tst
+}
+
+func (s *FieldInitializerListContext) OptField(i int) IOptFieldContext {
+ var t antlr.RuleContext
+ j := 0
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IOptFieldContext); ok {
+ if j == i {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ j++
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IOptFieldContext)
+}
+
+func (s *FieldInitializerListContext) AllCOLON() []antlr.TerminalNode {
+ return s.GetTokens(CELParserCOLON)
+}
+
+func (s *FieldInitializerListContext) COLON(i int) antlr.TerminalNode {
+ return s.GetToken(CELParserCOLON, i)
+}
+
+func (s *FieldInitializerListContext) AllExpr() []IExprContext {
+ children := s.GetChildren()
+ len := 0
+ for _, ctx := range children {
+ if _, ok := ctx.(IExprContext); ok {
+ len++
+ }
+ }
+
+ tst := make([]IExprContext, len)
+ i := 0
+ for _, ctx := range children {
+ if t, ok := ctx.(IExprContext); ok {
+ tst[i] = t.(IExprContext)
+ i++
+ }
+ }
+
+ return tst
+}
+
+func (s *FieldInitializerListContext) Expr(i int) IExprContext {
+ var t antlr.RuleContext
+ j := 0
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IExprContext); ok {
+ if j == i {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ j++
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExprContext)
+}
+
+func (s *FieldInitializerListContext) AllCOMMA() []antlr.TerminalNode {
+ return s.GetTokens(CELParserCOMMA)
+}
+
+func (s *FieldInitializerListContext) COMMA(i int) antlr.TerminalNode {
+ return s.GetToken(CELParserCOMMA, i)
+}
+
+func (s *FieldInitializerListContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *FieldInitializerListContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *FieldInitializerListContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterFieldInitializerList(s)
+ }
+}
+
+func (s *FieldInitializerListContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitFieldInitializerList(s)
+ }
+}
+
+func (s *FieldInitializerListContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitFieldInitializerList(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
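+// FieldInitializerList parses the `fieldInitializerList` rule: one or more
+// `,`-separated `optField : expr` pairs. Fields, `:` tokens, and value
+// expressions are accumulated in the parallel `fields`, `cols`, and `values`
+// lists on the returned context.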
+func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContext) {
+ this := p
+ _ = this
+
+ localctx = NewFieldInitializerListContext(p, p.GetParserRuleContext(), p.GetState())
+ p.EnterRule(localctx, 22, CELParserRULE_fieldInitializerList)
+
+ defer func() {
+ p.ExitRule()
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
+ var _alt int
+
+ p.EnterOuterAlt(localctx, 1)
+ {
+ p.SetState(198)
+
+ var _x = p.OptField()
+
+ localctx.(*FieldInitializerListContext)._optField = _x
+ }
+ localctx.(*FieldInitializerListContext).fields = append(localctx.(*FieldInitializerListContext).fields, localctx.(*FieldInitializerListContext)._optField)
+ {
+ p.SetState(199)
+
+ var _m = p.Match(CELParserCOLON)
+
+ localctx.(*FieldInitializerListContext).s21 = _m
+ }
+ localctx.(*FieldInitializerListContext).cols = append(localctx.(*FieldInitializerListContext).cols, localctx.(*FieldInitializerListContext).s21)
+ {
+ p.SetState(200)
+
+ var _x = p.Expr()
+
+ localctx.(*FieldInitializerListContext)._expr = _x
+ }
+ localctx.(*FieldInitializerListContext).values = append(localctx.(*FieldInitializerListContext).values, localctx.(*FieldInitializerListContext)._expr)
+ p.SetState(208)
+ p.GetErrorHandler().Sync(p)
+ _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 28, p.GetParserRuleContext())
+
+ for _alt != 2 && _alt != antlr.ATNInvalidAltNumber {
+ if _alt == 1 {
+ {
+ p.SetState(201)
+ p.Match(CELParserCOMMA)
+ }
+ {
+ p.SetState(202)
+
+ var _x = p.OptField()
+
+ localctx.(*FieldInitializerListContext)._optField = _x
+ }
+ localctx.(*FieldInitializerListContext).fields = append(localctx.(*FieldInitializerListContext).fields, localctx.(*FieldInitializerListContext)._optField)
+ {
+ p.SetState(203)
+
+ var _m = p.Match(CELParserCOLON)
+
+ localctx.(*FieldInitializerListContext).s21 = _m
+ }
+ localctx.(*FieldInitializerListContext).cols = append(localctx.(*FieldInitializerListContext).cols, localctx.(*FieldInitializerListContext).s21)
+ {
+ p.SetState(204)
+
+ var _x = p.Expr()
+
+ localctx.(*FieldInitializerListContext)._expr = _x
+ }
+ localctx.(*FieldInitializerListContext).values = append(localctx.(*FieldInitializerListContext).values, localctx.(*FieldInitializerListContext)._expr)
+
+ }
+ p.SetState(210)
+ p.GetErrorHandler().Sync(p)
+ _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 28, p.GetParserRuleContext())
+ }
+
+ return localctx
+}
+
+// IOptFieldContext is an interface to support dynamic dispatch.
+type IOptFieldContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+
+ // GetOpt returns the opt token.
+ GetOpt() antlr.Token
+
+ // SetOpt sets the opt token.
+ SetOpt(antlr.Token)
+
+ // Getter signatures
+ IDENTIFIER() antlr.TerminalNode
+ QUESTIONMARK() antlr.TerminalNode
+
+ // IsOptFieldContext differentiates from other interfaces.
+ IsOptFieldContext()
+}
+
+type OptFieldContext struct {
+ *antlr.BaseParserRuleContext
+ parser antlr.Parser
+ opt antlr.Token
+}
+
+func NewEmptyOptFieldContext() *OptFieldContext {
+ var p = new(OptFieldContext)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+ p.RuleIndex = CELParserRULE_optField
+ return p
+}
+
+func (*OptFieldContext) IsOptFieldContext() {}
+
+func NewOptFieldContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *OptFieldContext {
+ var p = new(OptFieldContext)
+
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_optField
+
+ return p
+}
+
+func (s *OptFieldContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *OptFieldContext) GetOpt() antlr.Token { return s.opt }
+
+func (s *OptFieldContext) SetOpt(v antlr.Token) { s.opt = v }
+
+func (s *OptFieldContext) IDENTIFIER() antlr.TerminalNode {
+ return s.GetToken(CELParserIDENTIFIER, 0)
+}
+
+func (s *OptFieldContext) QUESTIONMARK() antlr.TerminalNode {
+ return s.GetToken(CELParserQUESTIONMARK, 0)
+}
+
+func (s *OptFieldContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *OptFieldContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *OptFieldContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterOptField(s)
+ }
+}
+
+func (s *OptFieldContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitOptField(s)
+ }
+}
+
+func (s *OptFieldContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitOptField(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
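+// OptField parses the `optField` rule: an IDENTIFIER optionally preceded by a
+// `?` marking the field as optional; the marker token, when present, is
+// stored in `opt`.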
+func (p *CELParser) OptField() (localctx IOptFieldContext) {
+ this := p
+ _ = this
+
+ localctx = NewOptFieldContext(p, p.GetParserRuleContext(), p.GetState())
+ p.EnterRule(localctx, 24, CELParserRULE_optField)
+ var _la int
+
+ defer func() {
+ p.ExitRule()
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
+ p.EnterOuterAlt(localctx, 1)
+ p.SetState(212)
+ p.GetErrorHandler().Sync(p)
+ _la = p.GetTokenStream().LA(1)
+
+ if _la == CELParserQUESTIONMARK {
+ {
+ p.SetState(211)
+
+ var _m = p.Match(CELParserQUESTIONMARK)
+
+ localctx.(*OptFieldContext).opt = _m
+ }
+
+ }
+ {
+ p.SetState(214)
+ p.Match(CELParserIDENTIFIER)
+ }
+
+ return localctx
+}
+
+// IMapInitializerListContext is an interface to support dynamic dispatch.
+type IMapInitializerListContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+
+ // GetS21 returns the s21 token.
+ GetS21() antlr.Token
+
+ // SetS21 sets the s21 token.
+ SetS21(antlr.Token)
+
+ // GetCols returns the cols token list.
+ GetCols() []antlr.Token
+
+ // SetCols sets the cols token list.
+ SetCols([]antlr.Token)
+
+ // Get_optExpr returns the _optExpr rule context.
+ Get_optExpr() IOptExprContext
+
+ // Get_expr returns the _expr rule context.
+ Get_expr() IExprContext
+
+ // Set_optExpr sets the _optExpr rule context.
+ Set_optExpr(IOptExprContext)
+
+ // Set_expr sets the _expr rule context.
+ Set_expr(IExprContext)
+
+ // GetKeys returns the keys rule context list.
+ GetKeys() []IOptExprContext
+
+ // GetValues returns the values rule context list.
+ GetValues() []IExprContext
+
+ // SetKeys sets the keys rule context list.
+ SetKeys([]IOptExprContext)
+
+ // SetValues sets the values rule context list.
+ SetValues([]IExprContext)
+
+ // Getter signatures
+ AllOptExpr() []IOptExprContext
+ OptExpr(i int) IOptExprContext
+ AllCOLON() []antlr.TerminalNode
+ COLON(i int) antlr.TerminalNode
+ AllExpr() []IExprContext
+ Expr(i int) IExprContext
+ AllCOMMA() []antlr.TerminalNode
+ COMMA(i int) antlr.TerminalNode
+
+ // IsMapInitializerListContext differentiates from other interfaces.
+ IsMapInitializerListContext()
+}
+
+type MapInitializerListContext struct {
+ *antlr.BaseParserRuleContext
+ parser antlr.Parser
+ _optExpr IOptExprContext
+ keys []IOptExprContext
+ s21 antlr.Token
+ cols []antlr.Token
+ _expr IExprContext
+ values []IExprContext
+}
+
+func NewEmptyMapInitializerListContext() *MapInitializerListContext {
+ var p = new(MapInitializerListContext)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+ p.RuleIndex = CELParserRULE_mapInitializerList
+ return p
+}
+
+func (*MapInitializerListContext) IsMapInitializerListContext() {}
+
+func NewMapInitializerListContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *MapInitializerListContext {
+ var p = new(MapInitializerListContext)
+
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_mapInitializerList
+
+ return p
+}
+
+func (s *MapInitializerListContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *MapInitializerListContext) GetS21() antlr.Token { return s.s21 }
+
+func (s *MapInitializerListContext) SetS21(v antlr.Token) { s.s21 = v }
+
+func (s *MapInitializerListContext) GetCols() []antlr.Token { return s.cols }
+
+func (s *MapInitializerListContext) SetCols(v []antlr.Token) { s.cols = v }
+
+func (s *MapInitializerListContext) Get_optExpr() IOptExprContext { return s._optExpr }
+
+func (s *MapInitializerListContext) Get_expr() IExprContext { return s._expr }
+
+func (s *MapInitializerListContext) Set_optExpr(v IOptExprContext) { s._optExpr = v }
+
+func (s *MapInitializerListContext) Set_expr(v IExprContext) { s._expr = v }
+
+func (s *MapInitializerListContext) GetKeys() []IOptExprContext { return s.keys }
+
+func (s *MapInitializerListContext) GetValues() []IExprContext { return s.values }
+
+func (s *MapInitializerListContext) SetKeys(v []IOptExprContext) { s.keys = v }
+
+func (s *MapInitializerListContext) SetValues(v []IExprContext) { s.values = v }
+
+func (s *MapInitializerListContext) AllOptExpr() []IOptExprContext {
+ children := s.GetChildren()
+ len := 0
+ for _, ctx := range children {
+ if _, ok := ctx.(IOptExprContext); ok {
+ len++
+ }
+ }
+
+ tst := make([]IOptExprContext, len)
+ i := 0
+ for _, ctx := range children {
+ if t, ok := ctx.(IOptExprContext); ok {
+ tst[i] = t.(IOptExprContext)
+ i++
+ }
+ }
+
+ return tst
+}
+
+func (s *MapInitializerListContext) OptExpr(i int) IOptExprContext {
+ var t antlr.RuleContext
+ j := 0
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IOptExprContext); ok {
+ if j == i {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ j++
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IOptExprContext)
+}
+
+func (s *MapInitializerListContext) AllCOLON() []antlr.TerminalNode {
+ return s.GetTokens(CELParserCOLON)
+}
+
+func (s *MapInitializerListContext) COLON(i int) antlr.TerminalNode {
+ return s.GetToken(CELParserCOLON, i)
+}
+
+func (s *MapInitializerListContext) AllExpr() []IExprContext {
+ children := s.GetChildren()
+ len := 0
+ for _, ctx := range children {
+ if _, ok := ctx.(IExprContext); ok {
+ len++
+ }
+ }
+
+ tst := make([]IExprContext, len)
+ i := 0
+ for _, ctx := range children {
+ if t, ok := ctx.(IExprContext); ok {
+ tst[i] = t.(IExprContext)
+ i++
+ }
+ }
+
+ return tst
+}
+
+func (s *MapInitializerListContext) Expr(i int) IExprContext {
+ var t antlr.RuleContext
+ j := 0
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IExprContext); ok {
+ if j == i {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ j++
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExprContext)
+}
+
+func (s *MapInitializerListContext) AllCOMMA() []antlr.TerminalNode {
+ return s.GetTokens(CELParserCOMMA)
+}
+
+func (s *MapInitializerListContext) COMMA(i int) antlr.TerminalNode {
+ return s.GetToken(CELParserCOMMA, i)
+}
+
+func (s *MapInitializerListContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *MapInitializerListContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *MapInitializerListContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterMapInitializerList(s)
+ }
+}
+
+func (s *MapInitializerListContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitMapInitializerList(s)
+ }
+}
+
+func (s *MapInitializerListContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitMapInitializerList(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
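+// MapInitializerList parses the `mapInitializerList` rule: one or more
+// `,`-separated `optExpr : expr` entries. Keys, `:` tokens, and value
+// expressions are accumulated in the parallel `keys`, `cols`, and `values`
+// lists on the returned context.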
+func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) {
+ this := p
+ _ = this
+
+ localctx = NewMapInitializerListContext(p, p.GetParserRuleContext(), p.GetState())
+ p.EnterRule(localctx, 26, CELParserRULE_mapInitializerList)
+
+ defer func() {
+ p.ExitRule()
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
+ var _alt int
+
+ p.EnterOuterAlt(localctx, 1)
+ {
+ p.SetState(216)
+
+ var _x = p.OptExpr()
+
+ localctx.(*MapInitializerListContext)._optExpr = _x
+ }
+ localctx.(*MapInitializerListContext).keys = append(localctx.(*MapInitializerListContext).keys, localctx.(*MapInitializerListContext)._optExpr)
+ {
+ p.SetState(217)
+
+ var _m = p.Match(CELParserCOLON)
+
+ localctx.(*MapInitializerListContext).s21 = _m
+ }
+ localctx.(*MapInitializerListContext).cols = append(localctx.(*MapInitializerListContext).cols, localctx.(*MapInitializerListContext).s21)
+ {
+ p.SetState(218)
+
+ var _x = p.Expr()
+
+ localctx.(*MapInitializerListContext)._expr = _x
+ }
+ localctx.(*MapInitializerListContext).values = append(localctx.(*MapInitializerListContext).values, localctx.(*MapInitializerListContext)._expr)
+ p.SetState(226)
+ p.GetErrorHandler().Sync(p)
+ _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 30, p.GetParserRuleContext())
+
+ for _alt != 2 && _alt != antlr.ATNInvalidAltNumber {
+ if _alt == 1 {
+ {
+ p.SetState(219)
+ p.Match(CELParserCOMMA)
+ }
+ {
+ p.SetState(220)
+
+ var _x = p.OptExpr()
+
+ localctx.(*MapInitializerListContext)._optExpr = _x
+ }
+ localctx.(*MapInitializerListContext).keys = append(localctx.(*MapInitializerListContext).keys, localctx.(*MapInitializerListContext)._optExpr)
+ {
+ p.SetState(221)
+
+ var _m = p.Match(CELParserCOLON)
+
+ localctx.(*MapInitializerListContext).s21 = _m
+ }
+ localctx.(*MapInitializerListContext).cols = append(localctx.(*MapInitializerListContext).cols, localctx.(*MapInitializerListContext).s21)
+ {
+ p.SetState(222)
+
+ var _x = p.Expr()
+
+ localctx.(*MapInitializerListContext)._expr = _x
+ }
+ localctx.(*MapInitializerListContext).values = append(localctx.(*MapInitializerListContext).values, localctx.(*MapInitializerListContext)._expr)
+
+ }
+ p.SetState(228)
+ p.GetErrorHandler().Sync(p)
+ _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 30, p.GetParserRuleContext())
+ }
+
+ return localctx
+}
+
+// IOptExprContext is an interface to support dynamic dispatch.
+type IOptExprContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+
+ // GetOpt returns the opt token.
+ GetOpt() antlr.Token
+
+ // SetOpt sets the opt token.
+ SetOpt(antlr.Token)
+
+ // GetE returns the e rule context.
+ GetE() IExprContext
+
+ // SetE sets the e rule context.
+ SetE(IExprContext)
+
+ // Getter signatures
+ Expr() IExprContext
+ QUESTIONMARK() antlr.TerminalNode
+
+ // IsOptExprContext differentiates from other interfaces.
+ IsOptExprContext()
+}
+
+type OptExprContext struct {
+ *antlr.BaseParserRuleContext
+ parser antlr.Parser
+ opt antlr.Token
+ e IExprContext
+}
+
+func NewEmptyOptExprContext() *OptExprContext {
+ var p = new(OptExprContext)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+ p.RuleIndex = CELParserRULE_optExpr
+ return p
+}
+
+func (*OptExprContext) IsOptExprContext() {}
+
+func NewOptExprContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *OptExprContext {
+ var p = new(OptExprContext)
+
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_optExpr
+
+ return p
+}
+
+func (s *OptExprContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *OptExprContext) GetOpt() antlr.Token { return s.opt }
+
+func (s *OptExprContext) SetOpt(v antlr.Token) { s.opt = v }
+
+func (s *OptExprContext) GetE() IExprContext { return s.e }
+
+func (s *OptExprContext) SetE(v IExprContext) { s.e = v }
+
+func (s *OptExprContext) Expr() IExprContext {
+ var t antlr.RuleContext
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IExprContext); ok {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExprContext)
+}
+
+func (s *OptExprContext) QUESTIONMARK() antlr.TerminalNode {
+ return s.GetToken(CELParserQUESTIONMARK, 0)
+}
+
+func (s *OptExprContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *OptExprContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *OptExprContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterOptExpr(s)
+ }
+}
+
+func (s *OptExprContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitOptExpr(s)
+ }
+}
+
+func (s *OptExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitOptExpr(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
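+// OptExpr parses the `optExpr` rule: an expression optionally preceded by a
+// `?`; the marker token, when present, is stored in `opt` and the expression
+// in `e`.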
+func (p *CELParser) OptExpr() (localctx IOptExprContext) {
+ this := p
+ _ = this
+
+ localctx = NewOptExprContext(p, p.GetParserRuleContext(), p.GetState())
+ p.EnterRule(localctx, 28, CELParserRULE_optExpr)
+ var _la int
+
+ defer func() {
+ p.ExitRule()
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
+ p.EnterOuterAlt(localctx, 1)
+ p.SetState(230)
+ p.GetErrorHandler().Sync(p)
+ _la = p.GetTokenStream().LA(1)
+
+ if _la == CELParserQUESTIONMARK {
+ {
+ p.SetState(229)
+
+ var _m = p.Match(CELParserQUESTIONMARK)
+
+ localctx.(*OptExprContext).opt = _m
+ }
+
+ }
+ {
+ p.SetState(232)
+
+ var _x = p.Expr()
+
+ localctx.(*OptExprContext).e = _x
+ }
+
+ return localctx
+}
+
+// ILiteralContext is an interface to support dynamic dispatch.
+type ILiteralContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+ // IsLiteralContext differentiates from other interfaces.
+ IsLiteralContext()
+}
+
+type LiteralContext struct {
+ *antlr.BaseParserRuleContext
+ parser antlr.Parser
+}
+
+func NewEmptyLiteralContext() *LiteralContext {
+ var p = new(LiteralContext)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+ p.RuleIndex = CELParserRULE_literal
+ return p
+}
+
+func (*LiteralContext) IsLiteralContext() {}
+
+func NewLiteralContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *LiteralContext {
+ var p = new(LiteralContext)
+
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_literal
+
+ return p
+}
+
+func (s *LiteralContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *LiteralContext) CopyFrom(ctx *LiteralContext) {
+ s.BaseParserRuleContext.CopyFrom(ctx.BaseParserRuleContext)
+}
+
+func (s *LiteralContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *LiteralContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+type BytesContext struct {
+ *LiteralContext
+ tok antlr.Token
+}
+
+func NewBytesContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *BytesContext {
+ var p = new(BytesContext)
+
+ p.LiteralContext = NewEmptyLiteralContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*LiteralContext))
+
+ return p
+}
+
+func (s *BytesContext) GetTok() antlr.Token { return s.tok }
+
+func (s *BytesContext) SetTok(v antlr.Token) { s.tok = v }
+
+func (s *BytesContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *BytesContext) BYTES() antlr.TerminalNode {
+ return s.GetToken(CELParserBYTES, 0)
+}
+
+func (s *BytesContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterBytes(s)
+ }
+}
+
+func (s *BytesContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitBytes(s)
+ }
+}
+
+func (s *BytesContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitBytes(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type UintContext struct {
+ *LiteralContext
+ tok antlr.Token
+}
+
+func NewUintContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *UintContext {
+ var p = new(UintContext)
+
+ p.LiteralContext = NewEmptyLiteralContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*LiteralContext))
+
+ return p
+}
+
+func (s *UintContext) GetTok() antlr.Token { return s.tok }
+
+func (s *UintContext) SetTok(v antlr.Token) { s.tok = v }
+
+func (s *UintContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *UintContext) NUM_UINT() antlr.TerminalNode {
+ return s.GetToken(CELParserNUM_UINT, 0)
+}
+
+func (s *UintContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterUint(s)
+ }
+}
+
+func (s *UintContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitUint(s)
+ }
+}
+
+func (s *UintContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitUint(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type NullContext struct {
+ *LiteralContext
+ tok antlr.Token
+}
+
+func NewNullContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *NullContext {
+ var p = new(NullContext)
+
+ p.LiteralContext = NewEmptyLiteralContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*LiteralContext))
+
+ return p
+}
+
+func (s *NullContext) GetTok() antlr.Token { return s.tok }
+
+func (s *NullContext) SetTok(v antlr.Token) { s.tok = v }
+
+func (s *NullContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *NullContext) NUL() antlr.TerminalNode {
+ return s.GetToken(CELParserNUL, 0)
+}
+
+func (s *NullContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterNull(s)
+ }
+}
+
+func (s *NullContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitNull(s)
+ }
+}
+
+func (s *NullContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitNull(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type BoolFalseContext struct {
+ *LiteralContext
+ tok antlr.Token
+}
+
+func NewBoolFalseContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *BoolFalseContext {
+ var p = new(BoolFalseContext)
+
+ p.LiteralContext = NewEmptyLiteralContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*LiteralContext))
+
+ return p
+}
+
+func (s *BoolFalseContext) GetTok() antlr.Token { return s.tok }
+
+func (s *BoolFalseContext) SetTok(v antlr.Token) { s.tok = v }
+
+func (s *BoolFalseContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *BoolFalseContext) CEL_FALSE() antlr.TerminalNode {
+ return s.GetToken(CELParserCEL_FALSE, 0)
+}
+
+func (s *BoolFalseContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterBoolFalse(s)
+ }
+}
+
+func (s *BoolFalseContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitBoolFalse(s)
+ }
+}
+
+func (s *BoolFalseContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitBoolFalse(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type StringContext struct {
+ *LiteralContext
+ tok antlr.Token
+}
+
+func NewStringContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *StringContext {
+ var p = new(StringContext)
+
+ p.LiteralContext = NewEmptyLiteralContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*LiteralContext))
+
+ return p
+}
+
+func (s *StringContext) GetTok() antlr.Token { return s.tok }
+
+func (s *StringContext) SetTok(v antlr.Token) { s.tok = v }
+
+func (s *StringContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *StringContext) STRING() antlr.TerminalNode {
+ return s.GetToken(CELParserSTRING, 0)
+}
+
+func (s *StringContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterString(s)
+ }
+}
+
+func (s *StringContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitString(s)
+ }
+}
+
+func (s *StringContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitString(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type DoubleContext struct {
+ *LiteralContext
+ sign antlr.Token
+ tok antlr.Token
+}
+
+func NewDoubleContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *DoubleContext {
+ var p = new(DoubleContext)
+
+ p.LiteralContext = NewEmptyLiteralContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*LiteralContext))
+
+ return p
+}
+
+func (s *DoubleContext) GetSign() antlr.Token { return s.sign }
+
+func (s *DoubleContext) GetTok() antlr.Token { return s.tok }
+
+func (s *DoubleContext) SetSign(v antlr.Token) { s.sign = v }
+
+func (s *DoubleContext) SetTok(v antlr.Token) { s.tok = v }
+
+func (s *DoubleContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *DoubleContext) NUM_FLOAT() antlr.TerminalNode {
+ return s.GetToken(CELParserNUM_FLOAT, 0)
+}
+
+func (s *DoubleContext) MINUS() antlr.TerminalNode {
+ return s.GetToken(CELParserMINUS, 0)
+}
+
+func (s *DoubleContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterDouble(s)
+ }
+}
+
+func (s *DoubleContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitDouble(s)
+ }
+}
+
+func (s *DoubleContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitDouble(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type BoolTrueContext struct {
+ *LiteralContext
+ tok antlr.Token
+}
+
+func NewBoolTrueContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *BoolTrueContext {
+ var p = new(BoolTrueContext)
+
+ p.LiteralContext = NewEmptyLiteralContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*LiteralContext))
+
+ return p
+}
+
+func (s *BoolTrueContext) GetTok() antlr.Token { return s.tok }
+
+func (s *BoolTrueContext) SetTok(v antlr.Token) { s.tok = v }
+
+func (s *BoolTrueContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *BoolTrueContext) CEL_TRUE() antlr.TerminalNode {
+ return s.GetToken(CELParserCEL_TRUE, 0)
+}
+
+func (s *BoolTrueContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterBoolTrue(s)
+ }
+}
+
+func (s *BoolTrueContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitBoolTrue(s)
+ }
+}
+
+func (s *BoolTrueContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitBoolTrue(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type IntContext struct {
+ *LiteralContext
+ sign antlr.Token
+ tok antlr.Token
+}
+
+func NewIntContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *IntContext {
+ var p = new(IntContext)
+
+ p.LiteralContext = NewEmptyLiteralContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*LiteralContext))
+
+ return p
+}
+
+func (s *IntContext) GetSign() antlr.Token { return s.sign }
+
+func (s *IntContext) GetTok() antlr.Token { return s.tok }
+
+func (s *IntContext) SetSign(v antlr.Token) { s.sign = v }
+
+func (s *IntContext) SetTok(v antlr.Token) { s.tok = v }
+
+func (s *IntContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *IntContext) NUM_INT() antlr.TerminalNode {
+ return s.GetToken(CELParserNUM_INT, 0)
+}
+
+func (s *IntContext) MINUS() antlr.TerminalNode {
+ return s.GetToken(CELParserMINUS, 0)
+}
+
+func (s *IntContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterInt(s)
+ }
+}
+
+func (s *IntContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitInt(s)
+ }
+}
+
+func (s *IntContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitInt(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+func (p *CELParser) Literal() (localctx ILiteralContext) {
+ this := p
+ _ = this
+
+ localctx = NewLiteralContext(p, p.GetParserRuleContext(), p.GetState())
+ p.EnterRule(localctx, 30, CELParserRULE_literal)
+ var _la int
+
+ defer func() {
+ p.ExitRule()
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
+ p.SetState(248)
+ p.GetErrorHandler().Sync(p)
+ switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 34, p.GetParserRuleContext()) {
+ case 1:
+ localctx = NewIntContext(p, localctx)
+ p.EnterOuterAlt(localctx, 1)
+ p.SetState(235)
+ p.GetErrorHandler().Sync(p)
+ _la = p.GetTokenStream().LA(1)
+
+ if _la == CELParserMINUS {
+ {
+ p.SetState(234)
+
+ var _m = p.Match(CELParserMINUS)
+
+ localctx.(*IntContext).sign = _m
+ }
+
+ }
+ {
+ p.SetState(237)
+
+ var _m = p.Match(CELParserNUM_INT)
+
+ localctx.(*IntContext).tok = _m
+ }
+
+ case 2:
+ localctx = NewUintContext(p, localctx)
+ p.EnterOuterAlt(localctx, 2)
+ {
+ p.SetState(238)
+
+ var _m = p.Match(CELParserNUM_UINT)
+
+ localctx.(*UintContext).tok = _m
+ }
+
+ case 3:
+ localctx = NewDoubleContext(p, localctx)
+ p.EnterOuterAlt(localctx, 3)
+ p.SetState(240)
+ p.GetErrorHandler().Sync(p)
+ _la = p.GetTokenStream().LA(1)
+
+ if _la == CELParserMINUS {
+ {
+ p.SetState(239)
+
+ var _m = p.Match(CELParserMINUS)
+
+ localctx.(*DoubleContext).sign = _m
+ }
+
+ }
+ {
+ p.SetState(242)
+
+ var _m = p.Match(CELParserNUM_FLOAT)
+
+ localctx.(*DoubleContext).tok = _m
+ }
+
+ case 4:
+ localctx = NewStringContext(p, localctx)
+ p.EnterOuterAlt(localctx, 4)
+ {
+ p.SetState(243)
+
+ var _m = p.Match(CELParserSTRING)
+
+ localctx.(*StringContext).tok = _m
+ }
+
+ case 5:
+ localctx = NewBytesContext(p, localctx)
+ p.EnterOuterAlt(localctx, 5)
+ {
+ p.SetState(244)
+
+ var _m = p.Match(CELParserBYTES)
+
+ localctx.(*BytesContext).tok = _m
+ }
+
+ case 6:
+ localctx = NewBoolTrueContext(p, localctx)
+ p.EnterOuterAlt(localctx, 6)
+ {
+ p.SetState(245)
+
+ var _m = p.Match(CELParserCEL_TRUE)
+
+ localctx.(*BoolTrueContext).tok = _m
+ }
+
+ case 7:
+ localctx = NewBoolFalseContext(p, localctx)
+ p.EnterOuterAlt(localctx, 7)
+ {
+ p.SetState(246)
+
+ var _m = p.Match(CELParserCEL_FALSE)
+
+ localctx.(*BoolFalseContext).tok = _m
+ }
+
+ case 8:
+ localctx = NewNullContext(p, localctx)
+ p.EnterOuterAlt(localctx, 8)
+ {
+ p.SetState(247)
+
+ var _m = p.Match(CELParserNUL)
+
+ localctx.(*NullContext).tok = _m
+ }
+
+ }
+
+ return localctx
+}
+
+func (p *CELParser) Sempred(localctx antlr.RuleContext, ruleIndex, predIndex int) bool {
+ switch ruleIndex {
+ case 4:
+ var t *RelationContext = nil
+ if localctx != nil {
+ t = localctx.(*RelationContext)
+ }
+ return p.Relation_Sempred(t, predIndex)
+
+ case 5:
+ var t *CalcContext = nil
+ if localctx != nil {
+ t = localctx.(*CalcContext)
+ }
+ return p.Calc_Sempred(t, predIndex)
+
+ case 7:
+ var t *MemberContext = nil
+ if localctx != nil {
+ t = localctx.(*MemberContext)
+ }
+ return p.Member_Sempred(t, predIndex)
+
+ default:
+ panic("No predicate with index: " + fmt.Sprint(ruleIndex))
+ }
+}
+
+func (p *CELParser) Relation_Sempred(localctx antlr.RuleContext, predIndex int) bool {
+ this := p
+ _ = this
+
+ switch predIndex {
+ case 0:
+ return p.Precpred(p.GetParserRuleContext(), 1)
+
+ default:
+ panic("No predicate with index: " + fmt.Sprint(predIndex))
+ }
+}
+
+func (p *CELParser) Calc_Sempred(localctx antlr.RuleContext, predIndex int) bool {
+ this := p
+ _ = this
+
+ switch predIndex {
+ case 1:
+ return p.Precpred(p.GetParserRuleContext(), 2)
+
+ case 2:
+ return p.Precpred(p.GetParserRuleContext(), 1)
+
+ default:
+ panic("No predicate with index: " + fmt.Sprint(predIndex))
+ }
+}
+
+func (p *CELParser) Member_Sempred(localctx antlr.RuleContext, predIndex int) bool {
+ this := p
+ _ = this
+
+ switch predIndex {
+ case 3:
+ return p.Precpred(p.GetParserRuleContext(), 3)
+
+ case 4:
+ return p.Precpred(p.GetParserRuleContext(), 2)
+
+ case 5:
+ return p.Precpred(p.GetParserRuleContext(), 1)
+
+ default:
+ panic("No predicate with index: " + fmt.Sprint(predIndex))
+ }
+}
diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_visitor.go b/vendor/github.com/google/cel-go/parser/gen/cel_visitor.go
new file mode 100644
index 0000000..2c54e2c
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/cel_visitor.go
@@ -0,0 +1,108 @@
+// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT.
+
+package gen // CEL
+import "github.com/antlr/antlr4/runtime/Go/antlr/v4"
+
+// A complete Visitor for a parse tree produced by CELParser.
+type CELVisitor interface {
+ antlr.ParseTreeVisitor
+
+ // Visit a parse tree produced by CELParser#start.
+ VisitStart(ctx *StartContext) interface{}
+
+ // Visit a parse tree produced by CELParser#expr.
+ VisitExpr(ctx *ExprContext) interface{}
+
+ // Visit a parse tree produced by CELParser#conditionalOr.
+ VisitConditionalOr(ctx *ConditionalOrContext) interface{}
+
+ // Visit a parse tree produced by CELParser#conditionalAnd.
+ VisitConditionalAnd(ctx *ConditionalAndContext) interface{}
+
+ // Visit a parse tree produced by CELParser#relation.
+ VisitRelation(ctx *RelationContext) interface{}
+
+ // Visit a parse tree produced by CELParser#calc.
+ VisitCalc(ctx *CalcContext) interface{}
+
+ // Visit a parse tree produced by CELParser#MemberExpr.
+ VisitMemberExpr(ctx *MemberExprContext) interface{}
+
+ // Visit a parse tree produced by CELParser#LogicalNot.
+ VisitLogicalNot(ctx *LogicalNotContext) interface{}
+
+ // Visit a parse tree produced by CELParser#Negate.
+ VisitNegate(ctx *NegateContext) interface{}
+
+ // Visit a parse tree produced by CELParser#MemberCall.
+ VisitMemberCall(ctx *MemberCallContext) interface{}
+
+ // Visit a parse tree produced by CELParser#Select.
+ VisitSelect(ctx *SelectContext) interface{}
+
+ // Visit a parse tree produced by CELParser#PrimaryExpr.
+ VisitPrimaryExpr(ctx *PrimaryExprContext) interface{}
+
+ // Visit a parse tree produced by CELParser#Index.
+ VisitIndex(ctx *IndexContext) interface{}
+
+ // Visit a parse tree produced by CELParser#IdentOrGlobalCall.
+ VisitIdentOrGlobalCall(ctx *IdentOrGlobalCallContext) interface{}
+
+ // Visit a parse tree produced by CELParser#Nested.
+ VisitNested(ctx *NestedContext) interface{}
+
+ // Visit a parse tree produced by CELParser#CreateList.
+ VisitCreateList(ctx *CreateListContext) interface{}
+
+ // Visit a parse tree produced by CELParser#CreateStruct.
+ VisitCreateStruct(ctx *CreateStructContext) interface{}
+
+ // Visit a parse tree produced by CELParser#CreateMessage.
+ VisitCreateMessage(ctx *CreateMessageContext) interface{}
+
+ // Visit a parse tree produced by CELParser#ConstantLiteral.
+ VisitConstantLiteral(ctx *ConstantLiteralContext) interface{}
+
+ // Visit a parse tree produced by CELParser#exprList.
+ VisitExprList(ctx *ExprListContext) interface{}
+
+ // Visit a parse tree produced by CELParser#listInit.
+ VisitListInit(ctx *ListInitContext) interface{}
+
+ // Visit a parse tree produced by CELParser#fieldInitializerList.
+ VisitFieldInitializerList(ctx *FieldInitializerListContext) interface{}
+
+ // Visit a parse tree produced by CELParser#optField.
+ VisitOptField(ctx *OptFieldContext) interface{}
+
+ // Visit a parse tree produced by CELParser#mapInitializerList.
+ VisitMapInitializerList(ctx *MapInitializerListContext) interface{}
+
+ // Visit a parse tree produced by CELParser#optExpr.
+ VisitOptExpr(ctx *OptExprContext) interface{}
+
+ // Visit a parse tree produced by CELParser#Int.
+ VisitInt(ctx *IntContext) interface{}
+
+ // Visit a parse tree produced by CELParser#Uint.
+ VisitUint(ctx *UintContext) interface{}
+
+ // Visit a parse tree produced by CELParser#Double.
+ VisitDouble(ctx *DoubleContext) interface{}
+
+ // Visit a parse tree produced by CELParser#String.
+ VisitString(ctx *StringContext) interface{}
+
+ // Visit a parse tree produced by CELParser#Bytes.
+ VisitBytes(ctx *BytesContext) interface{}
+
+ // Visit a parse tree produced by CELParser#BoolTrue.
+ VisitBoolTrue(ctx *BoolTrueContext) interface{}
+
+ // Visit a parse tree produced by CELParser#BoolFalse.
+ VisitBoolFalse(ctx *BoolFalseContext) interface{}
+
+ // Visit a parse tree produced by CELParser#Null.
+ VisitNull(ctx *NullContext) interface{}
+}
diff --git a/vendor/github.com/google/cel-go/parser/gen/doc.go b/vendor/github.com/google/cel-go/parser/gen/doc.go
new file mode 100644
index 0000000..57edd44
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package gen contains all of the ANTLR-generated sources used by the cel-go parser.
+package gen
diff --git a/vendor/github.com/google/cel-go/parser/gen/generate.sh b/vendor/github.com/google/cel-go/parser/gen/generate.sh
new file mode 100644
index 0000000..389107c
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/generate.sh
@@ -0,0 +1,35 @@
+#!/bin/bash -eu
+#
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# To regenerate the CEL lexer/parser statically do the following:
+# 1. Download the latest antlr tool from https://www.antlr.org/download.html
+# 2. Copy the downloaded jar to the gen directory. It will have a name
+# like antlr-<version>-complete.jar.
+# 3. Modify the script below to refer to the current ANTLR version.
+# 4. Execute the generation script from the gen directory.
+# 5. Delete the jar and commit the regenerated sources.
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+# Generate AntLR artifacts.
+java -Xmx500M -cp ${DIR}/antlr-4.12.0-complete.jar org.antlr.v4.Tool \
+ -Dlanguage=Go \
+ -package gen \
+ -o ${DIR} \
+ -visitor ${DIR}/CEL.g4
+
diff --git a/vendor/github.com/google/cel-go/parser/helper.go b/vendor/github.com/google/cel-go/parser/helper.go
new file mode 100644
index 0000000..a5f29e3
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/helper.go
@@ -0,0 +1,600 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import (
+ "sync"
+
+ antlr "github.com/antlr/antlr4/runtime/Go/antlr/v4"
+
+ "github.com/google/cel-go/common"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+type parserHelper struct {
+ source common.Source
+ nextID int64
+ positions map[int64]int32
+ macroCalls map[int64]*exprpb.Expr
+}
+
+func newParserHelper(source common.Source) *parserHelper {
+ return &parserHelper{
+ source: source,
+ nextID: 1,
+ positions: make(map[int64]int32),
+ macroCalls: make(map[int64]*exprpb.Expr),
+ }
+}
+
+func (p *parserHelper) getSourceInfo() *exprpb.SourceInfo {
+ return &exprpb.SourceInfo{
+ Location: p.source.Description(),
+ Positions: p.positions,
+ LineOffsets: p.source.LineOffsets(),
+ MacroCalls: p.macroCalls}
+}
+
+func (p *parserHelper) newLiteral(ctx any, value *exprpb.Constant) *exprpb.Expr {
+ exprNode := p.newExpr(ctx)
+ exprNode.ExprKind = &exprpb.Expr_ConstExpr{ConstExpr: value}
+ return exprNode
+}
+
+func (p *parserHelper) newLiteralBool(ctx any, value bool) *exprpb.Expr {
+ return p.newLiteral(ctx,
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: value}})
+}
+
+func (p *parserHelper) newLiteralString(ctx any, value string) *exprpb.Expr {
+ return p.newLiteral(ctx,
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: value}})
+}
+
+func (p *parserHelper) newLiteralBytes(ctx any, value []byte) *exprpb.Expr {
+ return p.newLiteral(ctx,
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: value}})
+}
+
+func (p *parserHelper) newLiteralInt(ctx any, value int64) *exprpb.Expr {
+ return p.newLiteral(ctx,
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: value}})
+}
+
+func (p *parserHelper) newLiteralUint(ctx any, value uint64) *exprpb.Expr {
+ return p.newLiteral(ctx, &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: value}})
+}
+
+func (p *parserHelper) newLiteralDouble(ctx any, value float64) *exprpb.Expr {
+ return p.newLiteral(ctx,
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: value}})
+}
+
+func (p *parserHelper) newIdent(ctx any, name string) *exprpb.Expr {
+ exprNode := p.newExpr(ctx)
+ exprNode.ExprKind = &exprpb.Expr_IdentExpr{IdentExpr: &exprpb.Expr_Ident{Name: name}}
+ return exprNode
+}
+
+func (p *parserHelper) newSelect(ctx any, operand *exprpb.Expr, field string) *exprpb.Expr {
+ exprNode := p.newExpr(ctx)
+ exprNode.ExprKind = &exprpb.Expr_SelectExpr{
+ SelectExpr: &exprpb.Expr_Select{Operand: operand, Field: field}}
+ return exprNode
+}
+
+func (p *parserHelper) newPresenceTest(ctx any, operand *exprpb.Expr, field string) *exprpb.Expr {
+ exprNode := p.newExpr(ctx)
+ exprNode.ExprKind = &exprpb.Expr_SelectExpr{
+ SelectExpr: &exprpb.Expr_Select{Operand: operand, Field: field, TestOnly: true}}
+ return exprNode
+}
+
+func (p *parserHelper) newGlobalCall(ctx any, function string, args ...*exprpb.Expr) *exprpb.Expr {
+ exprNode := p.newExpr(ctx)
+ exprNode.ExprKind = &exprpb.Expr_CallExpr{
+ CallExpr: &exprpb.Expr_Call{Function: function, Args: args}}
+ return exprNode
+}
+
+func (p *parserHelper) newReceiverCall(ctx any, function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr {
+ exprNode := p.newExpr(ctx)
+ exprNode.ExprKind = &exprpb.Expr_CallExpr{
+ CallExpr: &exprpb.Expr_Call{Function: function, Target: target, Args: args}}
+ return exprNode
+}
+
+func (p *parserHelper) newList(ctx any, elements []*exprpb.Expr, optionals ...int32) *exprpb.Expr {
+ exprNode := p.newExpr(ctx)
+ exprNode.ExprKind = &exprpb.Expr_ListExpr{
+ ListExpr: &exprpb.Expr_CreateList{
+ Elements: elements,
+ OptionalIndices: optionals,
+ }}
+ return exprNode
+}
+
+func (p *parserHelper) newMap(ctx any, entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr {
+ exprNode := p.newExpr(ctx)
+ exprNode.ExprKind = &exprpb.Expr_StructExpr{
+ StructExpr: &exprpb.Expr_CreateStruct{Entries: entries}}
+ return exprNode
+}
+
+func (p *parserHelper) newMapEntry(entryID int64, key *exprpb.Expr, value *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry {
+ return &exprpb.Expr_CreateStruct_Entry{
+ Id: entryID,
+ KeyKind: &exprpb.Expr_CreateStruct_Entry_MapKey{MapKey: key},
+ Value: value,
+ OptionalEntry: optional,
+ }
+}
+
+func (p *parserHelper) newObject(ctx any, typeName string, entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr {
+ exprNode := p.newExpr(ctx)
+ exprNode.ExprKind = &exprpb.Expr_StructExpr{
+ StructExpr: &exprpb.Expr_CreateStruct{
+ MessageName: typeName,
+ Entries: entries,
+ },
+ }
+ return exprNode
+}
+
+func (p *parserHelper) newObjectField(fieldID int64, field string, value *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry {
+ return &exprpb.Expr_CreateStruct_Entry{
+ Id: fieldID,
+ KeyKind: &exprpb.Expr_CreateStruct_Entry_FieldKey{FieldKey: field},
+ Value: value,
+ OptionalEntry: optional,
+ }
+}
+
+func (p *parserHelper) newComprehension(ctx any, iterVar string,
+ iterRange *exprpb.Expr,
+ accuVar string,
+ accuInit *exprpb.Expr,
+ condition *exprpb.Expr,
+ step *exprpb.Expr,
+ result *exprpb.Expr) *exprpb.Expr {
+ exprNode := p.newExpr(ctx)
+ exprNode.ExprKind = &exprpb.Expr_ComprehensionExpr{
+ ComprehensionExpr: &exprpb.Expr_Comprehension{
+ AccuVar: accuVar,
+ AccuInit: accuInit,
+ IterVar: iterVar,
+ IterRange: iterRange,
+ LoopCondition: condition,
+ LoopStep: step,
+ Result: result}}
+ return exprNode
+}
+
+func (p *parserHelper) newExpr(ctx any) *exprpb.Expr {
+ id, isID := ctx.(int64)
+ if isID {
+ return &exprpb.Expr{Id: id}
+ }
+ return &exprpb.Expr{Id: p.id(ctx)}
+}
+
+func (p *parserHelper) id(ctx any) int64 {
+ var location common.Location
+ switch c := ctx.(type) {
+ case antlr.ParserRuleContext:
+ token := c.GetStart()
+ location = p.source.NewLocation(token.GetLine(), token.GetColumn())
+ case antlr.Token:
+ token := c
+ location = p.source.NewLocation(token.GetLine(), token.GetColumn())
+ case common.Location:
+ location = c
+ default:
+ // This should only happen if the ctx is nil
+ return -1
+ }
+ id := p.nextID
+ p.positions[id], _ = p.source.LocationOffset(location)
+ p.nextID++
+ return id
+}
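+
+// For example (hypothetical input), when parsing `a + b` the parser requests
+// one ID per node in visit order: one for the identifier `a`, one for the
+// `_+_` call (located at the operator token), and one for `b`; each ID's
+// character offset lands in p.positions for later error reporting.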
+
+func (p *parserHelper) getLocation(id int64) common.Location {
+ characterOffset := p.positions[id]
+ location, _ := p.source.OffsetLocation(characterOffset)
+ return location
+}
+
+// buildMacroCallArg iterates the expression and returns a new expression
+// where all macros have been replaced by their IDs in MacroCalls
+func (p *parserHelper) buildMacroCallArg(expr *exprpb.Expr) *exprpb.Expr {
+ if _, found := p.macroCalls[expr.GetId()]; found {
+ return &exprpb.Expr{Id: expr.GetId()}
+ }
+
+ switch expr.GetExprKind().(type) {
+ case *exprpb.Expr_CallExpr:
+ // Iterate the AST from `expr` recursively looking for macros. Because we are at most
+ // starting from the top level macro, this recursion is bounded by the size of the AST. This
+ // means that the depth check on the AST during parsing will catch recursion overflows
+ // before we get to here.
+ macroTarget := expr.GetCallExpr().GetTarget()
+ if macroTarget != nil {
+ macroTarget = p.buildMacroCallArg(macroTarget)
+ }
+ macroArgs := make([]*exprpb.Expr, len(expr.GetCallExpr().GetArgs()))
+ for index, arg := range expr.GetCallExpr().GetArgs() {
+ macroArgs[index] = p.buildMacroCallArg(arg)
+ }
+ return &exprpb.Expr{
+ Id: expr.GetId(),
+ ExprKind: &exprpb.Expr_CallExpr{
+ CallExpr: &exprpb.Expr_Call{
+ Target: macroTarget,
+ Function: expr.GetCallExpr().GetFunction(),
+ Args: macroArgs,
+ },
+ },
+ }
+ case *exprpb.Expr_ListExpr:
+ listExpr := expr.GetListExpr()
+ macroListArgs := make([]*exprpb.Expr, len(listExpr.GetElements()))
+ for i, elem := range listExpr.GetElements() {
+ macroListArgs[i] = p.buildMacroCallArg(elem)
+ }
+ return &exprpb.Expr{
+ Id: expr.GetId(),
+ ExprKind: &exprpb.Expr_ListExpr{
+ ListExpr: &exprpb.Expr_CreateList{
+ Elements: macroListArgs,
+ OptionalIndices: listExpr.GetOptionalIndices(),
+ },
+ },
+ }
+ }
+
+ return expr
+}
+
+// addMacroCall adds the macro to the MacroCalls map in source info. If a macro has args/subargs/target
+// that are macros, their IDs will be stored instead for later self-lookups.
+func (p *parserHelper) addMacroCall(exprID int64, function string, target *exprpb.Expr, args ...*exprpb.Expr) {
+ macroTarget := target
+ if target != nil {
+ if _, found := p.macroCalls[target.GetId()]; found {
+ macroTarget = &exprpb.Expr{Id: target.GetId()}
+ } else {
+ macroTarget = p.buildMacroCallArg(target)
+ }
+ }
+
+ macroArgs := make([]*exprpb.Expr, len(args))
+ for index, arg := range args {
+ macroArgs[index] = p.buildMacroCallArg(arg)
+ }
+
+ p.macroCalls[exprID] = &exprpb.Expr{
+ ExprKind: &exprpb.Expr_CallExpr{
+ CallExpr: &exprpb.Expr_Call{
+ Target: macroTarget,
+ Function: function,
+ Args: macroArgs,
+ },
+ },
+ }
+}
+
+// logicManager compacts logical trees into a more efficient structure which is semantically
+// equivalent to the logic graph constructed by the ANTLR parser.
+//
+// The purpose of the logicManager is to ensure a compact serialization format for the logical &&, ||
+// operators, which have a tendency to create long DAGs skewed in one direction. Since the
+// operators are commutative, re-ordering the terms *must not* affect the evaluation result.
+//
+// The logic manager will either render N chained && / || terms as a single variadic logical
+// call with N terms, or will rebalance the tree. Rebalancing the terms is a safe, if somewhat
+// controversial, choice as it alters the traditional order-of-execution assumptions present in
+// most expressions.
+type logicManager struct {
+ helper *parserHelper
+ function string
+ terms []*exprpb.Expr
+ ops []int64
+ variadicASTs bool
+}
+
+// newVariadicLogicManager creates a logic manager instance bound to a specific function and its first term.
+func newVariadicLogicManager(h *parserHelper, function string, term *exprpb.Expr) *logicManager {
+ return &logicManager{
+ helper: h,
+ function: function,
+ terms: []*exprpb.Expr{term},
+ ops: []int64{},
+ variadicASTs: true,
+ }
+}
+
+// newBalancingLogicManager creates a logic manager instance bound to a specific function and its first term.
+func newBalancingLogicManager(h *parserHelper, function string, term *exprpb.Expr) *logicManager {
+ return &logicManager{
+ helper: h,
+ function: function,
+ terms: []*exprpb.Expr{term},
+ ops: []int64{},
+ variadicASTs: false,
+ }
+}
+
+// addTerm adds an operation identifier and term to the set of terms to be balanced.
+func (l *logicManager) addTerm(op int64, term *exprpb.Expr) {
+ l.terms = append(l.terms, term)
+ l.ops = append(l.ops, op)
+}
+
+// toExpr renders the logic graph into an Expr value, either balancing a tree of logical
+// operations or creating a variadic representation of the logical operator.
+func (l *logicManager) toExpr() *exprpb.Expr {
+ if len(l.terms) == 1 {
+ return l.terms[0]
+ }
+ if l.variadicASTs {
+ return l.helper.newGlobalCall(l.ops[0], l.function, l.terms...)
+ }
+ return l.balancedTree(0, len(l.ops)-1)
+}
+
+// balancedTree recursively balances the terms provided to a commutative operator.
+func (l *logicManager) balancedTree(lo, hi int) *exprpb.Expr {
+ mid := (lo + hi + 1) / 2
+
+ var left *exprpb.Expr
+ if mid == lo {
+ left = l.terms[mid]
+ } else {
+ left = l.balancedTree(lo, mid-1)
+ }
+
+ var right *exprpb.Expr
+ if mid == hi {
+ right = l.terms[mid+1]
+ } else {
+ right = l.balancedTree(mid+1, hi)
+ }
+ return l.helper.newGlobalCall(l.ops[mid], l.function, left, right)
+}
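+
+// As a worked example (hypothetical expression): for `a && b && c && d` the
+// manager holds terms [a, b, c, d] and three operator IDs, so
+// balancedTree(0, 2) selects mid=1 and produces
+//
+//	_&&_(_&&_(a, b), _&&_(c, d))
+//
+// instead of the left-skewed _&&_(_&&_(_&&_(a, b), c), d) that a naive
+// left-to-right reduction would build.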
+
+type exprHelper struct {
+ *parserHelper
+ id int64
+}
+
+func (e *exprHelper) nextMacroID() int64 {
+ return e.parserHelper.id(e.parserHelper.getLocation(e.id))
+}
+
+// Copy implements the ExprHelper interface method by producing a copy of the input Expr value
+// with a fresh set of numeric identifiers for the Expr and all its descendants.
+func (e *exprHelper) Copy(expr *exprpb.Expr) *exprpb.Expr {
+ copy := e.parserHelper.newExpr(e.parserHelper.getLocation(expr.GetId()))
+ switch expr.GetExprKind().(type) {
+ case *exprpb.Expr_ConstExpr:
+ copy.ExprKind = &exprpb.Expr_ConstExpr{ConstExpr: expr.GetConstExpr()}
+ case *exprpb.Expr_IdentExpr:
+ copy.ExprKind = &exprpb.Expr_IdentExpr{IdentExpr: expr.GetIdentExpr()}
+ case *exprpb.Expr_SelectExpr:
+ op := expr.GetSelectExpr().GetOperand()
+ copy.ExprKind = &exprpb.Expr_SelectExpr{SelectExpr: &exprpb.Expr_Select{
+ Operand: e.Copy(op),
+ Field: expr.GetSelectExpr().GetField(),
+ TestOnly: expr.GetSelectExpr().GetTestOnly(),
+ }}
+ case *exprpb.Expr_CallExpr:
+ call := expr.GetCallExpr()
+ target := call.GetTarget()
+ if target != nil {
+ target = e.Copy(target)
+ }
+ args := call.GetArgs()
+ argsCopy := make([]*exprpb.Expr, len(args))
+ for i, arg := range args {
+ argsCopy[i] = e.Copy(arg)
+ }
+ copy.ExprKind = &exprpb.Expr_CallExpr{
+ CallExpr: &exprpb.Expr_Call{
+ Function: call.GetFunction(),
+ Target: target,
+ Args: argsCopy,
+ },
+ }
+ case *exprpb.Expr_ListExpr:
+ elems := expr.GetListExpr().GetElements()
+ elemsCopy := make([]*exprpb.Expr, len(elems))
+ for i, elem := range elems {
+ elemsCopy[i] = e.Copy(elem)
+ }
+ copy.ExprKind = &exprpb.Expr_ListExpr{
+ ListExpr: &exprpb.Expr_CreateList{Elements: elemsCopy},
+ }
+ case *exprpb.Expr_StructExpr:
+ entries := expr.GetStructExpr().GetEntries()
+ entriesCopy := make([]*exprpb.Expr_CreateStruct_Entry, len(entries))
+ for i, entry := range entries {
+ entryCopy := &exprpb.Expr_CreateStruct_Entry{}
+ entryCopy.Id = e.nextMacroID()
+ switch entry.GetKeyKind().(type) {
+ case *exprpb.Expr_CreateStruct_Entry_FieldKey:
+ entryCopy.KeyKind = &exprpb.Expr_CreateStruct_Entry_FieldKey{
+ FieldKey: entry.GetFieldKey(),
+ }
+ case *exprpb.Expr_CreateStruct_Entry_MapKey:
+ entryCopy.KeyKind = &exprpb.Expr_CreateStruct_Entry_MapKey{
+ MapKey: e.Copy(entry.GetMapKey()),
+ }
+ }
+ entryCopy.Value = e.Copy(entry.GetValue())
+ entriesCopy[i] = entryCopy
+ }
+ copy.ExprKind = &exprpb.Expr_StructExpr{
+ StructExpr: &exprpb.Expr_CreateStruct{
+ MessageName: expr.GetStructExpr().GetMessageName(),
+ Entries: entriesCopy,
+ },
+ }
+ case *exprpb.Expr_ComprehensionExpr:
+ iterRange := e.Copy(expr.GetComprehensionExpr().GetIterRange())
+ accuInit := e.Copy(expr.GetComprehensionExpr().GetAccuInit())
+ cond := e.Copy(expr.GetComprehensionExpr().GetLoopCondition())
+ step := e.Copy(expr.GetComprehensionExpr().GetLoopStep())
+ result := e.Copy(expr.GetComprehensionExpr().GetResult())
+ copy.ExprKind = &exprpb.Expr_ComprehensionExpr{
+ ComprehensionExpr: &exprpb.Expr_Comprehension{
+ IterRange: iterRange,
+ IterVar: expr.GetComprehensionExpr().GetIterVar(),
+ AccuInit: accuInit,
+ AccuVar: expr.GetComprehensionExpr().GetAccuVar(),
+ LoopCondition: cond,
+ LoopStep: step,
+ Result: result,
+ },
+ }
+ }
+ return copy
+}
+
+// LiteralBool implements the ExprHelper interface method.
+func (e *exprHelper) LiteralBool(value bool) *exprpb.Expr {
+ return e.parserHelper.newLiteralBool(e.nextMacroID(), value)
+}
+
+// LiteralBytes implements the ExprHelper interface method.
+func (e *exprHelper) LiteralBytes(value []byte) *exprpb.Expr {
+ return e.parserHelper.newLiteralBytes(e.nextMacroID(), value)
+}
+
+// LiteralDouble implements the ExprHelper interface method.
+func (e *exprHelper) LiteralDouble(value float64) *exprpb.Expr {
+ return e.parserHelper.newLiteralDouble(e.nextMacroID(), value)
+}
+
+// LiteralInt implements the ExprHelper interface method.
+func (e *exprHelper) LiteralInt(value int64) *exprpb.Expr {
+ return e.parserHelper.newLiteralInt(e.nextMacroID(), value)
+}
+
+// LiteralString implements the ExprHelper interface method.
+func (e *exprHelper) LiteralString(value string) *exprpb.Expr {
+ return e.parserHelper.newLiteralString(e.nextMacroID(), value)
+}
+
+// LiteralUint implements the ExprHelper interface method.
+func (e *exprHelper) LiteralUint(value uint64) *exprpb.Expr {
+ return e.parserHelper.newLiteralUint(e.nextMacroID(), value)
+}
+
+// NewList implements the ExprHelper interface method.
+func (e *exprHelper) NewList(elems ...*exprpb.Expr) *exprpb.Expr {
+ return e.parserHelper.newList(e.nextMacroID(), elems)
+}
+
+// NewMap implements the ExprHelper interface method.
+func (e *exprHelper) NewMap(entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr {
+ return e.parserHelper.newMap(e.nextMacroID(), entries...)
+}
+
+// NewMapEntry implements the ExprHelper interface method.
+func (e *exprHelper) NewMapEntry(key *exprpb.Expr, val *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry {
+ return e.parserHelper.newMapEntry(e.nextMacroID(), key, val, optional)
+}
+
+// NewObject implements the ExprHelper interface method.
+func (e *exprHelper) NewObject(typeName string, fieldInits ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr {
+ return e.parserHelper.newObject(e.nextMacroID(), typeName, fieldInits...)
+}
+
+// NewObjectFieldInit implements the ExprHelper interface method.
+func (e *exprHelper) NewObjectFieldInit(field string, init *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry {
+ return e.parserHelper.newObjectField(e.nextMacroID(), field, init, optional)
+}
+
+// Fold implements the ExprHelper interface method.
+func (e *exprHelper) Fold(iterVar string,
+ iterRange *exprpb.Expr,
+ accuVar string,
+ accuInit *exprpb.Expr,
+ condition *exprpb.Expr,
+ step *exprpb.Expr,
+ result *exprpb.Expr) *exprpb.Expr {
+ return e.parserHelper.newComprehension(
+ e.nextMacroID(), iterVar, iterRange, accuVar, accuInit, condition, step, result)
+}
+
+// Ident implements the ExprHelper interface method.
+func (e *exprHelper) Ident(name string) *exprpb.Expr {
+ return e.parserHelper.newIdent(e.nextMacroID(), name)
+}
+
+// AccuIdent implements the ExprHelper interface method.
+func (e *exprHelper) AccuIdent() *exprpb.Expr {
+ return e.parserHelper.newIdent(e.nextMacroID(), AccumulatorName)
+}
+
+// GlobalCall implements the ExprHelper interface method.
+func (e *exprHelper) GlobalCall(function string, args ...*exprpb.Expr) *exprpb.Expr {
+ return e.parserHelper.newGlobalCall(e.nextMacroID(), function, args...)
+}
+
+// ReceiverCall implements the ExprHelper interface method.
+func (e *exprHelper) ReceiverCall(function string,
+ target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr {
+ return e.parserHelper.newReceiverCall(e.nextMacroID(), function, target, args...)
+}
+
+// PresenceTest implements the ExprHelper interface method.
+func (e *exprHelper) PresenceTest(operand *exprpb.Expr, field string) *exprpb.Expr {
+ return e.parserHelper.newPresenceTest(e.nextMacroID(), operand, field)
+}
+
+// Select implements the ExprHelper interface method.
+func (e *exprHelper) Select(operand *exprpb.Expr, field string) *exprpb.Expr {
+ return e.parserHelper.newSelect(e.nextMacroID(), operand, field)
+}
+
+// OffsetLocation implements the ExprHelper interface method.
+func (e *exprHelper) OffsetLocation(exprID int64) common.Location {
+ offset, found := e.parserHelper.positions[exprID]
+ if !found {
+ return common.NoLocation
+ }
+ location, found := e.parserHelper.source.OffsetLocation(offset)
+ if !found {
+ return common.NoLocation
+ }
+ return location
+}
+
+// NewError associates an error message with a given expression id, populating the source offset location of the error if possible.
+func (e *exprHelper) NewError(exprID int64, message string) *common.Error {
+ return common.NewError(exprID, message, e.OffsetLocation(exprID))
+}
+
+var (
+ // Thread-safe pool of ExprHelper values to minimize alloc overhead of ExprHelper creations.
+ exprHelperPool = &sync.Pool{
+ New: func() any {
+ return &exprHelper{}
+ },
+ }
+)
diff --git a/vendor/github.com/google/cel-go/parser/input.go b/vendor/github.com/google/cel-go/parser/input.go
new file mode 100644
index 0000000..810eaff
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/input.go
@@ -0,0 +1,129 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import (
+ antlr "github.com/antlr/antlr4/runtime/Go/antlr/v4"
+
+ "github.com/google/cel-go/common/runes"
+)
+
+type charStream struct {
+ buf runes.Buffer
+ pos int
+ src string
+}
+
+// Consume implements (antlr.CharStream).Consume.
+func (c *charStream) Consume() {
+ if c.pos >= c.buf.Len() {
+ panic("cannot consume EOF")
+ }
+ c.pos++
+}
+
+// LA implements (antlr.CharStream).LA.
+func (c *charStream) LA(offset int) int {
+ if offset == 0 {
+ return 0
+ }
+ if offset < 0 {
+ offset++
+ }
+ pos := c.pos + offset - 1
+ if pos < 0 || pos >= c.buf.Len() {
+ return antlr.TokenEOF
+ }
+ return int(c.buf.Get(pos))
+}
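+
+// For example, with the buffer "abc" and pos == 1, LA(1) returns 'b' (the
+// current rune), LA(2) returns 'c', and LA(-1) returns 'a'; LA(0) returns 0
+// and any out-of-range offset yields antlr.TokenEOF.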
+
+// LT mimics (*antlr.InputStream).LT.
+func (c *charStream) LT(offset int) int {
+ return c.LA(offset)
+}
+
+// Mark implements (antlr.CharStream).Mark.
+func (c *charStream) Mark() int {
+ return -1
+}
+
+// Release implements (antlr.CharStream).Release.
+func (c *charStream) Release(marker int) {}
+
+// Index implements (antlr.CharStream).Index.
+func (c *charStream) Index() int {
+ return c.pos
+}
+
+// Seek implements (antlr.CharStream).Seek.
+func (c *charStream) Seek(index int) {
+ if index <= c.pos {
+ c.pos = index
+ return
+ }
+ if index < c.buf.Len() {
+ c.pos = index
+ } else {
+ c.pos = c.buf.Len()
+ }
+}
+
+// Size implements (antlr.CharStream).Size.
+func (c *charStream) Size() int {
+ return c.buf.Len()
+}
+
+// GetSourceName implements (antlr.CharStream).GetSourceName.
+func (c *charStream) GetSourceName() string {
+ return c.src
+}
+
+// GetText implements (antlr.CharStream).GetText.
+func (c *charStream) GetText(start, stop int) string {
+ if stop >= c.buf.Len() {
+ stop = c.buf.Len() - 1
+ }
+ if start >= c.buf.Len() {
+ return ""
+ }
+ return c.buf.Slice(start, stop+1)
+}
+
+// GetTextFromTokens implements (antlr.CharStream).GetTextFromTokens.
+func (c *charStream) GetTextFromTokens(start, stop antlr.Token) string {
+ if start != nil && stop != nil {
+ return c.GetText(start.GetTokenIndex(), stop.GetTokenIndex())
+ }
+ return ""
+}
+
+// GetTextFromInterval implements (antlr.CharStream).GetTextFromInterval.
+func (c *charStream) GetTextFromInterval(i *antlr.Interval) string {
+ return c.GetText(i.Start, i.Stop)
+}
+
+// String mimics (*antlr.InputStream).String.
+func (c *charStream) String() string {
+ return c.buf.Slice(0, c.buf.Len())
+}
+
+var _ antlr.CharStream = &charStream{}
+
+func newCharStream(buf runes.Buffer, desc string) antlr.CharStream {
+ return &charStream{
+ buf: buf,
+ src: desc,
+ }
+}
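+
+// A minimal usage sketch (assuming runes.NewBuffer is the Buffer constructor
+// exported by github.com/google/cel-go/common/runes):
+//
+//	cs := newCharStream(runes.NewBuffer("a + b"), "<input>")
+//	first := cs.LA(1) // 'a'
+//	cs.Consume()      // advance past 'a'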
diff --git a/vendor/github.com/google/cel-go/parser/macro.go b/vendor/github.com/google/cel-go/parser/macro.go
new file mode 100644
index 0000000..6066e8e
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/macro.go
@@ -0,0 +1,424 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import (
+ "fmt"
+
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/operators"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// NewGlobalMacro creates a Macro for a global function with the specified arg count.
+func NewGlobalMacro(function string, argCount int, expander MacroExpander) Macro {
+ return &macro{
+ function: function,
+ argCount: argCount,
+ expander: expander}
+}
+
+// NewReceiverMacro creates a Macro for a receiver function matching the specified arg count.
+func NewReceiverMacro(function string, argCount int, expander MacroExpander) Macro {
+ return &macro{
+ function: function,
+ argCount: argCount,
+ expander: expander,
+ receiverStyle: true}
+}
+
+// NewGlobalVarArgMacro creates a Macro for a global function with a variable arg count.
+func NewGlobalVarArgMacro(function string, expander MacroExpander) Macro {
+ return &macro{
+ function: function,
+ expander: expander,
+ varArgStyle: true}
+}
+
+// NewReceiverVarArgMacro creates a Macro for a receiver function matching a variable arg count.
+func NewReceiverVarArgMacro(function string, expander MacroExpander) Macro {
+ return &macro{
+ function: function,
+ expander: expander,
+ receiverStyle: true,
+ varArgStyle: true}
+}
+
+// Macro interface for describing the function signature to match and the MacroExpander to apply.
+//
+// Note: when a Macro should apply to multiple overloads (based on arg count) of a given function,
+// a Macro should be created per arg-count.
+type Macro interface {
+ // Function name to match.
+ Function() string
+
+ // ArgCount for the function call.
+ //
+ // When the macro is a var-arg style macro, the return value will be zero, but the MacroKey
+ // will contain a `*` where the arg count would have been.
+ ArgCount() int
+
+ // IsReceiverStyle returns true if the macro matches a receiver style call.
+ IsReceiverStyle() bool
+
+ // MacroKey returns the macro signatures accepted by this macro.
+ //
+ // Format: `<function>:<arg-count>:<is-receiver>`.
+ //
+ // When the macro is a var-arg style macro, the `arg-count` value is represented as a `*`.
+ MacroKey() string
+
+ // Expander returns the MacroExpander to apply when the macro key matches the parsed call
+ // signature.
+ Expander() MacroExpander
+}
+
+// Macro type which declares the function name and arg count expected for the
+// macro, as well as a macro expansion function.
+type macro struct {
+ function string
+ receiverStyle bool
+ varArgStyle bool
+ argCount int
+ expander MacroExpander
+}
+
+// Function returns the macro's function name (i.e. the function whose syntax it mimics).
+func (m *macro) Function() string {
+ return m.function
+}
+
+// ArgCount returns the number of arguments the macro expects.
+func (m *macro) ArgCount() int {
+ return m.argCount
+}
+
+// IsReceiverStyle returns whether the macro is receiver style.
+func (m *macro) IsReceiverStyle() bool {
+ return m.receiverStyle
+}
+
+// Expander implements the Macro interface method.
+func (m *macro) Expander() MacroExpander {
+ return m.expander
+}
+
+// MacroKey implements the Macro interface method.
+func (m *macro) MacroKey() string {
+ if m.varArgStyle {
+ return makeVarArgMacroKey(m.function, m.receiverStyle)
+ }
+ return makeMacroKey(m.function, m.argCount, m.receiverStyle)
+}
+
+func makeMacroKey(name string, args int, receiverStyle bool) string {
+ return fmt.Sprintf("%s:%d:%v", name, args, receiverStyle)
+}
+
+func makeVarArgMacroKey(name string, receiverStyle bool) string {
+ return fmt.Sprintf("%s:*:%v", name, receiverStyle)
+}
+
+// MacroExpander converts a call and its associated arguments into a new CEL abstract syntax tree.
+//
+// If the MacroExpander determines within the implementation that an expansion is not needed, it may
+// return a nil Expr value to indicate a non-match. However, if an expansion is to be performed, but
+// the arguments are not well-formed, the result of the expansion will be an error.
+//
+// The MacroExpander accepts as arguments an ExprHelper as well as the arguments used in the function
+// call and produces as output an Expr ast node.
+//
+// Note: when the Macro.IsReceiverStyle() method returns true, the target argument will be non-nil.
+type MacroExpander func(eh ExprHelper,
+ target *exprpb.Expr,
+ args []*exprpb.Expr) (*exprpb.Expr, *common.Error)
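+
+// A minimal sketch of a custom expander (the `isEmpty` macro and its
+// semantics are illustrative assumptions, not CEL built-ins): it rewrites
+// `list.isEmpty()` into `size(list) == 0` using the ExprHelper methods
+// declared below, and returns (nil, nil) to signal a non-match when the
+// call is not receiver-style.
+//
+//	func expandIsEmpty(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
+//		if target == nil {
+//			return nil, nil // no receiver; decline the expansion
+//		}
+//		sizeCall := eh.GlobalCall(operators.Size, target)
+//		return eh.GlobalCall(operators.Equals, sizeCall, eh.LiteralInt(0)), nil
+//	}
+//
+//	var isEmptyMacro = NewReceiverMacro("isEmpty", 0, expandIsEmpty)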
+
+// ExprHelper assists with the manipulation of proto-based Expr values in a manner which is
+// consistent with the source position and expression id generation code leveraged by both
+// the parser and type-checker.
+type ExprHelper interface {
+ // Copy the input expression with a brand new set of identifiers.
+ Copy(*exprpb.Expr) *exprpb.Expr
+
+ // LiteralBool creates an Expr value for a bool literal.
+ LiteralBool(value bool) *exprpb.Expr
+
+ // LiteralBytes creates an Expr value for a byte literal.
+ LiteralBytes(value []byte) *exprpb.Expr
+
+ // LiteralDouble creates an Expr value for double literal.
+ LiteralDouble(value float64) *exprpb.Expr
+
+ // LiteralInt creates an Expr value for an int literal.
+ LiteralInt(value int64) *exprpb.Expr
+
+ // LiteralString creates an Expr value for a string literal.
+ LiteralString(value string) *exprpb.Expr
+
+ // LiteralUint creates an Expr value for a uint literal.
+ LiteralUint(value uint64) *exprpb.Expr
+
+ // NewList creates a CreateList instruction where the list is comprised of the optional set
+ // of elements provided as arguments.
+ NewList(elems ...*exprpb.Expr) *exprpb.Expr
+
+ // NewMap creates a CreateStruct instruction for a map where the map is comprised of the
+ // optional set of key, value entries.
+ NewMap(entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr
+
+ // NewMapEntry creates a Map Entry for the key, value pair.
+ NewMapEntry(key *exprpb.Expr, val *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry
+
+ // NewObject creates a CreateStruct instruction for an object with a given type name and
+ // optional set of field initializers.
+ NewObject(typeName string, fieldInits ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr
+
+ // NewObjectFieldInit creates a new Object field initializer from the field name and value.
+ NewObjectFieldInit(field string, init *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry
+
+ // Fold creates a fold comprehension instruction.
+ //
+ // - iterVar is the iteration variable name.
+ // - iterRange represents the expression that resolves to a list or map where the elements or
+ // keys (respectively) will be iterated over.
+ // - accuVar is the accumulation variable name, typically parser.AccumulatorName.
+ // - accuInit is the initial expression whose value will be set for the accuVar prior to
+ // folding.
+ // - condition is the expression to test to determine whether to continue folding.
+ // - step is the expression to evaluate at the conclusion of a single fold iteration.
+ // - result is the computation to evaluate at the conclusion of the fold.
+ //
+ // The accuVar should not shadow variable names that you would like to reference within the
+ // environment in the step and condition expressions. Presently, the name __result__ is commonly
+ // used by built-in macros but this may change in the future.
+ Fold(iterVar string,
+ iterRange *exprpb.Expr,
+ accuVar string,
+ accuInit *exprpb.Expr,
+ condition *exprpb.Expr,
+ step *exprpb.Expr,
+ result *exprpb.Expr) *exprpb.Expr
+
+ // Ident creates an identifier Expr value.
+ Ident(name string) *exprpb.Expr
+
+ // AccuIdent returns an accumulator identifier for use with comprehension results.
+ AccuIdent() *exprpb.Expr
+
+ // GlobalCall creates a function call Expr value for a global (free) function.
+ GlobalCall(function string, args ...*exprpb.Expr) *exprpb.Expr
+
+ // ReceiverCall creates a function call Expr value for a receiver-style function.
+ ReceiverCall(function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr
+
+ // PresenceTest creates a Select TestOnly Expr value for modelling has() semantics.
+ PresenceTest(operand *exprpb.Expr, field string) *exprpb.Expr
+
+ // Select create a field traversal Expr value.
+ Select(operand *exprpb.Expr, field string) *exprpb.Expr
+
+ // OffsetLocation returns the Location of the expression identifier.
+ OffsetLocation(exprID int64) common.Location
+
+ // NewError associates an error message with a given expression id.
+ NewError(exprID int64, message string) *common.Error
+}
+
+var (
+ // HasMacro expands "has(m.f)" which tests the presence of a field, avoiding the need to
+ // specify the field as a string.
+ HasMacro = NewGlobalMacro(operators.Has, 1, MakeHas)
+
+ // AllMacro expands "range.all(var, predicate)" into a comprehension which ensures that all
+ // elements in the range satisfy the predicate.
+ AllMacro = NewReceiverMacro(operators.All, 2, MakeAll)
+
+ // ExistsMacro expands "range.exists(var, predicate)" into a comprehension which ensures that
+ // some element in the range satisfies the predicate.
+ ExistsMacro = NewReceiverMacro(operators.Exists, 2, MakeExists)
+
+ // ExistsOneMacro expands "range.exists_one(var, predicate)", which is true if for exactly one
+ // element in range the predicate holds.
+ ExistsOneMacro = NewReceiverMacro(operators.ExistsOne, 2, MakeExistsOne)
+
+ // MapMacro expands "range.map(var, function)" into a comprehension which applies the function
+ // to each element in the range to produce a new list.
+ MapMacro = NewReceiverMacro(operators.Map, 2, MakeMap)
+
+ // MapFilterMacro expands "range.map(var, predicate, function)" into a comprehension which
+ // first filters the elements in the range by the predicate, then applies the transform function
+ // to produce a new list.
+ MapFilterMacro = NewReceiverMacro(operators.Map, 3, MakeMap)
+
+ // FilterMacro expands "range.filter(var, predicate)" into a comprehension which filters
+ // elements in the range, producing a new list from the elements that satisfy the predicate.
+ FilterMacro = NewReceiverMacro(operators.Filter, 2, MakeFilter)
+
+ // AllMacros includes the list of all spec-supported macros.
+ AllMacros = []Macro{
+ HasMacro,
+ AllMacro,
+ ExistsMacro,
+ ExistsOneMacro,
+ MapMacro,
+ MapFilterMacro,
+ FilterMacro,
+ }
+
+ // NoMacros list.
+ NoMacros = []Macro{}
+)
+
+// AccumulatorName is the traditional variable name assigned to the fold accumulator variable.
+const AccumulatorName = "__result__"
+
+type quantifierKind int
+
+const (
+ quantifierAll quantifierKind = iota
+ quantifierExists
+ quantifierExistsOne
+)
+
+// MakeAll expands the input call arguments into a comprehension that returns true if all of the
+// elements in the range match the predicate expressions:
+// <iterRange>.all(<iterVar>, <predicate>)
+func MakeAll(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
+ return makeQuantifier(quantifierAll, eh, target, args)
+}
+
+// MakeExists expands the input call arguments into a comprehension that returns true if any of the
+// elements in the range match the predicate expressions:
+// <iterRange>.exists(<iterVar>, <predicate>)
+func MakeExists(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
+ return makeQuantifier(quantifierExists, eh, target, args)
+}
+
+// MakeExistsOne expands the input call arguments into a comprehension that returns true if exactly
+// one of the elements in the range match the predicate expressions:
+// <iterRange>.exists_one(<iterVar>, <predicate>)
+func MakeExistsOne(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
+ return makeQuantifier(quantifierExistsOne, eh, target, args)
+}
+
+// MakeMap expands the input call arguments into a comprehension that transforms each element in the
+// input to produce an output list.
+//
+// There are two call patterns supported by map:
+//
+// <iterRange>.map(<iterVar>, <transform>)
+// <iterRange>.map(<iterVar>, <predicate>, <transform>)
+//
+// In the second form only iterVar values which return true when provided to the predicate expression
+// are transformed.
+func MakeMap(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
+ v, found := extractIdent(args[0])
+ if !found {
+ return nil, eh.NewError(args[0].GetId(), "argument is not an identifier")
+ }
+
+ var fn *exprpb.Expr
+ var filter *exprpb.Expr
+
+ if len(args) == 3 {
+ filter = args[1]
+ fn = args[2]
+ } else {
+ filter = nil
+ fn = args[1]
+ }
+
+ accuExpr := eh.Ident(AccumulatorName)
+ init := eh.NewList()
+ condition := eh.LiteralBool(true)
+ step := eh.GlobalCall(operators.Add, accuExpr, eh.NewList(fn))
+
+ if filter != nil {
+ step = eh.GlobalCall(operators.Conditional, filter, step, accuExpr)
+ }
+ return eh.Fold(v, target, AccumulatorName, init, condition, step, accuExpr), nil
+}
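+
+// As an illustrative sketch (hypothetical input), `[1, 2].map(x, x * 2)`
+// expands into a fold roughly of the form:
+//
+//	fold(x, [1, 2],           // iterVar, iterRange
+//	  __result__, [],         // accuVar, accuInit
+//	  true,                   // loop condition (always iterate)
+//	  __result__ + [x * 2],   // loop step: append the transformed element
+//	  __result__)             // result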
+
+// MakeFilter expands the input call arguments into a comprehension which produces a list which contains
+// only elements which match the provided predicate expression:
+// <iterRange>.filter(<iterVar>, <predicate>)
+func MakeFilter(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
+ v, found := extractIdent(args[0])
+ if !found {
+ return nil, eh.NewError(args[0].GetId(), "argument is not an identifier")
+ }
+
+ filter := args[1]
+ accuExpr := eh.Ident(AccumulatorName)
+ init := eh.NewList()
+ condition := eh.LiteralBool(true)
+ step := eh.GlobalCall(operators.Add, accuExpr, eh.NewList(args[0]))
+ step = eh.GlobalCall(operators.Conditional, filter, step, accuExpr)
+ return eh.Fold(v, target, AccumulatorName, init, condition, step, accuExpr), nil
+}
+
+// MakeHas expands the input call arguments into a presence test, e.g. has(<operand>.field)
+func MakeHas(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
+ if s, ok := args[0].ExprKind.(*exprpb.Expr_SelectExpr); ok {
+ return eh.PresenceTest(s.SelectExpr.GetOperand(), s.SelectExpr.GetField()), nil
+ }
+ return nil, eh.NewError(args[0].GetId(), "invalid argument to has() macro")
+}
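+
+// For example, `has(msg.field)` (a hypothetical operand) becomes a Select of
+// `field` on `msg` with TestOnly set, so evaluation checks field presence
+// rather than reading the field's value.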
+
+func makeQuantifier(kind quantifierKind, eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
+ v, found := extractIdent(args[0])
+ if !found {
+ return nil, eh.NewError(args[0].GetId(), "argument must be a simple name")
+ }
+
+ var init *exprpb.Expr
+ var condition *exprpb.Expr
+ var step *exprpb.Expr
+ var result *exprpb.Expr
+ switch kind {
+ case quantifierAll:
+ init = eh.LiteralBool(true)
+ condition = eh.GlobalCall(operators.NotStrictlyFalse, eh.AccuIdent())
+ step = eh.GlobalCall(operators.LogicalAnd, eh.AccuIdent(), args[1])
+ result = eh.AccuIdent()
+ case quantifierExists:
+ init = eh.LiteralBool(false)
+ condition = eh.GlobalCall(
+ operators.NotStrictlyFalse,
+ eh.GlobalCall(operators.LogicalNot, eh.AccuIdent()))
+ step = eh.GlobalCall(operators.LogicalOr, eh.AccuIdent(), args[1])
+ result = eh.AccuIdent()
+ case quantifierExistsOne:
+ zeroExpr := eh.LiteralInt(0)
+ oneExpr := eh.LiteralInt(1)
+ init = zeroExpr
+ condition = eh.LiteralBool(true)
+ step = eh.GlobalCall(operators.Conditional, args[1],
+ eh.GlobalCall(operators.Add, eh.AccuIdent(), oneExpr), eh.AccuIdent())
+ result = eh.GlobalCall(operators.Equals, eh.AccuIdent(), oneExpr)
+ default:
+ return nil, eh.NewError(args[0].GetId(), fmt.Sprintf("unrecognized quantifier '%v'", kind))
+ }
+ return eh.Fold(v, target, AccumulatorName, init, condition, step, result), nil
+}
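+
+// As an illustrative sketch (hypothetical input), `l.exists_one(x, p)` folds
+// an integer accumulator: it starts at 0, adds 1 for each element where p
+// holds, and finally compares the count to 1:
+//
+//	fold(x, l, __result__, 0, true, p ? __result__ + 1 : __result__, __result__ == 1)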
+
+func extractIdent(e *exprpb.Expr) (string, bool) {
+ switch e.ExprKind.(type) {
+ case *exprpb.Expr_IdentExpr:
+ return e.GetIdentExpr().GetName(), true
+ }
+ return "", false
+}
diff --git a/vendor/github.com/google/cel-go/parser/options.go b/vendor/github.com/google/cel-go/parser/options.go
new file mode 100644
index 0000000..61fc3ad
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/options.go
@@ -0,0 +1,140 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import "fmt"
+
+type options struct {
+ maxRecursionDepth int
+ errorReportingLimit int
+ errorRecoveryTokenLookaheadLimit int
+ errorRecoveryLimit int
+ expressionSizeCodePointLimit int
+ macros map[string]Macro
+ populateMacroCalls bool
+ enableOptionalSyntax bool
+ enableVariadicOperatorASTs bool
+}
+
+// Option configures the behavior of the parser.
+type Option func(*options) error
+
+// MaxRecursionDepth limits the maximum depth the parser will attempt to parse the expression before giving up.
+func MaxRecursionDepth(limit int) Option {
+ return func(opts *options) error {
+ if limit < -1 {
+ return fmt.Errorf("max recursion depth must be greater than or equal to -1: %d", limit)
+ }
+ opts.maxRecursionDepth = limit
+ return nil
+ }
+}
+
+// ErrorRecoveryLookaheadTokenLimit limits the number of lexer tokens that may be considered during error recovery.
+//
+// Error recovery often involves looking ahead in the input to determine if there's a point at which parsing may
+// successfully resume. In some pathological cases, the parser can look through quite a large set of input which
+// in turn generates a lot of backtracking and performance degradation.
+//
+// The limit must be >= 1, and is recommended to be less than the default of 256.
+func ErrorRecoveryLookaheadTokenLimit(limit int) Option {
+ return func(opts *options) error {
+ if limit < 1 {
+ return fmt.Errorf("error recovery lookahead token limit must be at least 1: %d", limit)
+ }
+ opts.errorRecoveryTokenLookaheadLimit = limit
+ return nil
+ }
+}
+
+// ErrorRecoveryLimit limits the number of attempts the parser will perform to recover from an error.
+func ErrorRecoveryLimit(limit int) Option {
+ return func(opts *options) error {
+ if limit < -1 {
+ return fmt.Errorf("error recovery limit must be greater than or equal to -1: %d", limit)
+ }
+ opts.errorRecoveryLimit = limit
+ return nil
+ }
+}
+
+// ErrorReportingLimit limits the number of syntax error reports before terminating parsing.
+//
+// The limit must be at least 1. If unset, the limit will be 100.
+func ErrorReportingLimit(limit int) Option {
+ return func(opts *options) error {
+ if limit < 1 {
+ return fmt.Errorf("error reporting limit must be at least 1: %d", limit)
+ }
+ opts.errorReportingLimit = limit
+ return nil
+ }
+}
+
+// ExpressionSizeCodePointLimit is an option which limits the maximum code point count of an
+// expression.
+func ExpressionSizeCodePointLimit(expressionSizeCodePointLimit int) Option {
+ return func(opts *options) error {
+ if expressionSizeCodePointLimit < -1 {
+ return fmt.Errorf("expression size code point limit must be greater than or equal to -1: %d", expressionSizeCodePointLimit)
+ }
+ opts.expressionSizeCodePointLimit = expressionSizeCodePointLimit
+ return nil
+ }
+}
+
+// Macros adds the given macros to the parser.
+func Macros(macros ...Macro) Option {
+ return func(opts *options) error {
+ for _, m := range macros {
+ if m != nil {
+ if opts.macros == nil {
+ opts.macros = make(map[string]Macro)
+ }
+ opts.macros[m.MacroKey()] = m
+ }
+ }
+ return nil
+ }
+}
+
+// PopulateMacroCalls ensures that the original call signatures replaced by expanded macros
+// are preserved in the `SourceInfo` of the parse result.
+func PopulateMacroCalls(populateMacroCalls bool) Option {
+ return func(opts *options) error {
+ opts.populateMacroCalls = populateMacroCalls
+ return nil
+ }
+}
+
+// EnableOptionalSyntax enables syntax for optional field and index selection.
+func EnableOptionalSyntax(optionalSyntax bool) Option {
+ return func(opts *options) error {
+ opts.enableOptionalSyntax = optionalSyntax
+ return nil
+ }
+}
+
+// EnableVariadicOperatorASTs enables a compact representation of chained like-kind commutative
+// operators. e.g. `a || b || c || d` -> `call(op='||', args=[a, b, c, d])`
+//
+// The benefit of enabling variadic operator ASTs is a more compact representation of deeply
+// nested logic graphs.
+func EnableVariadicOperatorASTs(varArgASTs bool) Option {
+ return func(opts *options) error {
+ opts.enableVariadicOperatorASTs = varArgASTs
+ return nil
+ }
+}
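+
+// Illustrative composition of these options (a sketch; the limit values are
+// arbitrary):
+//
+//	p, err := NewParser(
+//	    MaxRecursionDepth(32),
+//	    ErrorRecoveryLimit(4),
+//	    ErrorReportingLimit(10),
+//	    PopulateMacroCalls(true),
+//	)
+//	if err != nil {
+//	    // a value below an option's documented minimum is rejected here
+//	}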
diff --git a/vendor/github.com/google/cel-go/parser/parser.go b/vendor/github.com/google/cel-go/parser/parser.go
new file mode 100644
index 0000000..109326a
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/parser.go
@@ -0,0 +1,1045 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package parser declares an expression parser with support for macro
+// expansion.
+package parser
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+
+ antlr "github.com/antlr/antlr4/runtime/Go/antlr/v4"
+
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/runes"
+ "github.com/google/cel-go/parser/gen"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+// Parser encapsulates the context necessary to perform parsing for different expressions.
+type Parser struct {
+ options
+}
+
+// NewParser builds and returns a new Parser using the provided options.
+func NewParser(opts ...Option) (*Parser, error) {
+ p := &Parser{}
+ for _, opt := range opts {
+ if err := opt(&p.options); err != nil {
+ return nil, err
+ }
+ }
+ if p.errorReportingLimit == 0 {
+ p.errorReportingLimit = 100
+ }
+ if p.maxRecursionDepth == 0 {
+ p.maxRecursionDepth = 250
+ }
+ if p.maxRecursionDepth == -1 {
+ p.maxRecursionDepth = int((^uint(0)) >> 1)
+ }
+ if p.errorRecoveryTokenLookaheadLimit == 0 {
+ p.errorRecoveryTokenLookaheadLimit = 256
+ }
+ if p.errorRecoveryLimit == 0 {
+ p.errorRecoveryLimit = 30
+ }
+ if p.errorRecoveryLimit == -1 {
+ p.errorRecoveryLimit = int((^uint(0)) >> 1)
+ }
+ if p.expressionSizeCodePointLimit == 0 {
+ p.expressionSizeCodePointLimit = 100_000
+ }
+ if p.expressionSizeCodePointLimit == -1 {
+ p.expressionSizeCodePointLimit = int((^uint(0)) >> 1)
+ }
+ // Bool fields are false by default, so populateMacroCalls defaults to false.
+ return p, nil
+}
+
+// mustNewParser does the work of NewParser and panics if an error occurs.
+//
+// This function is only intended for internal use and is for backwards compatibility in Parse and
+// ParseWithMacros, where we know the options will not result in an error.
+func mustNewParser(opts ...Option) *Parser {
+ p, err := NewParser(opts...)
+ if err != nil {
+ panic(err)
+ }
+ return p
+}
+
+// Parse parses the expression represented by source and returns the result.
+func (p *Parser) Parse(source common.Source) (*exprpb.ParsedExpr, *common.Errors) {
+ errs := common.NewErrors(source)
+ impl := parser{
+ errors: &parseErrors{errs},
+ helper: newParserHelper(source),
+ macros: p.macros,
+ maxRecursionDepth: p.maxRecursionDepth,
+ errorReportingLimit: p.errorReportingLimit,
+ errorRecoveryLimit: p.errorRecoveryLimit,
+ errorRecoveryLookaheadTokenLimit: p.errorRecoveryTokenLookaheadLimit,
+ populateMacroCalls: p.populateMacroCalls,
+ enableOptionalSyntax: p.enableOptionalSyntax,
+ enableVariadicOperatorASTs: p.enableVariadicOperatorASTs,
+ }
+ buf, ok := source.(runes.Buffer)
+ if !ok {
+ buf = runes.NewBuffer(source.Content())
+ }
+ var e *exprpb.Expr
+ if buf.Len() > p.expressionSizeCodePointLimit {
+ e = impl.reportError(common.NoLocation,
+ "expression code point size exceeds limit: size: %d, limit %d",
+ buf.Len(), p.expressionSizeCodePointLimit)
+ } else {
+ e = impl.parse(buf, source.Description())
+ }
+ return &exprpb.ParsedExpr{
+ Expr: e,
+ SourceInfo: impl.helper.getSourceInfo(),
+ }, errs
+}
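+
+// Illustrative usage (a sketch assuming the default macro set):
+//
+//	p, _ := NewParser(Macros(AllMacros...))
+//	parsed, errs := p.Parse(common.NewTextSource(`[1, 2, 3].all(i, i > 0)`))
+//	if len(errs.GetErrors()) > 0 {
+//	    fmt.Println(errs.ToDisplayString())
+//	}
+//	_ = parsed.GetExpr() // root of the parsed AST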
+
+// reservedIds are not legal to use as variables. We exclude them post-parse, as they *are* valid
+// field names for protos, and it would complicate the grammar to distinguish the cases.
+var reservedIds = map[string]struct{}{
+ "as": {},
+ "break": {},
+ "const": {},
+ "continue": {},
+ "else": {},
+ "false": {},
+ "for": {},
+ "function": {},
+ "if": {},
+ "import": {},
+ "in": {},
+ "let": {},
+ "loop": {},
+ "package": {},
+ "namespace": {},
+ "null": {},
+ "return": {},
+ "true": {},
+ "var": {},
+ "void": {},
+ "while": {},
+}
+
+// Parse converts a source input to a parsed expression.
+// This function calls ParseWithMacros with AllMacros.
+//
+// Deprecated: Use NewParser().Parse() instead.
+func Parse(source common.Source) (*exprpb.ParsedExpr, *common.Errors) {
+ return mustNewParser(Macros(AllMacros...)).Parse(source)
+}
+
+type recursionError struct {
+ message string
+}
+
+// Error implements error.
+func (re *recursionError) Error() string {
+ return re.message
+}
+
+var _ error = &recursionError{}
+
+type recursionListener struct {
+ maxDepth int
+ ruleTypeDepth map[int]*int
+}
+
+func (rl *recursionListener) VisitTerminal(node antlr.TerminalNode) {}
+
+func (rl *recursionListener) VisitErrorNode(node antlr.ErrorNode) {}
+
+func (rl *recursionListener) EnterEveryRule(ctx antlr.ParserRuleContext) {
+ if ctx == nil {
+ return
+ }
+ ruleIndex := ctx.GetRuleIndex()
+ depth, found := rl.ruleTypeDepth[ruleIndex]
+ if !found {
+ var counter = 1
+ rl.ruleTypeDepth[ruleIndex] = &counter
+ depth = &counter
+ } else {
+ *depth++
+ }
+ if *depth > rl.maxDepth {
+ panic(&recursionError{
+ message: fmt.Sprintf("expression recursion limit exceeded: %d", rl.maxDepth),
+ })
+ }
+}
+
+func (rl *recursionListener) ExitEveryRule(ctx antlr.ParserRuleContext) {
+ if ctx == nil {
+ return
+ }
+ ruleIndex := ctx.GetRuleIndex()
+ if depth, found := rl.ruleTypeDepth[ruleIndex]; found && *depth > 0 {
+ *depth--
+ }
+}
+
+var _ antlr.ParseTreeListener = &recursionListener{}
+
+type tooManyErrors struct {
+ errorReportingLimit int
+}
+
+func (t *tooManyErrors) Error() string {
+ return fmt.Sprintf("More than %d syntax errors", t.errorReportingLimit)
+}
+
+var _ error = &tooManyErrors{}
+
+type recoveryLimitError struct {
+ message string
+}
+
+// Error implements error.
+func (rl *recoveryLimitError) Error() string {
+ return rl.message
+}
+
+type lookaheadLimitError struct {
+ message string
+}
+
+func (ll *lookaheadLimitError) Error() string {
+ return ll.message
+}
+
+var _ error = &recoveryLimitError{}
+
+type recoveryLimitErrorStrategy struct {
+ *antlr.DefaultErrorStrategy
+ errorRecoveryLimit int
+ errorRecoveryTokenLookaheadLimit int
+ recoveryAttempts int
+}
+
+type lookaheadConsumer struct {
+ antlr.Parser
+ errorRecoveryTokenLookaheadLimit int
+ lookaheadAttempts int
+}
+
+func (lc *lookaheadConsumer) Consume() antlr.Token {
+ if lc.lookaheadAttempts >= lc.errorRecoveryTokenLookaheadLimit {
+ panic(&lookaheadLimitError{
+ message: fmt.Sprintf("error recovery token lookahead limit exceeded: %d", lc.errorRecoveryTokenLookaheadLimit),
+ })
+ }
+ lc.lookaheadAttempts++
+ return lc.Parser.Consume()
+}
+
+func (rl *recoveryLimitErrorStrategy) Recover(recognizer antlr.Parser, e antlr.RecognitionException) {
+ rl.checkAttempts(recognizer)
+ lc := &lookaheadConsumer{Parser: recognizer, errorRecoveryTokenLookaheadLimit: rl.errorRecoveryTokenLookaheadLimit}
+ rl.DefaultErrorStrategy.Recover(lc, e)
+}
+
+func (rl *recoveryLimitErrorStrategy) RecoverInline(recognizer antlr.Parser) antlr.Token {
+ rl.checkAttempts(recognizer)
+ lc := &lookaheadConsumer{Parser: recognizer, errorRecoveryTokenLookaheadLimit: rl.errorRecoveryTokenLookaheadLimit}
+ return rl.DefaultErrorStrategy.RecoverInline(lc)
+}
+
+func (rl *recoveryLimitErrorStrategy) checkAttempts(recognizer antlr.Parser) {
+ if rl.recoveryAttempts == rl.errorRecoveryLimit {
+ rl.recoveryAttempts++
+ msg := fmt.Sprintf("error recovery attempt limit exceeded: %d", rl.errorRecoveryLimit)
+ recognizer.NotifyErrorListeners(msg, nil, nil)
+ panic(&recoveryLimitError{
+ message: msg,
+ })
+ }
+ rl.recoveryAttempts++
+}
+
+var _ antlr.ErrorStrategy = &recoveryLimitErrorStrategy{}
+
+type parser struct {
+ gen.BaseCELVisitor
+ errors *parseErrors
+ helper *parserHelper
+ macros map[string]Macro
+ recursionDepth int
+ errorReports int
+ maxRecursionDepth int
+ errorReportingLimit int
+ errorRecoveryLimit int
+ errorRecoveryLookaheadTokenLimit int
+ populateMacroCalls bool
+ enableOptionalSyntax bool
+ enableVariadicOperatorASTs bool
+}
+
+var (
+ _ gen.CELVisitor = (*parser)(nil)
+
+ lexerPool *sync.Pool = &sync.Pool{
+ New: func() any {
+ l := gen.NewCELLexer(nil)
+ l.RemoveErrorListeners()
+ return l
+ },
+ }
+
+ parserPool *sync.Pool = &sync.Pool{
+ New: func() any {
+ p := gen.NewCELParser(nil)
+ p.RemoveErrorListeners()
+ return p
+ },
+ }
+)
+
+func (p *parser) parse(expr runes.Buffer, desc string) *exprpb.Expr {
+ // TODO: get rid of these pools once https://github.com/antlr/antlr4/pull/3571 is in a release
+ lexer := lexerPool.Get().(*gen.CELLexer)
+ prsr := parserPool.Get().(*gen.CELParser)
+
+ prsrListener := &recursionListener{
+ maxDepth: p.maxRecursionDepth,
+ ruleTypeDepth: map[int]*int{},
+ }
+
+ defer func() {
+ // Unfortunately ANTLR Go runtime is missing (*antlr.BaseParser).RemoveParseListeners,
+ // so this is good enough until that is exported.
+ // Reset the lexer and parser before putting them back in the pool.
+ lexer.RemoveErrorListeners()
+ prsr.RemoveParseListener(prsrListener)
+ prsr.RemoveErrorListeners()
+ lexer.SetInputStream(nil)
+ prsr.SetInputStream(nil)
+ lexerPool.Put(lexer)
+ parserPool.Put(prsr)
+ }()
+
+ lexer.SetInputStream(newCharStream(expr, desc))
+ prsr.SetInputStream(antlr.NewCommonTokenStream(lexer, 0))
+
+ lexer.AddErrorListener(p)
+ prsr.AddErrorListener(p)
+ prsr.AddParseListener(prsrListener)
+
+ prsr.SetErrorHandler(&recoveryLimitErrorStrategy{
+ DefaultErrorStrategy: antlr.NewDefaultErrorStrategy(),
+ errorRecoveryLimit: p.errorRecoveryLimit,
+ errorRecoveryTokenLookaheadLimit: p.errorRecoveryLookaheadTokenLimit,
+ })
+
+ defer func() {
+ if val := recover(); val != nil {
+ switch err := val.(type) {
+ case *lookaheadLimitError:
+ p.errors.internalError(err.Error())
+ case *recursionError:
+ p.errors.internalError(err.Error())
+ case *tooManyErrors:
+ // do nothing
+ case *recoveryLimitError:
+ // do nothing, listeners already notified and error reported.
+ default:
+ panic(val)
+ }
+ }
+ }()
+
+ return p.Visit(prsr.Start()).(*exprpb.Expr)
+}
+
+// Visitor implementations.
+func (p *parser) Visit(tree antlr.ParseTree) any {
+ t := unnest(tree)
+ switch tree := t.(type) {
+ case *gen.StartContext:
+ return p.VisitStart(tree)
+ case *gen.ExprContext:
+ p.checkAndIncrementRecursionDepth()
+ out := p.VisitExpr(tree)
+ p.decrementRecursionDepth()
+ return out
+ case *gen.ConditionalAndContext:
+ return p.VisitConditionalAnd(tree)
+ case *gen.ConditionalOrContext:
+ return p.VisitConditionalOr(tree)
+ case *gen.RelationContext:
+ p.checkAndIncrementRecursionDepth()
+ out := p.VisitRelation(tree)
+ p.decrementRecursionDepth()
+ return out
+ case *gen.CalcContext:
+ p.checkAndIncrementRecursionDepth()
+ out := p.VisitCalc(tree)
+ p.decrementRecursionDepth()
+ return out
+ case *gen.LogicalNotContext:
+ return p.VisitLogicalNot(tree)
+ case *gen.IdentOrGlobalCallContext:
+ return p.VisitIdentOrGlobalCall(tree)
+ case *gen.SelectContext:
+ p.checkAndIncrementRecursionDepth()
+ out := p.VisitSelect(tree)
+ p.decrementRecursionDepth()
+ return out
+ case *gen.MemberCallContext:
+ p.checkAndIncrementRecursionDepth()
+ out := p.VisitMemberCall(tree)
+ p.decrementRecursionDepth()
+ return out
+ case *gen.MapInitializerListContext:
+ return p.VisitMapInitializerList(tree)
+ case *gen.NegateContext:
+ return p.VisitNegate(tree)
+ case *gen.IndexContext:
+ p.checkAndIncrementRecursionDepth()
+ out := p.VisitIndex(tree)
+ p.decrementRecursionDepth()
+ return out
+ case *gen.UnaryContext:
+ return p.VisitUnary(tree)
+ case *gen.CreateListContext:
+ return p.VisitCreateList(tree)
+ case *gen.CreateMessageContext:
+ return p.VisitCreateMessage(tree)
+ case *gen.CreateStructContext:
+ return p.VisitCreateStruct(tree)
+ case *gen.IntContext:
+ return p.VisitInt(tree)
+ case *gen.UintContext:
+ return p.VisitUint(tree)
+ case *gen.DoubleContext:
+ return p.VisitDouble(tree)
+ case *gen.StringContext:
+ return p.VisitString(tree)
+ case *gen.BytesContext:
+ return p.VisitBytes(tree)
+ case *gen.BoolFalseContext:
+ return p.VisitBoolFalse(tree)
+ case *gen.BoolTrueContext:
+ return p.VisitBoolTrue(tree)
+ case *gen.NullContext:
+ return p.VisitNull(tree)
+ }
+
+ // Report at least one error if the parser reaches an unknown parse element.
+ // Typically, this happens if the parser has already encountered a syntax error elsewhere.
+ if p.errors.errorCount() == 0 {
+ txt := "<>"
+ if t != nil {
+ txt = fmt.Sprintf("<<%T>>", t)
+ }
+ return p.reportError(common.NoLocation, "unknown parse element encountered: %s", txt)
+ }
+ return p.helper.newExpr(common.NoLocation)
+}
+
+// Visit a parse tree produced by CELParser#start.
+func (p *parser) VisitStart(ctx *gen.StartContext) any {
+ return p.Visit(ctx.Expr())
+}
+
+// Visit a parse tree produced by CELParser#expr.
+func (p *parser) VisitExpr(ctx *gen.ExprContext) any {
+ result := p.Visit(ctx.GetE()).(*exprpb.Expr)
+ if ctx.GetOp() == nil {
+ return result
+ }
+ opID := p.helper.id(ctx.GetOp())
+ ifTrue := p.Visit(ctx.GetE1()).(*exprpb.Expr)
+ ifFalse := p.Visit(ctx.GetE2()).(*exprpb.Expr)
+ return p.globalCallOrMacro(opID, operators.Conditional, result, ifTrue, ifFalse)
+}
+
+// Visit a parse tree produced by CELParser#conditionalOr.
+func (p *parser) VisitConditionalOr(ctx *gen.ConditionalOrContext) any {
+ result := p.Visit(ctx.GetE()).(*exprpb.Expr)
+ l := p.newLogicManager(operators.LogicalOr, result)
+ rest := ctx.GetE1()
+ for i, op := range ctx.GetOps() {
+ if i >= len(rest) {
+ return p.reportError(ctx, "unexpected character, wanted '||'")
+ }
+ next := p.Visit(rest[i]).(*exprpb.Expr)
+ opID := p.helper.id(op)
+ l.addTerm(opID, next)
+ }
+ return l.toExpr()
+}
+
+// Visit a parse tree produced by CELParser#conditionalAnd.
+func (p *parser) VisitConditionalAnd(ctx *gen.ConditionalAndContext) any {
+ result := p.Visit(ctx.GetE()).(*exprpb.Expr)
+ l := p.newLogicManager(operators.LogicalAnd, result)
+ rest := ctx.GetE1()
+ for i, op := range ctx.GetOps() {
+ if i >= len(rest) {
+ return p.reportError(ctx, "unexpected character, wanted '&&'")
+ }
+ next := p.Visit(rest[i]).(*exprpb.Expr)
+ opID := p.helper.id(op)
+ l.addTerm(opID, next)
+ }
+ return l.toExpr()
+}
+
+// Visit a parse tree produced by CELParser#relation.
+func (p *parser) VisitRelation(ctx *gen.RelationContext) any {
+ opText := ""
+ if ctx.GetOp() != nil {
+ opText = ctx.GetOp().GetText()
+ }
+ if op, found := operators.Find(opText); found {
+ lhs := p.Visit(ctx.Relation(0)).(*exprpb.Expr)
+ opID := p.helper.id(ctx.GetOp())
+ rhs := p.Visit(ctx.Relation(1)).(*exprpb.Expr)
+ return p.globalCallOrMacro(opID, op, lhs, rhs)
+ }
+ return p.reportError(ctx, "operator not found")
+}
+
+// Visit a parse tree produced by CELParser#calc.
+func (p *parser) VisitCalc(ctx *gen.CalcContext) any {
+ opText := ""
+ if ctx.GetOp() != nil {
+ opText = ctx.GetOp().GetText()
+ }
+ if op, found := operators.Find(opText); found {
+ lhs := p.Visit(ctx.Calc(0)).(*exprpb.Expr)
+ opID := p.helper.id(ctx.GetOp())
+ rhs := p.Visit(ctx.Calc(1)).(*exprpb.Expr)
+ return p.globalCallOrMacro(opID, op, lhs, rhs)
+ }
+ return p.reportError(ctx, "operator not found")
+}
+
+// Visit a parse tree produced by CELParser#unary.
+func (p *parser) VisitUnary(ctx *gen.UnaryContext) any {
+ return p.helper.newLiteralString(ctx, "<>")
+}
+
+// Visit a parse tree produced by CELParser#LogicalNot.
+func (p *parser) VisitLogicalNot(ctx *gen.LogicalNotContext) any {
+ if len(ctx.GetOps())%2 == 0 {
+ return p.Visit(ctx.Member())
+ }
+ opID := p.helper.id(ctx.GetOps()[0])
+ target := p.Visit(ctx.Member()).(*exprpb.Expr)
+ return p.globalCallOrMacro(opID, operators.LogicalNot, target)
+}
+
+// Visit a parse tree produced by CELParser#Negate.
+func (p *parser) VisitNegate(ctx *gen.NegateContext) any {
+ if len(ctx.GetOps())%2 == 0 {
+ return p.Visit(ctx.Member())
+ }
+ opID := p.helper.id(ctx.GetOps()[0])
+ target := p.Visit(ctx.Member()).(*exprpb.Expr)
+ return p.globalCallOrMacro(opID, operators.Negate, target)
+}
+
+// VisitSelect visits a parse tree produced by CELParser#Select.
+func (p *parser) VisitSelect(ctx *gen.SelectContext) any {
+ operand := p.Visit(ctx.Member()).(*exprpb.Expr)
+ // Handle the error case where no valid identifier is specified.
+ if ctx.GetId() == nil || ctx.GetOp() == nil {
+ return p.helper.newExpr(ctx)
+ }
+ id := ctx.GetId().GetText()
+ if ctx.GetOpt() != nil {
+ if !p.enableOptionalSyntax {
+ return p.reportError(ctx.GetOp(), "unsupported syntax '.?'")
+ }
+ return p.helper.newGlobalCall(
+ ctx.GetOp(),
+ operators.OptSelect,
+ operand,
+ p.helper.newLiteralString(ctx.GetId(), id))
+ }
+ return p.helper.newSelect(ctx.GetOp(), operand, id)
+}
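+
+// For illustration: with optional syntax enabled, `m.?x` parses to the global
+// call `_?._(m, "x")`, while plain `m.x` remains a select expression.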
+
+// VisitMemberCall visits a parse tree produced by CELParser#MemberCall.
+func (p *parser) VisitMemberCall(ctx *gen.MemberCallContext) any {
+ operand := p.Visit(ctx.Member()).(*exprpb.Expr)
+ // Handle the error case where no valid identifier is specified.
+ if ctx.GetId() == nil {
+ return p.helper.newExpr(ctx)
+ }
+ id := ctx.GetId().GetText()
+ opID := p.helper.id(ctx.GetOpen())
+ return p.receiverCallOrMacro(opID, id, operand, p.visitExprList(ctx.GetArgs())...)
+}
+
+// Visit a parse tree produced by CELParser#Index.
+func (p *parser) VisitIndex(ctx *gen.IndexContext) any {
+ target := p.Visit(ctx.Member()).(*exprpb.Expr)
+ // Handle the error case where no valid identifier is specified.
+ if ctx.GetOp() == nil {
+ return p.helper.newExpr(ctx)
+ }
+ opID := p.helper.id(ctx.GetOp())
+ index := p.Visit(ctx.GetIndex()).(*exprpb.Expr)
+ operator := operators.Index
+ if ctx.GetOpt() != nil {
+ if !p.enableOptionalSyntax {
+ return p.reportError(ctx.GetOp(), "unsupported syntax '[?'")
+ }
+ operator = operators.OptIndex
+ }
+ return p.globalCallOrMacro(opID, operator, target, index)
+}
+
+// Visit a parse tree produced by CELParser#CreateMessage.
+func (p *parser) VisitCreateMessage(ctx *gen.CreateMessageContext) any {
+ messageName := ""
+ for _, id := range ctx.GetIds() {
+ if len(messageName) != 0 {
+ messageName += "."
+ }
+ messageName += id.GetText()
+ }
+ if ctx.GetLeadingDot() != nil {
+ messageName = "." + messageName
+ }
+ objID := p.helper.id(ctx.GetOp())
+ entries := p.VisitIFieldInitializerList(ctx.GetEntries()).([]*exprpb.Expr_CreateStruct_Entry)
+ return p.helper.newObject(objID, messageName, entries...)
+}
+
+// Visit a parse tree of field initializers.
+func (p *parser) VisitIFieldInitializerList(ctx gen.IFieldInitializerListContext) any {
+ if ctx == nil || ctx.GetFields() == nil {
+ // This is the result of a syntax error handled elsewhere; return empty.
+ return []*exprpb.Expr_CreateStruct_Entry{}
+ }
+
+ result := make([]*exprpb.Expr_CreateStruct_Entry, len(ctx.GetFields()))
+ cols := ctx.GetCols()
+ vals := ctx.GetValues()
+ for i, f := range ctx.GetFields() {
+ if i >= len(cols) || i >= len(vals) {
+ // This is the result of a syntax error detected elsewhere.
+ return []*exprpb.Expr_CreateStruct_Entry{}
+ }
+ initID := p.helper.id(cols[i])
+ optField := f.(*gen.OptFieldContext)
+ optional := optField.GetOpt() != nil
+ if !p.enableOptionalSyntax && optional {
+ p.reportError(optField, "unsupported syntax '?'")
+ continue
+ }
+ // The field may be empty due to a prior error.
+ id := optField.IDENTIFIER()
+ if id == nil {
+ return []*exprpb.Expr_CreateStruct_Entry{}
+ }
+ fieldName := id.GetText()
+ value := p.Visit(vals[i]).(*exprpb.Expr)
+ field := p.helper.newObjectField(initID, fieldName, value, optional)
+ result[i] = field
+ }
+ return result
+}
+
+// Visit a parse tree produced by CELParser#IdentOrGlobalCall.
+func (p *parser) VisitIdentOrGlobalCall(ctx *gen.IdentOrGlobalCallContext) any {
+ identName := ""
+ if ctx.GetLeadingDot() != nil {
+ identName = "."
+ }
+ // Handle the error case where no valid identifier is specified.
+ if ctx.GetId() == nil {
+ return p.helper.newExpr(ctx)
+ }
+ // Handle reserved identifiers.
+ id := ctx.GetId().GetText()
+ if _, ok := reservedIds[id]; ok {
+ return p.reportError(ctx, "reserved identifier: %s", id)
+ }
+ identName += id
+ if ctx.GetOp() != nil {
+ opID := p.helper.id(ctx.GetOp())
+ return p.globalCallOrMacro(opID, identName, p.visitExprList(ctx.GetArgs())...)
+ }
+ return p.helper.newIdent(ctx.GetId(), identName)
+}
+
+// Visit a parse tree produced by CELParser#CreateList.
+func (p *parser) VisitCreateList(ctx *gen.CreateListContext) any {
+ listID := p.helper.id(ctx.GetOp())
+ elems, optionals := p.visitListInit(ctx.GetElems())
+ return p.helper.newList(listID, elems, optionals...)
+}
+
+// Visit a parse tree produced by CELParser#CreateStruct.
+func (p *parser) VisitCreateStruct(ctx *gen.CreateStructContext) any {
+ structID := p.helper.id(ctx.GetOp())
+ entries := []*exprpb.Expr_CreateStruct_Entry{}
+ if ctx.GetEntries() != nil {
+ entries = p.Visit(ctx.GetEntries()).([]*exprpb.Expr_CreateStruct_Entry)
+ }
+ return p.helper.newMap(structID, entries...)
+}
+
+// Visit a parse tree produced by CELParser#mapInitializerList.
+func (p *parser) VisitMapInitializerList(ctx *gen.MapInitializerListContext) any {
+ if ctx == nil || ctx.GetKeys() == nil {
+ // This is the result of a syntax error handled elsewhere; return empty.
+ return []*exprpb.Expr_CreateStruct_Entry{}
+ }
+
+ result := make([]*exprpb.Expr_CreateStruct_Entry, len(ctx.GetCols()))
+ keys := ctx.GetKeys()
+ vals := ctx.GetValues()
+ for i, col := range ctx.GetCols() {
+ colID := p.helper.id(col)
+ if i >= len(keys) || i >= len(vals) {
+ // This is the result of a syntax error detected elsewhere.
+ return []*exprpb.Expr_CreateStruct_Entry{}
+ }
+ optKey := keys[i]
+ optional := optKey.GetOpt() != nil
+ if !p.enableOptionalSyntax && optional {
+ p.reportError(optKey, "unsupported syntax '?'")
+ continue
+ }
+ key := p.Visit(optKey.GetE()).(*exprpb.Expr)
+ value := p.Visit(vals[i]).(*exprpb.Expr)
+ entry := p.helper.newMapEntry(colID, key, value, optional)
+ result[i] = entry
+ }
+ return result
+}
+
+// Visit a parse tree produced by CELParser#Int.
+func (p *parser) VisitInt(ctx *gen.IntContext) any {
+ text := ctx.GetTok().GetText()
+ base := 10
+ if strings.HasPrefix(text, "0x") {
+ base = 16
+ text = text[2:]
+ }
+ if ctx.GetSign() != nil {
+ text = ctx.GetSign().GetText() + text
+ }
+ i, err := strconv.ParseInt(text, base, 64)
+ if err != nil {
+ return p.reportError(ctx, "invalid int literal")
+ }
+ return p.helper.newLiteralInt(ctx, i)
+}
+
+// Visit a parse tree produced by CELParser#Uint.
+func (p *parser) VisitUint(ctx *gen.UintContext) any {
+ text := ctx.GetTok().GetText()
+ // trim the 'u' designator included in the uint literal.
+ text = text[:len(text)-1]
+ base := 10
+ if strings.HasPrefix(text, "0x") {
+ base = 16
+ text = text[2:]
+ }
+ i, err := strconv.ParseUint(text, base, 64)
+ if err != nil {
+ return p.reportError(ctx, "invalid uint literal")
+ }
+ return p.helper.newLiteralUint(ctx, i)
+}
+
+// Visit a parse tree produced by CELParser#Double.
+func (p *parser) VisitDouble(ctx *gen.DoubleContext) any {
+ txt := ctx.GetTok().GetText()
+ if ctx.GetSign() != nil {
+ txt = ctx.GetSign().GetText() + txt
+ }
+ f, err := strconv.ParseFloat(txt, 64)
+ if err != nil {
+ return p.reportError(ctx, "invalid double literal")
+ }
+ return p.helper.newLiteralDouble(ctx, f)
+}
+
+// Visit a parse tree produced by CELParser#String.
+func (p *parser) VisitString(ctx *gen.StringContext) any {
+ s := p.unquote(ctx, ctx.GetText(), false)
+ return p.helper.newLiteralString(ctx, s)
+}
+
+// Visit a parse tree produced by CELParser#Bytes.
+func (p *parser) VisitBytes(ctx *gen.BytesContext) any {
+ b := []byte(p.unquote(ctx, ctx.GetTok().GetText()[1:], true))
+ return p.helper.newLiteralBytes(ctx, b)
+}
+
+// Visit a parse tree produced by CELParser#BoolTrue.
+func (p *parser) VisitBoolTrue(ctx *gen.BoolTrueContext) any {
+ return p.helper.newLiteralBool(ctx, true)
+}
+
+// Visit a parse tree produced by CELParser#BoolFalse.
+func (p *parser) VisitBoolFalse(ctx *gen.BoolFalseContext) any {
+ return p.helper.newLiteralBool(ctx, false)
+}
+
+// Visit a parse tree produced by CELParser#Null.
+func (p *parser) VisitNull(ctx *gen.NullContext) any {
+ return p.helper.newLiteral(ctx,
+ &exprpb.Constant{
+ ConstantKind: &exprpb.Constant_NullValue{
+ NullValue: structpb.NullValue_NULL_VALUE}})
+}
+
+func (p *parser) visitExprList(ctx gen.IExprListContext) []*exprpb.Expr {
+ if ctx == nil {
+ return []*exprpb.Expr{}
+ }
+ return p.visitSlice(ctx.GetE())
+}
+
+func (p *parser) visitListInit(ctx gen.IListInitContext) ([]*exprpb.Expr, []int32) {
+ if ctx == nil {
+ return []*exprpb.Expr{}, []int32{}
+ }
+ elements := ctx.GetElems()
+ result := make([]*exprpb.Expr, len(elements))
+ optionals := []int32{}
+ for i, e := range elements {
+ ex := p.Visit(e.GetE()).(*exprpb.Expr)
+ if ex == nil {
+ return []*exprpb.Expr{}, []int32{}
+ }
+ result[i] = ex
+ if e.GetOpt() != nil {
+ if !p.enableOptionalSyntax {
+ p.reportError(e.GetOpt(), "unsupported syntax '?'")
+ continue
+ }
+ optionals = append(optionals, int32(i))
+ }
+ }
+ return result, optionals
+}
+
+func (p *parser) visitSlice(expressions []gen.IExprContext) []*exprpb.Expr {
+ if expressions == nil {
+ return []*exprpb.Expr{}
+ }
+ result := make([]*exprpb.Expr, len(expressions))
+ for i, e := range expressions {
+ ex := p.Visit(e).(*exprpb.Expr)
+ result[i] = ex
+ }
+ return result
+}
+
+func (p *parser) unquote(ctx any, value string, isBytes bool) string {
+ text, err := unescape(value, isBytes)
+ if err != nil {
+ p.reportError(ctx, "%s", err.Error())
+ return value
+ }
+ return text
+}
+
+func (p *parser) newLogicManager(function string, term *exprpb.Expr) *logicManager {
+ if p.enableVariadicOperatorASTs {
+ return newVariadicLogicManager(p.helper, function, term)
+ }
+ return newBalancingLogicManager(p.helper, function, term)
+}
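+
+// For illustration: with enableVariadicOperatorASTs set, `a || b || c` is
+// collected into a single call `_||_(a, b, c)`; otherwise the balancing
+// manager emits a depth-balanced tree of binary `_||_` calls.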
+
+func (p *parser) reportError(ctx any, format string, args ...any) *exprpb.Expr {
+ var location common.Location
+ err := p.helper.newExpr(ctx)
+ switch c := ctx.(type) {
+ case common.Location:
+ location = c
+ case antlr.Token, antlr.ParserRuleContext:
+ location = p.helper.getLocation(err.GetId())
+ }
+ // Report the error at the expression ID with the supplied format arguments.
+ p.errors.reportErrorAtID(err.GetId(), location, format, args...)
+ return err
+}
+
+// ANTLR Parse listener implementations
+func (p *parser) SyntaxError(recognizer antlr.Recognizer, offendingSymbol any, line, column int, msg string, e antlr.RecognitionException) {
+ l := p.helper.source.NewLocation(line, column)
+ // Hack to keep existing error messages consistent with previous versions of CEL when a reserved word
+ // is used as an identifier. This behavior needs to be overhauled to provide consistent, normalized error
+ // messages out of ANTLR to prevent future breaking changes related to error message content.
+ if strings.Contains(msg, "no viable alternative") {
+ msg = reservedIdentifier.ReplaceAllString(msg, mismatchedReservedIdentifier)
+ }
+ // Ensure that no more than the configured number of syntax errors are reported, as this halts
+ // attempts to recover from a seriously broken expression.
+ if p.errorReports < p.errorReportingLimit {
+ p.errorReports++
+ p.errors.syntaxError(l, msg)
+ } else {
+ tme := &tooManyErrors{errorReportingLimit: p.errorReportingLimit}
+ p.errors.syntaxError(l, tme.Error())
+ panic(tme)
+ }
+}
+
+func (p *parser) ReportAmbiguity(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex int, exact bool, ambigAlts *antlr.BitSet, configs antlr.ATNConfigSet) {
+ // Intentional
+}
+
+func (p *parser) ReportAttemptingFullContext(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex int, conflictingAlts *antlr.BitSet, configs antlr.ATNConfigSet) {
+ // Intentional
+}
+
+func (p *parser) ReportContextSensitivity(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex, prediction int, configs antlr.ATNConfigSet) {
+ // Intentional
+}
+
+func (p *parser) globalCallOrMacro(exprID int64, function string, args ...*exprpb.Expr) *exprpb.Expr {
+ if expr, found := p.expandMacro(exprID, function, nil, args...); found {
+ return expr
+ }
+ return p.helper.newGlobalCall(exprID, function, args...)
+}
+
+func (p *parser) receiverCallOrMacro(exprID int64, function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr {
+ if expr, found := p.expandMacro(exprID, function, target, args...); found {
+ return expr
+ }
+ return p.helper.newReceiverCall(exprID, function, target, args...)
+}
+
+func (p *parser) expandMacro(exprID int64, function string, target *exprpb.Expr, args ...*exprpb.Expr) (*exprpb.Expr, bool) {
+ macro, found := p.macros[makeMacroKey(function, len(args), target != nil)]
+ if !found {
+ macro, found = p.macros[makeVarArgMacroKey(function, target != nil)]
+ if !found {
+ return nil, false
+ }
+ }
+ eh := exprHelperPool.Get().(*exprHelper)
+ defer exprHelperPool.Put(eh)
+ eh.parserHelper = p.helper
+ eh.id = exprID
+ expr, err := macro.Expander()(eh, target, args)
+ // An error indicates that the macro was matched, but the arguments were not well-formed.
+ if err != nil {
+ if err.Location != nil {
+ return p.reportError(err.Location, err.Message), true
+ }
+ return p.reportError(p.helper.getLocation(exprID), err.Message), true
+ }
+ // A nil value from the macro indicates that the macro implementation decided that
+ // an expansion should not be performed.
+ if expr == nil {
+ return nil, false
+ }
+ if p.populateMacroCalls {
+ p.helper.addMacroCall(expr.GetId(), function, target, args...)
+ }
+ return expr, true
+}
+
+func (p *parser) checkAndIncrementRecursionDepth() {
+ p.recursionDepth++
+ if p.recursionDepth > p.maxRecursionDepth {
+ panic(&recursionError{message: "max recursion depth exceeded"})
+ }
+}
+
+func (p *parser) decrementRecursionDepth() {
+ p.recursionDepth--
+}
+
+// unnest traverses down the left-hand side of the parse graph until it encounters the first compound
+// parse node or the first leaf in the parse graph.
+func unnest(tree antlr.ParseTree) antlr.ParseTree {
+ for tree != nil {
+ switch t := tree.(type) {
+ case *gen.ExprContext:
+ // conditionalOr op='?' conditionalOr : expr
+ if t.GetOp() != nil {
+ return t
+ }
+ // conditionalOr
+ tree = t.GetE()
+ case *gen.ConditionalOrContext:
+ // conditionalAnd (ops=|| conditionalAnd)*
+ if len(t.GetOps()) > 0 {
+ return t
+ }
+ // conditionalAnd
+ tree = t.GetE()
+ case *gen.ConditionalAndContext:
+ // relation (ops=&& relation)*
+ if len(t.GetOps()) > 0 {
+ return t
+ }
+ // relation
+ tree = t.GetE()
+ case *gen.RelationContext:
+ // relation op relation
+ if t.GetOp() != nil {
+ return t
+ }
+ // calc
+ tree = t.Calc()
+ case *gen.CalcContext:
+ // calc op calc
+ if t.GetOp() != nil {
+ return t
+ }
+ // unary
+ tree = t.Unary()
+ case *gen.MemberExprContext:
+ // member expands to one of: primary, select, index, or create message
+ tree = t.Member()
+ case *gen.PrimaryExprContext:
+ // primary expands to one of identifier, nested, create list, create struct, literal
+ tree = t.Primary()
+ case *gen.NestedContext:
+ // contains a nested 'expr'
+ tree = t.GetE()
+ case *gen.ConstantLiteralContext:
+ // expands to a primitive literal
+ tree = t.Literal()
+ default:
+ return t
+ }
+ }
+ return tree
+}
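+
+// For example, the parse tree for the bare literal `1` is roughly the chain
+// Expr -> ConditionalOr -> ConditionalAnd -> Relation -> Calc -> Member ->
+// Primary -> ConstantLiteral; unnest walks that chain and hands back the
+// literal context directly.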
+
+var (
+ reservedIdentifier = regexp.MustCompile("no viable alternative at input '.(true|false|null)'")
+ mismatchedReservedIdentifier = "mismatched input '$1' expecting IDENTIFIER"
+)
diff --git a/vendor/github.com/google/cel-go/parser/unescape.go b/vendor/github.com/google/cel-go/parser/unescape.go
new file mode 100644
index 0000000..27c57a9
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/unescape.go
@@ -0,0 +1,237 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+)
+
+// unescape takes a quoted string, unquotes, and unescapes it.
+//
+// This function performs unescaping compatible with GoogleSQL.
+func unescape(value string, isBytes bool) (string, error) {
+ // All strings normalize newlines to the \n representation.
+ value = newlineNormalizer.Replace(value)
+ n := len(value)
+
+ // Nothing to unescape / decode.
+ if n < 2 {
+ return value, fmt.Errorf("unable to unescape string")
+ }
+
+ // Raw string preceded by the 'r|R' prefix.
+ isRawLiteral := false
+ if value[0] == 'r' || value[0] == 'R' {
+ value = value[1:]
+ n = len(value)
+ isRawLiteral = true
+ }
+
+ // Quoted string of some form, must have same first and last char.
+ if value[0] != value[n-1] || (value[0] != '"' && value[0] != '\'') {
+ return value, fmt.Errorf("unable to unescape string")
+ }
+
+ // Normalize the multi-line CEL string representation to a standard
+ // Go quoted string.
+ if n >= 6 {
+ if strings.HasPrefix(value, "'''") {
+ if !strings.HasSuffix(value, "'''") {
+ return value, fmt.Errorf("unable to unescape string")
+ }
+ value = "\"" + value[3:n-3] + "\""
+ } else if strings.HasPrefix(value, `"""`) {
+ if !strings.HasSuffix(value, `"""`) {
+ return value, fmt.Errorf("unable to unescape string")
+ }
+ value = "\"" + value[3:n-3] + "\""
+ }
+ n = len(value)
+ }
+ value = value[1 : n-1]
+ // If there is nothing to escape, then return.
+ if isRawLiteral || !strings.ContainsRune(value, '\\') {
+ return value, nil
+ }
+
+ // Otherwise the string contains escape characters.
+ // The following logic is adapted from `strconv/quote.go`
+ var runeTmp [utf8.UTFMax]byte
+ buf := make([]byte, 0, 3*n/2)
+ for len(value) > 0 {
+ c, encode, rest, err := unescapeChar(value, isBytes)
+ if err != nil {
+ return "", err
+ }
+ value = rest
+ if c < utf8.RuneSelf || !encode {
+ buf = append(buf, byte(c))
+ } else {
+ n := utf8.EncodeRune(runeTmp[:], c)
+ buf = append(buf, runeTmp[:n]...)
+ }
+ }
+ return string(buf), nil
+}
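+
+// Illustrative inputs and outputs (the surrounding quotes are part of the
+// input value):
+//
+//	unescape(`"a\nb"`, false)   // "a", newline, "b"
+//	unescape(`r"a\nb"`, false)  // raw literal: backslash and 'n' kept verbatim
+//	unescape(`'''hi'''`, false) // triple-quoted form yields `hi`
+//	unescape(`"\x68"`, true)    // bytes mode: the single byte 0x68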
+
+// unescapeChar takes a string input and returns the following info:
+//
+// value - the escaped unicode rune at the front of the string.
+// encode - the value should be unicode-encoded
+// tail - the remainder of the input string.
+// err - error value, if the character could not be unescaped.
+//
+// When encode is true the return value may still fit within a single byte,
+// but unicode encoding is attempted which is more expensive than when the
+// value is known to self-represent as a single byte.
+//
+// If isBytes is set, unescape as a bytes literal so octal and hex escapes
+// represent byte values, not unicode code points.
+func unescapeChar(s string, isBytes bool) (value rune, encode bool, tail string, err error) {
+ // 1. Character is not an escape sequence.
+ switch c := s[0]; {
+ case c >= utf8.RuneSelf:
+ r, size := utf8.DecodeRuneInString(s)
+ return r, true, s[size:], nil
+ case c != '\\':
+ return rune(s[0]), false, s[1:], nil
+ }
+
+ // 2. Last character is the start of an escape sequence.
+ if len(s) <= 1 {
+ err = fmt.Errorf("unable to unescape string, found '\\' as last character")
+ return
+ }
+
+ c := s[1]
+ s = s[2:]
+ // 3. Common escape sequences shared with Google SQL
+ switch c {
+ case 'a':
+ value = '\a'
+ case 'b':
+ value = '\b'
+ case 'f':
+ value = '\f'
+ case 'n':
+ value = '\n'
+ case 'r':
+ value = '\r'
+ case 't':
+ value = '\t'
+ case 'v':
+ value = '\v'
+ case '\\':
+ value = '\\'
+ case '\'':
+ value = '\''
+ case '"':
+ value = '"'
+ case '`':
+ value = '`'
+ case '?':
+ value = '?'
+
+ // 4. Unicode escape sequences, reproduced from `strconv/quote.go`
+ case 'x', 'X', 'u', 'U':
+ n := 0
+ encode = true
+ switch c {
+ case 'x', 'X':
+ n = 2
+ encode = !isBytes
+ case 'u':
+ n = 4
+ if isBytes {
+ err = fmt.Errorf("unable to unescape string")
+ return
+ }
+ case 'U':
+ n = 8
+ if isBytes {
+ err = fmt.Errorf("unable to unescape string")
+ return
+ }
+ }
+ var v rune
+ if len(s) < n {
+ err = fmt.Errorf("unable to unescape string")
+ return
+ }
+ for j := 0; j < n; j++ {
+ x, ok := unhex(s[j])
+ if !ok {
+ err = fmt.Errorf("unable to unescape string")
+ return
+ }
+ v = v<<4 | x
+ }
+ s = s[n:]
+ if !isBytes && v > utf8.MaxRune {
+ err = fmt.Errorf("unable to unescape string")
+ return
+ }
+ value = v
+
+ // 5. Octal escape sequences, must be three digits \[0-3][0-7][0-7]
+ case '0', '1', '2', '3':
+ if len(s) < 2 {
+ err = fmt.Errorf("unable to unescape octal sequence in string")
+ return
+ }
+ v := rune(c - '0')
+ for j := 0; j < 2; j++ {
+ x := s[j]
+ if x < '0' || x > '7' {
+ err = fmt.Errorf("unable to unescape octal sequence in string")
+ return
+ }
+ v = v*8 + rune(x-'0')
+ }
+ if !isBytes && v > utf8.MaxRune {
+ err = fmt.Errorf("unable to unescape string")
+ return
+ }
+ value = v
+ s = s[2:]
+ encode = !isBytes
+
+ // Unknown escape sequence.
+ default:
+ err = fmt.Errorf("unable to unescape string")
+ }
+
+ tail = s
+ return
+}
+
+func unhex(b byte) (rune, bool) {
+ c := rune(b)
+ switch {
+ case '0' <= c && c <= '9':
+ return c - '0', true
+ case 'a' <= c && c <= 'f':
+ return c - 'a' + 10, true
+ case 'A' <= c && c <= 'F':
+ return c - 'A' + 10, true
+ }
+ return 0, false
+}
+
+var (
+ newlineNormalizer = strings.NewReplacer("\r\n", "\n", "\r", "\n")
+)
diff --git a/vendor/github.com/google/cel-go/parser/unparser.go b/vendor/github.com/google/cel-go/parser/unparser.go
new file mode 100644
index 0000000..c3c40a0
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/unparser.go
@@ -0,0 +1,637 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/google/cel-go/common/operators"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Unparse takes an input expression and source position information and generates a human-readable
+// expression.
+//
+// Note, unparsing an AST will often generate the same expression as was originally parsed, but some
+// formatting may be lost in translation, notably:
+//
+// - All quoted literals are double-quoted.
+// - Byte literals are represented as octal escapes (same as Google SQL).
+// - Floating point values are converted to the smallest number of digits needed to represent the value.
+// - Spacing around punctuation marks may be lost.
+// - Parentheses will only be applied when they affect operator precedence.
+//
+// This function optionally takes in one or more UnparserOption to alter the unparsing behavior, such as
+// performing word wrapping on expressions.
+func Unparse(expr *exprpb.Expr, info *exprpb.SourceInfo, opts ...UnparserOption) (string, error) {
+ unparserOpts := &unparserOption{
+ wrapOnColumn: defaultWrapOnColumn,
+ wrapAfterColumnLimit: defaultWrapAfterColumnLimit,
+ operatorsToWrapOn: defaultOperatorsToWrapOn,
+ }
+
+ var err error
+ for _, opt := range opts {
+ unparserOpts, err = opt(unparserOpts)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ un := &unparser{
+ info: info,
+ options: unparserOpts,
+ }
+ err = un.visit(expr)
+ if err != nil {
+ return "", err
+ }
+ return un.str.String(), nil
+}
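+
+// Illustrative round trip (a sketch; no wrapping occurs below the default
+// 80-column limit):
+//
+//	p, _ := NewParser(Macros(AllMacros...))
+//	parsed, _ := p.Parse(common.NewTextSource(`a && b`))
+//	out, _ := Unparse(parsed.GetExpr(), parsed.GetSourceInfo())
+//	// out == "a && b"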
+
+// unparser visits an expression to reconstruct a human-readable string from an AST.
+type unparser struct {
+ str strings.Builder
+ info *exprpb.SourceInfo
+ options *unparserOption
+ lastWrappedIndex int
+}
+
+func (un *unparser) visit(expr *exprpb.Expr) error {
+ if expr == nil {
+ return errors.New("unsupported expression")
+ }
+ visited, err := un.visitMaybeMacroCall(expr)
+ if visited || err != nil {
+ return err
+ }
+ switch expr.GetExprKind().(type) {
+ case *exprpb.Expr_CallExpr:
+ return un.visitCall(expr)
+ case *exprpb.Expr_ConstExpr:
+ return un.visitConst(expr)
+ case *exprpb.Expr_IdentExpr:
+ return un.visitIdent(expr)
+ case *exprpb.Expr_ListExpr:
+ return un.visitList(expr)
+ case *exprpb.Expr_SelectExpr:
+ return un.visitSelect(expr)
+ case *exprpb.Expr_StructExpr:
+ return un.visitStruct(expr)
+ default:
+ return fmt.Errorf("unsupported expression: %v", expr)
+ }
+}
+
+func (un *unparser) visitCall(expr *exprpb.Expr) error {
+ c := expr.GetCallExpr()
+ fun := c.GetFunction()
+ switch fun {
+ // ternary operator
+ case operators.Conditional:
+ return un.visitCallConditional(expr)
+ // optional select operator
+ case operators.OptSelect:
+ return un.visitOptSelect(expr)
+ // index operator
+ case operators.Index:
+ return un.visitCallIndex(expr)
+ // optional index operator
+ case operators.OptIndex:
+ return un.visitCallOptIndex(expr)
+ // unary operators
+ case operators.LogicalNot, operators.Negate:
+ return un.visitCallUnary(expr)
+ // binary operators
+ case operators.Add,
+ operators.Divide,
+ operators.Equals,
+ operators.Greater,
+ operators.GreaterEquals,
+ operators.In,
+ operators.Less,
+ operators.LessEquals,
+ operators.LogicalAnd,
+ operators.LogicalOr,
+ operators.Modulo,
+ operators.Multiply,
+ operators.NotEquals,
+ operators.OldIn,
+ operators.Subtract:
+ return un.visitCallBinary(expr)
+ // standard function calls.
+ default:
+ return un.visitCallFunc(expr)
+ }
+}
+
+func (un *unparser) visitCallBinary(expr *exprpb.Expr) error {
+ c := expr.GetCallExpr()
+ fun := c.GetFunction()
+ args := c.GetArgs()
+ lhs := args[0]
+ // add parens if the current operator is lower precedence than the lhs expr operator.
+ lhsParen := isComplexOperatorWithRespectTo(fun, lhs)
+ rhs := args[1]
+ // add parens if the current operator is lower precedence than the rhs expr operator,
+ // or the same precedence and the operator is left recursive.
+ rhsParen := isComplexOperatorWithRespectTo(fun, rhs)
+ if !rhsParen && isLeftRecursive(fun) {
+ rhsParen = isSamePrecedence(fun, rhs)
+ }
+ err := un.visitMaybeNested(lhs, lhsParen)
+ if err != nil {
+ return err
+ }
+ unmangled, found := operators.FindReverseBinaryOperator(fun)
+ if !found {
+ return fmt.Errorf("cannot unmangle operator: %s", fun)
+ }
+
+ un.writeOperatorWithWrapping(fun, unmangled)
+ return un.visitMaybeNested(rhs, rhsParen)
+}
+
+func (un *unparser) visitCallConditional(expr *exprpb.Expr) error {
+ c := expr.GetCallExpr()
+ args := c.GetArgs()
+ // add parens if operand is a conditional itself.
+ nested := isSamePrecedence(operators.Conditional, args[0]) ||
+ isComplexOperator(args[0])
+ err := un.visitMaybeNested(args[0], nested)
+ if err != nil {
+ return err
+ }
+ un.writeOperatorWithWrapping(operators.Conditional, "?")
+
+ // add parens if operand is a conditional itself.
+ nested = isSamePrecedence(operators.Conditional, args[1]) ||
+ isComplexOperator(args[1])
+ err = un.visitMaybeNested(args[1], nested)
+ if err != nil {
+ return err
+ }
+
+ un.str.WriteString(" : ")
+ // add parens if operand is a conditional itself.
+ nested = isSamePrecedence(operators.Conditional, args[2]) ||
+ isComplexOperator(args[2])
+
+ return un.visitMaybeNested(args[2], nested)
+}
+
+func (un *unparser) visitCallFunc(expr *exprpb.Expr) error {
+ c := expr.GetCallExpr()
+ fun := c.GetFunction()
+ args := c.GetArgs()
+ if c.GetTarget() != nil {
+ nested := isBinaryOrTernaryOperator(c.GetTarget())
+ err := un.visitMaybeNested(c.GetTarget(), nested)
+ if err != nil {
+ return err
+ }
+ un.str.WriteString(".")
+ }
+ un.str.WriteString(fun)
+ un.str.WriteString("(")
+ for i, arg := range args {
+ err := un.visit(arg)
+ if err != nil {
+ return err
+ }
+ if i < len(args)-1 {
+ un.str.WriteString(", ")
+ }
+ }
+ un.str.WriteString(")")
+ return nil
+}
+
+func (un *unparser) visitCallIndex(expr *exprpb.Expr) error {
+ return un.visitCallIndexInternal(expr, "[")
+}
+
+func (un *unparser) visitCallOptIndex(expr *exprpb.Expr) error {
+ return un.visitCallIndexInternal(expr, "[?")
+}
+
+func (un *unparser) visitCallIndexInternal(expr *exprpb.Expr, op string) error {
+ c := expr.GetCallExpr()
+ args := c.GetArgs()
+ nested := isBinaryOrTernaryOperator(args[0])
+ err := un.visitMaybeNested(args[0], nested)
+ if err != nil {
+ return err
+ }
+ un.str.WriteString(op)
+ err = un.visit(args[1])
+ if err != nil {
+ return err
+ }
+ un.str.WriteString("]")
+ return nil
+}
+
+func (un *unparser) visitCallUnary(expr *exprpb.Expr) error {
+ c := expr.GetCallExpr()
+ fun := c.GetFunction()
+ args := c.GetArgs()
+ unmangled, found := operators.FindReverse(fun)
+ if !found {
+ return fmt.Errorf("cannot unmangle operator: %s", fun)
+ }
+ un.str.WriteString(unmangled)
+ nested := isComplexOperator(args[0])
+ return un.visitMaybeNested(args[0], nested)
+}
+
+func (un *unparser) visitConst(expr *exprpb.Expr) error {
+ c := expr.GetConstExpr()
+ switch c.GetConstantKind().(type) {
+ case *exprpb.Constant_BoolValue:
+ un.str.WriteString(strconv.FormatBool(c.GetBoolValue()))
+ case *exprpb.Constant_BytesValue:
+ // bytes constants are surrounded with b""
+ b := c.GetBytesValue()
+ un.str.WriteString(`b"`)
+ un.str.WriteString(bytesToOctets(b))
+ un.str.WriteString(`"`)
+ case *exprpb.Constant_DoubleValue:
+ // represent the float using the minimum required digits
+ d := strconv.FormatFloat(c.GetDoubleValue(), 'g', -1, 64)
+ un.str.WriteString(d)
+ if !strings.Contains(d, ".") {
+ un.str.WriteString(".0")
+ }
+ case *exprpb.Constant_Int64Value:
+ i := strconv.FormatInt(c.GetInt64Value(), 10)
+ un.str.WriteString(i)
+ case *exprpb.Constant_NullValue:
+ un.str.WriteString("null")
+ case *exprpb.Constant_StringValue:
+ // strings will be double quoted with quotes escaped.
+ un.str.WriteString(strconv.Quote(c.GetStringValue()))
+ case *exprpb.Constant_Uint64Value:
+ // uint literals have a 'u' suffix.
+ ui := strconv.FormatUint(c.GetUint64Value(), 10)
+ un.str.WriteString(ui)
+ un.str.WriteString("u")
+ default:
+ return fmt.Errorf("unsupported constant: %v", expr)
+ }
+ return nil
+}
+
+func (un *unparser) visitIdent(expr *exprpb.Expr) error {
+ un.str.WriteString(expr.GetIdentExpr().GetName())
+ return nil
+}
+
+func (un *unparser) visitList(expr *exprpb.Expr) error {
+ l := expr.GetListExpr()
+ elems := l.GetElements()
+ optIndices := make(map[int]bool, len(elems))
+ for _, idx := range l.GetOptionalIndices() {
+ optIndices[int(idx)] = true
+ }
+ un.str.WriteString("[")
+ for i, elem := range elems {
+ if optIndices[i] {
+ un.str.WriteString("?")
+ }
+ err := un.visit(elem)
+ if err != nil {
+ return err
+ }
+ if i < len(elems)-1 {
+ un.str.WriteString(", ")
+ }
+ }
+ un.str.WriteString("]")
+ return nil
+}
+
+func (un *unparser) visitOptSelect(expr *exprpb.Expr) error {
+ c := expr.GetCallExpr()
+ args := c.GetArgs()
+ operand := args[0]
+ field := args[1].GetConstExpr().GetStringValue()
+ return un.visitSelectInternal(operand, false, ".?", field)
+}
+
+func (un *unparser) visitSelect(expr *exprpb.Expr) error {
+ sel := expr.GetSelectExpr()
+ return un.visitSelectInternal(sel.GetOperand(), sel.GetTestOnly(), ".", sel.GetField())
+}
+
+func (un *unparser) visitSelectInternal(operand *exprpb.Expr, testOnly bool, op string, field string) error {
+ // handle the case when the select expression was generated by the has() macro.
+ if testOnly {
+ un.str.WriteString("has(")
+ }
+ nested := !testOnly && isBinaryOrTernaryOperator(operand)
+ err := un.visitMaybeNested(operand, nested)
+ if err != nil {
+ return err
+ }
+ un.str.WriteString(op)
+ un.str.WriteString(field)
+ if testOnly {
+ un.str.WriteString(")")
+ }
+ return nil
+}
+
+func (un *unparser) visitStruct(expr *exprpb.Expr) error {
+ s := expr.GetStructExpr()
+ // If the message name is non-empty, then this should be treated as message construction.
+ if s.GetMessageName() != "" {
+ return un.visitStructMsg(expr)
+ }
+ // Otherwise, build a map.
+ return un.visitStructMap(expr)
+}
+
+func (un *unparser) visitStructMsg(expr *exprpb.Expr) error {
+ m := expr.GetStructExpr()
+ entries := m.GetEntries()
+ un.str.WriteString(m.GetMessageName())
+ un.str.WriteString("{")
+ for i, entry := range entries {
+ f := entry.GetFieldKey()
+ if entry.GetOptionalEntry() {
+ un.str.WriteString("?")
+ }
+ un.str.WriteString(f)
+ un.str.WriteString(": ")
+ v := entry.GetValue()
+ err := un.visit(v)
+ if err != nil {
+ return err
+ }
+ if i < len(entries)-1 {
+ un.str.WriteString(", ")
+ }
+ }
+ un.str.WriteString("}")
+ return nil
+}
+
+func (un *unparser) visitStructMap(expr *exprpb.Expr) error {
+ m := expr.GetStructExpr()
+ entries := m.GetEntries()
+ un.str.WriteString("{")
+ for i, entry := range entries {
+ k := entry.GetMapKey()
+ if entry.GetOptionalEntry() {
+ un.str.WriteString("?")
+ }
+ err := un.visit(k)
+ if err != nil {
+ return err
+ }
+ un.str.WriteString(": ")
+ v := entry.GetValue()
+ err = un.visit(v)
+ if err != nil {
+ return err
+ }
+ if i < len(entries)-1 {
+ un.str.WriteString(", ")
+ }
+ }
+ un.str.WriteString("}")
+ return nil
+}
+
+func (un *unparser) visitMaybeMacroCall(expr *exprpb.Expr) (bool, error) {
+ macroCalls := un.info.GetMacroCalls()
+ call, found := macroCalls[expr.GetId()]
+ if !found {
+ return false, nil
+ }
+ return true, un.visit(call)
+}
+
+func (un *unparser) visitMaybeNested(expr *exprpb.Expr, nested bool) error {
+ if nested {
+ un.str.WriteString("(")
+ }
+ err := un.visit(expr)
+ if err != nil {
+ return err
+ }
+ if nested {
+ un.str.WriteString(")")
+ }
+ return nil
+}
+
+// isLeftRecursive indicates whether the parser resolves the call in a left-recursive manner as
+// this can have an effect of how parentheses affect the order of operations in the AST.
+func isLeftRecursive(op string) bool {
+ return op != operators.LogicalAnd && op != operators.LogicalOr
+}
+
+// isSamePrecedence indicates whether the precedence of the input operator is the same as the
+// precedence of the (possible) operation represented in the input Expr.
+//
+// If the expr is not a Call, the result is false.
+func isSamePrecedence(op string, expr *exprpb.Expr) bool {
+ if expr.GetCallExpr() == nil {
+ return false
+ }
+ c := expr.GetCallExpr()
+ other := c.GetFunction()
+ return operators.Precedence(op) == operators.Precedence(other)
+}
+
+// isLowerPrecedence indicates whether the precedence of the input operator is lower precedence
+// than the (possible) operation represented in the input Expr.
+//
+// If the expr is not a Call, the result is false.
+func isLowerPrecedence(op string, expr *exprpb.Expr) bool {
+ c := expr.GetCallExpr()
+ other := c.GetFunction()
+ return operators.Precedence(op) < operators.Precedence(other)
+}
+
+// Indicates whether the expr is a complex operator, i.e., a call expression
+// with 2 or more arguments.
+func isComplexOperator(expr *exprpb.Expr) bool {
+ if expr.GetCallExpr() != nil && len(expr.GetCallExpr().GetArgs()) >= 2 {
+ return true
+ }
+ return false
+}
+
+// Indicates whether expr is a complex operation compared to the operator op.
+// expr is *not* considered complex if it is not a call expression, has
+// fewer than two arguments, or has a higher precedence than op.
+func isComplexOperatorWithRespectTo(op string, expr *exprpb.Expr) bool {
+ if expr.GetCallExpr() == nil || len(expr.GetCallExpr().GetArgs()) < 2 {
+ return false
+ }
+ return isLowerPrecedence(op, expr)
+}
+
+// Indicates whether this is a binary or ternary operator.
+func isBinaryOrTernaryOperator(expr *exprpb.Expr) bool {
+ if expr.GetCallExpr() == nil || len(expr.GetCallExpr().GetArgs()) < 2 {
+ return false
+ }
+ _, isBinaryOp := operators.FindReverseBinaryOperator(expr.GetCallExpr().GetFunction())
+ return isBinaryOp || isSamePrecedence(operators.Conditional, expr)
+}
+
+// bytesToOctets converts a byte sequence to a string using a three-digit octal escape
+// per byte.
+func bytesToOctets(byteVal []byte) string {
+ var b strings.Builder
+ for _, c := range byteVal {
+ fmt.Fprintf(&b, "\\%03o", c)
+ }
+ return b.String()
+}
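+
+// For example, bytesToOctets([]byte("hi")) renders as `\150\151`
+// ('h' is 0x68 = octal 150, 'i' is 0x69 = octal 151).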
+
+// writeOperatorWithWrapping outputs the operator and inserts a newline for operators configured
+// in the unparser options.
+func (un *unparser) writeOperatorWithWrapping(fun string, unmangled string) bool {
+ _, wrapOperatorExists := un.options.operatorsToWrapOn[fun]
+ lineLength := un.str.Len() - un.lastWrappedIndex + len(fun)
+
+ if wrapOperatorExists && lineLength >= un.options.wrapOnColumn {
+ un.lastWrappedIndex = un.str.Len()
+ // wrapAfterColumnLimit flag dictates whether the newline is placed
+ // before or after the operator
+ if un.options.wrapAfterColumnLimit {
+ // Input: a && b
+ // Output: a &&\nb
+ un.str.WriteString(" ")
+ un.str.WriteString(unmangled)
+ un.str.WriteString("\n")
+ } else {
+ // Input: a && b
+ // Output: a\n&& b
+ un.str.WriteString("\n")
+ un.str.WriteString(unmangled)
+ un.str.WriteString(" ")
+ }
+ return true
+ }
+ un.str.WriteString(" ")
+ un.str.WriteString(unmangled)
+ un.str.WriteString(" ")
+ return false
+}
+
+// Defined defaults for the unparser options
+var (
+ defaultWrapOnColumn = 80
+ defaultWrapAfterColumnLimit = true
+ defaultOperatorsToWrapOn = map[string]bool{
+ operators.LogicalAnd: true,
+ operators.LogicalOr: true,
+ }
+)
+
+// UnparserOption is a functional option for configuring the output formatting
+// of the Unparse function.
+type UnparserOption func(*unparserOption) (*unparserOption, error)
+
+// Internal representation of the UnparserOption type
+type unparserOption struct {
+ wrapOnColumn int
+ operatorsToWrapOn map[string]bool
+ wrapAfterColumnLimit bool
+}
+
+// WrapOnColumn wraps the output expression when its string length exceeds the specified column
+// limit. Wrapping is applied at the operators set by the WrapOnOperators function; by default,
+// "&&" and "||" are wrapped.
+//
+// Example usage:
+//
+// Unparse(expr, sourceInfo, WrapOnColumn(40), WrapOnOperators(Operators.LogicalAnd))
+//
+// This will insert a newline immediately after the logical AND operator for the below example input:
+//
+// Input:
+// 'my-principal-group' in request.auth.claims && request.auth.claims.iat > now - duration('5m')
+//
+// Output:
+// 'my-principal-group' in request.auth.claims &&
+// request.auth.claims.iat > now - duration('5m')
+func WrapOnColumn(col int) UnparserOption {
+ return func(opt *unparserOption) (*unparserOption, error) {
+ if col < 1 {
+ return nil, fmt.Errorf("Invalid unparser option. Wrap column value must be greater than or equal to 1. Got %v instead", col)
+ }
+ opt.wrapOnColumn = col
+ return opt, nil
+ }
+}
+
+// WrapOnOperators specifies which operators to perform word wrapping on when the output
+// expression's string length exceeds the column limit set by the WrapOnColumn function.
+//
+// Word wrapping is supported on non-unary symbolic operators. Refer to operators.go for the full list.
+//
+// This will replace any previously supplied operators instead of merging them.
+func WrapOnOperators(symbols ...string) UnparserOption {
+ return func(opt *unparserOption) (*unparserOption, error) {
+ opt.operatorsToWrapOn = make(map[string]bool)
+ for _, symbol := range symbols {
+ _, found := operators.FindReverse(symbol)
+ if !found {
+ return nil, fmt.Errorf("Invalid unparser option. Unsupported operator: %s", symbol)
+ }
+ arity := operators.Arity(symbol)
+ if arity < 2 {
+ return nil, fmt.Errorf("Invalid unparser option. Unary operators are unsupported: %s", symbol)
+ }
+
+ opt.operatorsToWrapOn[symbol] = true
+ }
+
+ return opt, nil
+ }
+}
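+
+// For instance (an illustrative sketch, with expr and sourceInfo as in the
+// examples above), the following restricts wrapping to the logical AND
+// operator only, replacing the default "&&" and "||" set:
+//
+//	out, err := Unparse(expr, sourceInfo, WrapOnColumn(40), WrapOnOperators(operators.LogicalAnd))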
+
+// WrapAfterColumnLimit dictates whether to insert a newline before or after the specified operator
+// when word wrapping is performed.
+//
+// Example usage:
+//
+// Unparse(expr, sourceInfo, WrapOnColumn(40), WrapOnOperators(operators.LogicalAnd), WrapAfterColumnLimit(false))
+//
+// This will insert a newline immediately before the logical AND operator for the below example input, ensuring
+// that the length of a line never exceeds the specified column limit:
+//
+// Input:
+// 'my-principal-group' in request.auth.claims && request.auth.claims.iat > now - duration('5m')
+//
+// Output:
+// 'my-principal-group' in request.auth.claims
+// && request.auth.claims.iat > now - duration('5m')
+func WrapAfterColumnLimit(wrapAfter bool) UnparserOption {
+ return func(opt *unparserOption) (*unparserOption, error) {
+ opt.wrapAfterColumnLimit = wrapAfter
+ return opt, nil
+ }
+}
diff --git a/vendor/github.com/google/go-cmp/LICENSE b/vendor/github.com/google/go-cmp/LICENSE
new file mode 100644
index 0000000..32017f8
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2017 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go
new file mode 100644
index 0000000..0f5b8a4
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/compare.go
@@ -0,0 +1,671 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cmp determines equality of values.
+//
+// This package is intended to be a more powerful and safer alternative to
+// [reflect.DeepEqual] for comparing whether two values are semantically equal.
+// It is intended to only be used in tests, as performance is not a goal and
+// it may panic if it cannot compare the values. Its propensity towards
+// panicking means that it is unsuitable for production environments where a
+// spurious panic may be fatal.
+//
+// The primary features of cmp are:
+//
+// - When the default behavior of equality does not suit the test's needs,
+// custom equality functions can override the equality operation.
+// For example, an equality function may report floats as equal so long as
+// they are within some tolerance of each other.
+//
+// - Types with an Equal method (e.g., [time.Time.Equal]) may use that method
+// to determine equality. This allows package authors to determine
+// the equality operation for the types that they define.
+//
+// - If no custom equality functions are used and no Equal method is defined,
+// equality is determined by recursively comparing the primitive kinds on
+// both values, much like [reflect.DeepEqual]. Unlike [reflect.DeepEqual],
+// unexported fields are not compared by default; they result in panics
+// unless suppressed by using an [Ignore] option
+// (see [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported])
+// or explicitly compared using the [Exporter] option.
+package cmp
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/google/go-cmp/cmp/internal/diff"
+ "github.com/google/go-cmp/cmp/internal/function"
+ "github.com/google/go-cmp/cmp/internal/value"
+)
+
+// TODO(≥go1.18): Use any instead of interface{}.
+
+// Equal reports whether x and y are equal by recursively applying the
+// following rules in the given order to x and y and all of their sub-values:
+//
+// - Let S be the set of all [Ignore], [Transformer], and [Comparer] options that
+// remain after applying all path filters, value filters, and type filters.
+// If at least one [Ignore] exists in S, then the comparison is ignored.
+// If the number of [Transformer] and [Comparer] options in S is non-zero,
+// then Equal panics because it is ambiguous which option to use.
+// If S contains a single [Transformer], then use that to transform
+// the current values and recursively call Equal on the output values.
+// If S contains a single [Comparer], then use that to compare the current values.
+// Otherwise, evaluation proceeds to the next rule.
+//
+// - If the values have an Equal method of the form "(T) Equal(T) bool" or
+// "(T) Equal(I) bool" where T is assignable to I, then use the result of
+// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and
+// evaluation proceeds to the next rule.
+//
+// - Lastly, try to compare x and y based on their basic kinds.
+// Simple kinds like booleans, integers, floats, complex numbers, strings,
+// and channels are compared using the equivalent of the == operator in Go.
+// Functions are only equal if they are both nil, otherwise they are unequal.
+//
+// Structs are equal if recursively calling Equal on all fields reports equal.
+// If a struct contains unexported fields, Equal panics unless an [Ignore] option
+// (e.g., [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]) ignores that field
+// or the [Exporter] option explicitly permits comparing the unexported field.
+//
+// Slices are equal if they are both nil or both non-nil, where recursively
+// calling Equal on all non-ignored slice or array elements reports equal.
+// Empty non-nil slices and nil slices are not equal; to equate empty slices,
+// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty].
+//
+// Maps are equal if they are both nil or both non-nil, where recursively
+// calling Equal on all non-ignored map entries reports equal.
+// Map keys are equal according to the == operator.
+// To use custom comparisons for map keys, consider using
+// [github.com/google/go-cmp/cmp/cmpopts.SortMaps].
+// Empty non-nil maps and nil maps are not equal; to equate empty maps,
+// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty].
+//
+// Pointers and interfaces are equal if they are both nil or both non-nil,
+// where they have the same underlying concrete type and recursively
+// calling Equal on the underlying values reports equal.
+//
+// Before recursing into a pointer, slice element, or map, the current path
+// is checked to detect whether the address has already been visited.
+// If there is a cycle, then the pointed at values are considered equal
+// only if both addresses were previously visited in the same path step.
+func Equal(x, y interface{}, opts ...Option) bool {
+ s := newState(opts)
+ s.compareAny(rootStep(x, y))
+ return s.result.Equal()
+}
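+
+// A minimal illustrative sketch of Equal with a custom [Comparer]
+// (assumes "math" is imported; values within 0.01 compare equal):
+//
+//	opt := Comparer(func(x, y float64) bool {
+//		return math.Abs(x-y) < 0.01
+//	})
+//	Equal(1.0, 1.001, opt) // true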
+
+// Diff returns a human-readable report of the differences between two values:
+// y - x. It returns an empty string if and only if Equal returns true for the
+// same input values and options.
+//
+// The output is displayed as a literal in pseudo-Go syntax.
+// At the start of each line, a "-" prefix indicates an element removed from x,
+// a "+" prefix to indicates an element added from y, and the lack of a prefix
+// indicates an element common to both x and y. If possible, the output
+// uses the fmt.Stringer.String or error.Error methods to produce more
+// human-readable output. In such cases, the string is prefixed with either an
+// 's' or 'e' character, respectively, to indicate that the method was called.
+//
+// Do not depend on this output being stable. If you need the ability to
+// programmatically interpret the difference, consider using a custom Reporter.
+func Diff(x, y interface{}, opts ...Option) string {
+ s := newState(opts)
+
+ // Optimization: If there are no other reporters, we can optimize for the
+ // common case where the result is equal (and thus no reported difference).
+ // This avoids the expensive construction of a difference tree.
+ if len(s.reporters) == 0 {
+ s.compareAny(rootStep(x, y))
+ if s.result.Equal() {
+ return ""
+ }
+ s.result = diff.Result{} // Reset results
+ }
+
+ r := new(defaultReporter)
+ s.reporters = append(s.reporters, reporter{r})
+ s.compareAny(rootStep(x, y))
+ d := r.String()
+ if (d == "") != s.result.Equal() {
+ panic("inconsistent difference and equality results")
+ }
+ return d
+}
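+
+// A common illustrative pattern in tests (t is a *testing.T):
+//
+//	if diff := Diff(want, got); diff != "" {
+//		t.Errorf("mismatch (-want +got):\n%s", diff)
+//	}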
+
+// rootStep constructs the first path step. If x and y have differing types,
+// then they are stored within an empty interface type.
+func rootStep(x, y interface{}) PathStep {
+ vx := reflect.ValueOf(x)
+ vy := reflect.ValueOf(y)
+
+ // If the inputs are different types, auto-wrap them in an empty interface
+ // so that they have the same parent type.
+ var t reflect.Type
+ if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() {
+ t = anyType
+ if vx.IsValid() {
+ vvx := reflect.New(t).Elem()
+ vvx.Set(vx)
+ vx = vvx
+ }
+ if vy.IsValid() {
+ vvy := reflect.New(t).Elem()
+ vvy.Set(vy)
+ vy = vvy
+ }
+ } else {
+ t = vx.Type()
+ }
+
+ return &pathStep{t, vx, vy}
+}
+
+type state struct {
+ // These fields represent the "comparison state".
+ // Calling statelessCompare must not result in observable changes to these.
+ result diff.Result // The current result of comparison
+ curPath Path // The current path in the value tree
+ curPtrs pointerPath // The current set of visited pointers
+ reporters []reporter // Optional reporters
+
+	// recChecker checks for infinite cycles that repeatedly apply the same
+	// set of transformers to their own output.
+ recChecker recChecker
+
+ // dynChecker triggers pseudo-random checks for option correctness.
+ // It is safe for statelessCompare to mutate this value.
+ dynChecker dynChecker
+
+ // These fields, once set by processOption, will not change.
+ exporters []exporter // List of exporters for structs with unexported fields
+ opts Options // List of all fundamental and filter options
+}
+
+func newState(opts []Option) *state {
+ // Always ensure a validator option exists to validate the inputs.
+ s := &state{opts: Options{validator{}}}
+ s.curPtrs.Init()
+ s.processOption(Options(opts))
+ return s
+}
+
+func (s *state) processOption(opt Option) {
+ switch opt := opt.(type) {
+ case nil:
+ case Options:
+ for _, o := range opt {
+ s.processOption(o)
+ }
+ case coreOption:
+ type filtered interface {
+ isFiltered() bool
+ }
+ if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() {
+ panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt))
+ }
+ s.opts = append(s.opts, opt)
+ case exporter:
+ s.exporters = append(s.exporters, opt)
+ case reporter:
+ s.reporters = append(s.reporters, opt)
+ default:
+ panic(fmt.Sprintf("unknown option %T", opt))
+ }
+}
+
+// statelessCompare compares two values and returns the result.
+// This function is stateless in that it does not alter the current result,
+// or output to any registered reporters.
+func (s *state) statelessCompare(step PathStep) diff.Result {
+ // We do not save and restore curPath and curPtrs because all of the
+ // compareX methods should properly push and pop from them.
+ // It is an implementation bug if the contents of the paths differ from
+ // when calling this function to when returning from it.
+
+ oldResult, oldReporters := s.result, s.reporters
+ s.result = diff.Result{} // Reset result
+ s.reporters = nil // Remove reporters to avoid spurious printouts
+ s.compareAny(step)
+ res := s.result
+ s.result, s.reporters = oldResult, oldReporters
+ return res
+}
+
+func (s *state) compareAny(step PathStep) {
+ // Update the path stack.
+ s.curPath.push(step)
+ defer s.curPath.pop()
+ for _, r := range s.reporters {
+ r.PushStep(step)
+ defer r.PopStep()
+ }
+ s.recChecker.Check(s.curPath)
+
+ // Cycle-detection for slice elements (see NOTE in compareSlice).
+ t := step.Type()
+ vx, vy := step.Values()
+ if si, ok := step.(SliceIndex); ok && si.isSlice && vx.IsValid() && vy.IsValid() {
+ px, py := vx.Addr(), vy.Addr()
+ if eq, visited := s.curPtrs.Push(px, py); visited {
+ s.report(eq, reportByCycle)
+ return
+ }
+ defer s.curPtrs.Pop(px, py)
+ }
+
+ // Rule 1: Check whether an option applies on this node in the value tree.
+ if s.tryOptions(t, vx, vy) {
+ return
+ }
+
+ // Rule 2: Check whether the type has a valid Equal method.
+ if s.tryMethod(t, vx, vy) {
+ return
+ }
+
+ // Rule 3: Compare based on the underlying kind.
+ switch t.Kind() {
+ case reflect.Bool:
+ s.report(vx.Bool() == vy.Bool(), 0)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ s.report(vx.Int() == vy.Int(), 0)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ s.report(vx.Uint() == vy.Uint(), 0)
+ case reflect.Float32, reflect.Float64:
+ s.report(vx.Float() == vy.Float(), 0)
+ case reflect.Complex64, reflect.Complex128:
+ s.report(vx.Complex() == vy.Complex(), 0)
+ case reflect.String:
+ s.report(vx.String() == vy.String(), 0)
+ case reflect.Chan, reflect.UnsafePointer:
+ s.report(vx.Pointer() == vy.Pointer(), 0)
+ case reflect.Func:
+ s.report(vx.IsNil() && vy.IsNil(), 0)
+ case reflect.Struct:
+ s.compareStruct(t, vx, vy)
+ case reflect.Slice, reflect.Array:
+ s.compareSlice(t, vx, vy)
+ case reflect.Map:
+ s.compareMap(t, vx, vy)
+ case reflect.Ptr:
+ s.comparePtr(t, vx, vy)
+ case reflect.Interface:
+ s.compareInterface(t, vx, vy)
+ default:
+ panic(fmt.Sprintf("%v kind not handled", t.Kind()))
+ }
+}
+
+func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool {
+ // Evaluate all filters and apply the remaining options.
+ if opt := s.opts.filter(s, t, vx, vy); opt != nil {
+ opt.apply(s, vx, vy)
+ return true
+ }
+ return false
+}
+
+func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool {
+ // Check if this type even has an Equal method.
+ m, ok := t.MethodByName("Equal")
+ if !ok || !function.IsType(m.Type, function.EqualAssignable) {
+ return false
+ }
+
+ eq := s.callTTBFunc(m.Func, vx, vy)
+ s.report(eq, reportByMethod)
+ return true
+}
+
+func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value {
+ if !s.dynChecker.Next() {
+ return f.Call([]reflect.Value{v})[0]
+ }
+
+ // Run the function twice and ensure that we get the same results back.
+ // We run in goroutines so that the race detector (if enabled) can detect
+ // unsafe mutations to the input.
+ c := make(chan reflect.Value)
+ go detectRaces(c, f, v)
+ got := <-c
+ want := f.Call([]reflect.Value{v})[0]
+ if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() {
+ // To avoid false-positives with non-reflexive equality operations,
+ // we sanity check whether a value is equal to itself.
+ if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() {
+ return want
+ }
+ panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f)))
+ }
+ return want
+}
+
+func (s *state) callTTBFunc(f, x, y reflect.Value) bool {
+ if !s.dynChecker.Next() {
+ return f.Call([]reflect.Value{x, y})[0].Bool()
+ }
+
+ // Swapping the input arguments is sufficient to check that
+ // f is symmetric and deterministic.
+ // We run in goroutines so that the race detector (if enabled) can detect
+ // unsafe mutations to the input.
+ c := make(chan reflect.Value)
+ go detectRaces(c, f, y, x)
+ got := <-c
+ want := f.Call([]reflect.Value{x, y})[0].Bool()
+ if !got.IsValid() || got.Bool() != want {
+ panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f)))
+ }
+ return want
+}
+
+func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) {
+ var ret reflect.Value
+ defer func() {
+ recover() // Ignore panics, let the other call to f panic instead
+ c <- ret
+ }()
+ ret = f.Call(vs)[0]
+}
+
+func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) {
+ var addr bool
+ var vax, vay reflect.Value // Addressable versions of vx and vy
+
+ var mayForce, mayForceInit bool
+ step := StructField{&structField{}}
+ for i := 0; i < t.NumField(); i++ {
+ step.typ = t.Field(i).Type
+ step.vx = vx.Field(i)
+ step.vy = vy.Field(i)
+ step.name = t.Field(i).Name
+ step.idx = i
+ step.unexported = !isExported(step.name)
+ if step.unexported {
+ if step.name == "_" {
+ continue
+ }
+ // Defer checking of unexported fields until later to give an
+ // Ignore a chance to ignore the field.
+ if !vax.IsValid() || !vay.IsValid() {
+ // For retrieveUnexportedField to work, the parent struct must
+ // be addressable. Create a new copy of the values if
+ // necessary to make them addressable.
+ addr = vx.CanAddr() || vy.CanAddr()
+ vax = makeAddressable(vx)
+ vay = makeAddressable(vy)
+ }
+ if !mayForceInit {
+ for _, xf := range s.exporters {
+ mayForce = mayForce || xf(t)
+ }
+ mayForceInit = true
+ }
+ step.mayForce = mayForce
+ step.paddr = addr
+ step.pvx = vax
+ step.pvy = vay
+ step.field = t.Field(i)
+ }
+ s.compareAny(step)
+ }
+}
+
+func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) {
+ isSlice := t.Kind() == reflect.Slice
+ if isSlice && (vx.IsNil() || vy.IsNil()) {
+ s.report(vx.IsNil() && vy.IsNil(), 0)
+ return
+ }
+
+ // NOTE: It is incorrect to call curPtrs.Push on the slice header pointer
+	// since a slice represents a list of pointers, rather than a single pointer.
+ // The pointer checking logic must be handled on a per-element basis
+ // in compareAny.
+ //
+ // A slice header (see reflect.SliceHeader) in Go is a tuple of a starting
+ // pointer P, a length N, and a capacity C. Supposing each slice element has
+ // a memory size of M, then the slice is equivalent to the list of pointers:
+ // [P+i*M for i in range(N)]
+ //
+ // For example, v[:0] and v[:1] are slices with the same starting pointer,
+ // but they are clearly different values. Using the slice pointer alone
+ // violates the assumption that equal pointers implies equal values.
+
+ step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}, isSlice: isSlice}}
+ withIndexes := func(ix, iy int) SliceIndex {
+ if ix >= 0 {
+ step.vx, step.xkey = vx.Index(ix), ix
+ } else {
+ step.vx, step.xkey = reflect.Value{}, -1
+ }
+ if iy >= 0 {
+ step.vy, step.ykey = vy.Index(iy), iy
+ } else {
+ step.vy, step.ykey = reflect.Value{}, -1
+ }
+ return step
+ }
+
+ // Ignore options are able to ignore missing elements in a slice.
+	// However, detecting these reliably requires an optimal differencing
+	// algorithm, which diff.Difference is not.
+ //
+ // Instead, we first iterate through both slices to detect which elements
+	// would be ignored if standing alone. The indexes of non-discarded elements
+	// are stored in a separate slice, on which diffing is then performed.
+ var indexesX, indexesY []int
+ var ignoredX, ignoredY []bool
+ for ix := 0; ix < vx.Len(); ix++ {
+ ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0
+ if !ignored {
+ indexesX = append(indexesX, ix)
+ }
+ ignoredX = append(ignoredX, ignored)
+ }
+ for iy := 0; iy < vy.Len(); iy++ {
+ ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0
+ if !ignored {
+ indexesY = append(indexesY, iy)
+ }
+ ignoredY = append(ignoredY, ignored)
+ }
+
+ // Compute an edit-script for slices vx and vy (excluding ignored elements).
+ edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result {
+ return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy]))
+ })
+
+ // Replay the ignore-scripts and the edit-script.
+ var ix, iy int
+ for ix < vx.Len() || iy < vy.Len() {
+ var e diff.EditType
+ switch {
+ case ix < len(ignoredX) && ignoredX[ix]:
+ e = diff.UniqueX
+ case iy < len(ignoredY) && ignoredY[iy]:
+ e = diff.UniqueY
+ default:
+ e, edits = edits[0], edits[1:]
+ }
+ switch e {
+ case diff.UniqueX:
+ s.compareAny(withIndexes(ix, -1))
+ ix++
+ case diff.UniqueY:
+ s.compareAny(withIndexes(-1, iy))
+ iy++
+ default:
+ s.compareAny(withIndexes(ix, iy))
+ ix++
+ iy++
+ }
+ }
+}
+
+func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) {
+ if vx.IsNil() || vy.IsNil() {
+ s.report(vx.IsNil() && vy.IsNil(), 0)
+ return
+ }
+
+ // Cycle-detection for maps.
+ if eq, visited := s.curPtrs.Push(vx, vy); visited {
+ s.report(eq, reportByCycle)
+ return
+ }
+ defer s.curPtrs.Pop(vx, vy)
+
+	// We combine and sort the keys of both maps so that we can perform the
+	// comparisons in a deterministic order.
+ step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}}
+ for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) {
+ step.vx = vx.MapIndex(k)
+ step.vy = vy.MapIndex(k)
+ step.key = k
+ if !step.vx.IsValid() && !step.vy.IsValid() {
+ // It is possible for both vx and vy to be invalid if the
+			// key contains a NaN value.
+ //
+ // Even with the ability to retrieve NaN keys in Go 1.12,
+ // there still isn't a sensible way to compare the values since
+ // a NaN key may map to multiple unordered values.
+ // The most reasonable way to compare NaNs would be to compare the
+ // set of values. However, this is impossible to do efficiently
+ // since set equality is provably an O(n^2) operation given only
+ // an Equal function. If we had a Less function or Hash function,
+ // this could be done in O(n*log(n)) or O(n), respectively.
+ //
+ // Rather than adding complex logic to deal with NaNs, make it
+ // the user's responsibility to compare such obscure maps.
+ const help = "consider providing a Comparer to compare the map"
+ panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help))
+ }
+ s.compareAny(step)
+ }
+}
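+
+// For example (an illustrative sketch, assuming "math" is imported),
+// comparing maps keyed by NaN panics unless a Comparer is supplied:
+//
+//	m := map[float64]int{math.NaN(): 1}
+//	Equal(m, m) // panics: map key with NaNs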
+
+func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) {
+ if vx.IsNil() || vy.IsNil() {
+ s.report(vx.IsNil() && vy.IsNil(), 0)
+ return
+ }
+
+ // Cycle-detection for pointers.
+ if eq, visited := s.curPtrs.Push(vx, vy); visited {
+ s.report(eq, reportByCycle)
+ return
+ }
+ defer s.curPtrs.Pop(vx, vy)
+
+ vx, vy = vx.Elem(), vy.Elem()
+ s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}})
+}
+
+func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) {
+ if vx.IsNil() || vy.IsNil() {
+ s.report(vx.IsNil() && vy.IsNil(), 0)
+ return
+ }
+ vx, vy = vx.Elem(), vy.Elem()
+ if vx.Type() != vy.Type() {
+ s.report(false, 0)
+ return
+ }
+ s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}})
+}
+
+func (s *state) report(eq bool, rf resultFlags) {
+ if rf&reportByIgnore == 0 {
+ if eq {
+ s.result.NumSame++
+ rf |= reportEqual
+ } else {
+ s.result.NumDiff++
+ rf |= reportUnequal
+ }
+ }
+ for _, r := range s.reporters {
+ r.Report(Result{flags: rf})
+ }
+}
+
+// recChecker tracks the state needed to periodically perform checks that
+// user-provided transformers are not stuck in an infinitely recursive cycle.
+type recChecker struct{ next int }
+
+// Check scans the Path for recursive transformers and panics when any are
+// detected. Note that the presence of a
+// recursive Transformer does not necessarily imply an infinite cycle.
+// As such, this check only activates after some minimal number of path steps.
+func (rc *recChecker) Check(p Path) {
+ const minLen = 1 << 16
+ if rc.next == 0 {
+ rc.next = minLen
+ }
+ if len(p) < rc.next {
+ return
+ }
+ rc.next <<= 1
+
+ // Check whether the same transformer has appeared at least twice.
+ var ss []string
+ m := map[Option]int{}
+ for _, ps := range p {
+ if t, ok := ps.(Transform); ok {
+ t := t.Option()
+ if m[t] == 1 { // Transformer was used exactly once before
+ tf := t.(*transformer).fnc.Type()
+ ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0)))
+ }
+ m[t]++
+ }
+ }
+ if len(ss) > 0 {
+ const warning = "recursive set of Transformers detected"
+ const help = "consider using cmpopts.AcyclicTransformer"
+ set := strings.Join(ss, "\n\t")
+ panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help))
+ }
+}
+
+// dynChecker tracks the state needed to periodically perform checks that
+// user-provided functions are symmetric and deterministic.
+// The zero value is safe for immediate use.
+type dynChecker struct{ curr, next int }
+
+// Next increments the state and reports whether a check should be performed.
+//
+// Checks occur every Nth function call, where N is a triangular number:
+//
+// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ...
+//
+// See https://en.wikipedia.org/wiki/Triangular_number
+//
+// This sequence ensures that the cost of checks drops significantly as
+// the number of function calls grows larger.
+func (dc *dynChecker) Next() bool {
+ ok := dc.curr == dc.next
+ if ok {
+ dc.curr = 0
+ dc.next++
+ }
+ dc.curr++
+ return ok
+}
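+
+// Illustratively, a zero-valued dynChecker reports true on calls
+// 1, 2, 4, 7, 11, 16, ... (the gaps grow by one each time):
+//
+//	var dc dynChecker
+//	dc.Next() // true  (1st call)
+//	dc.Next() // true  (2nd call)
+//	dc.Next() // false (3rd call)
+//	dc.Next() // true  (4th call)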
+
+// makeAddressable returns a value that is always addressable.
+// It returns the input verbatim if it is already addressable,
+// otherwise it creates a new value and returns an addressable copy.
+func makeAddressable(v reflect.Value) reflect.Value {
+ if v.CanAddr() {
+ return v
+ }
+ vc := reflect.New(v.Type()).Elem()
+ vc.Set(v)
+ return vc
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/export.go b/vendor/github.com/google/go-cmp/cmp/export.go
new file mode 100644
index 0000000..29f82fe
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/export.go
@@ -0,0 +1,31 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// retrieveUnexportedField uses unsafe to forcibly retrieve any field from
+// a struct such that the value has read-write permissions.
+//
+// The parent struct, v, must be addressable, while f must be a StructField
+// describing the field to retrieve. If addr is false,
+// then the returned value will be shallow copied to be non-addressable.
+func retrieveUnexportedField(v reflect.Value, f reflect.StructField, addr bool) reflect.Value {
+ ve := reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem()
+ if !addr {
+ // A field is addressable if and only if the struct is addressable.
+ // If the original parent value was not addressable, shallow copy the
+ // value to make it non-addressable to avoid leaking an implementation
+ // detail of how forcibly exporting a field works.
+ if ve.Kind() == reflect.Interface && ve.IsNil() {
+ return reflect.Zero(f.Type)
+ }
+ return reflect.ValueOf(ve.Interface()).Convert(f.Type)
+ }
+ return ve
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
new file mode 100644
index 0000000..36062a6
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
@@ -0,0 +1,18 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !cmp_debug
+// +build !cmp_debug
+
+package diff
+
+var debug debugger
+
+type debugger struct{}
+
+func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc {
+ return f
+}
+func (debugger) Update() {}
+func (debugger) Finish() {}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
new file mode 100644
index 0000000..a3b97a1
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
@@ -0,0 +1,123 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build cmp_debug
+// +build cmp_debug
+
+package diff
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+)
+
+// The algorithm can be seen running in real-time by enabling debugging:
+// go test -tags=cmp_debug -v
+//
+// Example output:
+// === RUN TestDifference/#34
+// ┌───────────────────────────────┐
+// │ \ · · · · · · · · · · · · · · │
+// │ · # · · · · · · · · · · · · · │
+// │ · \ · · · · · · · · · · · · · │
+// │ · · \ · · · · · · · · · · · · │
+// │ · · · X # · · · · · · · · · · │
+// │ · · · # \ · · · · · · · · · · │
+// │ · · · · · # # · · · · · · · · │
+// │ · · · · · # \ · · · · · · · · │
+// │ · · · · · · · \ · · · · · · · │
+// │ · · · · · · · · \ · · · · · · │
+// │ · · · · · · · · · \ · · · · · │
+// │ · · · · · · · · · · \ · · # · │
+// │ · · · · · · · · · · · \ # # · │
+// │ · · · · · · · · · · · # # # · │
+// │ · · · · · · · · · · # # # # · │
+// │ · · · · · · · · · # # # # # · │
+// │ · · · · · · · · · · · · · · \ │
+// └───────────────────────────────┘
+// [.Y..M.XY......YXYXY.|]
+//
+// The grid represents the edit-graph where the horizontal axis represents
+// list X and the vertical axis represents list Y. The start of the two lists
+// is the top-left, while the ends are the bottom-right. The '·' represents
+// an unexplored node in the graph. The '\' indicates that the two symbols
+// from list X and Y are equal. The 'X' indicates that two symbols are similar
+// (but not exactly equal) to each other. The '#' indicates that the two symbols
+// are different (and not similar). The algorithm traverses this graph trying to
+// make the paths starting in the top-left and the bottom-right connect.
+//
+// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents
+// the currently established path from the forward and reverse searches,
+// separated by a '|' character.
+
+const (
+ updateDelay = 100 * time.Millisecond
+ finishDelay = 500 * time.Millisecond
+ ansiTerminal = true // ANSI escape codes used to move terminal cursor
+)
+
+var debug debugger
+
+type debugger struct {
+ sync.Mutex
+ p1, p2 EditScript
+ fwdPath, revPath *EditScript
+ grid []byte
+ lines int
+}
+
+func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc {
+ dbg.Lock()
+ dbg.fwdPath, dbg.revPath = p1, p2
+ top := "┌─" + strings.Repeat("──", nx) + "┐\n"
+ row := "│ " + strings.Repeat("· ", nx) + "│\n"
+ btm := "└─" + strings.Repeat("──", nx) + "┘\n"
+ dbg.grid = []byte(top + strings.Repeat(row, ny) + btm)
+ dbg.lines = strings.Count(dbg.String(), "\n")
+ fmt.Print(dbg)
+
+ // Wrap the EqualFunc so that we can intercept each result.
+ return func(ix, iy int) (r Result) {
+ cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")]
+ for i := range cell {
+ cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot
+ }
+ switch r = f(ix, iy); {
+ case r.Equal():
+ cell[0] = '\\'
+ case r.Similar():
+ cell[0] = 'X'
+ default:
+ cell[0] = '#'
+ }
+ return
+ }
+}
+
+func (dbg *debugger) Update() {
+ dbg.print(updateDelay)
+}
+
+func (dbg *debugger) Finish() {
+ dbg.print(finishDelay)
+ dbg.Unlock()
+}
+
+func (dbg *debugger) String() string {
+ dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0]
+ for i := len(*dbg.revPath) - 1; i >= 0; i-- {
+ dbg.p2 = append(dbg.p2, (*dbg.revPath)[i])
+ }
+ return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2)
+}
+
+func (dbg *debugger) print(d time.Duration) {
+ if ansiTerminal {
+ fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor
+ }
+ fmt.Print(dbg)
+ time.Sleep(d)
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
new file mode 100644
index 0000000..a248e54
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
@@ -0,0 +1,402 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package diff implements an algorithm for producing edit-scripts.
+// The edit-script is a sequence of operations needed to transform one list
+// of symbols into another (or vice-versa). The edits allowed are insertions,
+// deletions, and modifications. The summation of all edits is called the
+// Levenshtein distance, a quantity well-known in computer science.
+//
+// This package prioritizes performance over accuracy. That is, the run time
+// is more important than obtaining a minimal Levenshtein distance.
+package diff
+
+import (
+ "math/rand"
+ "time"
+
+ "github.com/google/go-cmp/cmp/internal/flags"
+)
+
+// EditType represents a single operation within an edit-script.
+type EditType uint8
+
+const (
+ // Identity indicates that a symbol pair is identical in both list X and Y.
+ Identity EditType = iota
+ // UniqueX indicates that a symbol only exists in X and not Y.
+ UniqueX
+ // UniqueY indicates that a symbol only exists in Y and not X.
+ UniqueY
+ // Modified indicates that a symbol pair is a modification of each other.
+ Modified
+)
+
+// EditScript represents the series of differences between two lists.
+type EditScript []EditType
+
+// String returns a human-readable string representing the edit-script where
+// Identity, UniqueX, UniqueY, and Modified are represented by the
+// '.', 'X', 'Y', and 'M' characters, respectively.
+func (es EditScript) String() string {
+ b := make([]byte, len(es))
+ for i, e := range es {
+ switch e {
+ case Identity:
+ b[i] = '.'
+ case UniqueX:
+ b[i] = 'X'
+ case UniqueY:
+ b[i] = 'Y'
+ case Modified:
+ b[i] = 'M'
+ default:
+ panic("invalid edit-type")
+ }
+ }
+ return string(b)
+}
+
+// stats returns a histogram of the number of each type of edit operation.
+func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) {
+ for _, e := range es {
+ switch e {
+ case Identity:
+ s.NI++
+ case UniqueX:
+ s.NX++
+ case UniqueY:
+ s.NY++
+ case Modified:
+ s.NM++
+ default:
+ panic("invalid edit-type")
+ }
+ }
+ return
+}
+
+// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if
+// lists X and Y are equal.
+func (es EditScript) Dist() int { return len(es) - es.stats().NI }
+
+// LenX is the length of the X list.
+func (es EditScript) LenX() int { return len(es) - es.stats().NY }
+
+// LenY is the length of the Y list.
+func (es EditScript) LenY() int { return len(es) - es.stats().NX }
+
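+// For example (illustrative), EditScript{Identity, UniqueX, Modified} has
+// String() == ".XM", Dist() == 2, LenX() == 3, and LenY() == 2.
+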
+// EqualFunc reports whether the symbols at indexes ix and iy are equal.
+// When called by Difference, the indexes are guaranteed to be less than nx and ny, respectively.
+type EqualFunc func(ix int, iy int) Result
+
+// Result is the result of comparison.
+// NumSame is the number of sub-elements that are equal.
+// NumDiff is the number of sub-elements that are not equal.
+type Result struct{ NumSame, NumDiff int }
+
+// BoolResult returns a Result that is either Equal or not Equal.
+func BoolResult(b bool) Result {
+ if b {
+ return Result{NumSame: 1} // Equal, Similar
+ } else {
+ return Result{NumDiff: 2} // Not Equal, not Similar
+ }
+}
+
+// Equal indicates whether the symbols are equal. Two symbols are equal
+// if and only if NumDiff == 0. If Equal, then they are also Similar.
+func (r Result) Equal() bool { return r.NumDiff == 0 }
+
+// Similar indicates whether two symbols are similar and may be represented
+// by using the Modified type. As a special case, we consider binary comparisons
+// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar.
+//
+// The exact ratio of NumSame to NumDiff to determine similarity may change.
+func (r Result) Similar() bool {
+ // Use NumSame+1 to offset NumSame so that binary comparisons are similar.
+ return r.NumSame+1 >= r.NumDiff
+}
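+
+// For example (illustrative): Result{NumSame: 0, NumDiff: 1}.Similar() is true
+// (the binary "not equal" case), while Result{NumSame: 1, NumDiff: 3}.Similar()
+// is false since 1+1 < 3.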
+
+var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
+
+// Difference reports whether two lists of lengths nx and ny are equal
+// given the definition of equality provided as f.
+//
+// This function returns an edit-script, which is a sequence of operations
+// needed to convert one list into the other. The following invariants for
+// the edit-script are maintained:
+// - eq == (es.Dist()==0)
+// - nx == es.LenX()
+// - ny == es.LenY()
+//
+// This algorithm is not guaranteed to be an optimal solution (i.e., one that
+// produces an edit-script with a minimal Levenshtein distance). This algorithm
+// favors performance over optimality. The exact output is not guaranteed to
+// be stable and may change over time.
+func Difference(nx, ny int, f EqualFunc) (es EditScript) {
+ // This algorithm is based on traversing what is known as an "edit-graph".
+ // See Figure 1 from "An O(ND) Difference Algorithm and Its Variations"
+ // by Eugene W. Myers. Since D can be as large as N itself, this is
+ // effectively O(N^2). Unlike the algorithm from that paper, we are not
+	// interested in the optimal path, but in at least some "decent" path.
+ //
+ // For example, let X and Y be lists of symbols:
+ // X = [A B C A B B A]
+ // Y = [C B A B A C]
+ //
+ // The edit-graph can be drawn as the following:
+ // A B C A B B A
+ // ┌─────────────┐
+ // C │_|_|\|_|_|_|_│ 0
+ // B │_|\|_|_|\|\|_│ 1
+ // A │\|_|_|\|_|_|\│ 2
+ // B │_|\|_|_|\|\|_│ 3
+ // A │\|_|_|\|_|_|\│ 4
+ // C │ | |\| | | | │ 5
+ // └─────────────┘ 6
+ // 0 1 2 3 4 5 6 7
+ //
+ // List X is written along the horizontal axis, while list Y is written
+ // along the vertical axis. At any point on this grid, if the symbol in
+ // list X matches the corresponding symbol in list Y, then a '\' is drawn.
+ // The goal of any minimal edit-script algorithm is to find a path from the
+ // top-left corner to the bottom-right corner, while traveling through the
+ // fewest horizontal or vertical edges.
+ // A horizontal edge is equivalent to inserting a symbol from list X.
+ // A vertical edge is equivalent to inserting a symbol from list Y.
+ // A diagonal edge is equivalent to a matching symbol between both X and Y.
+
+ // Invariants:
+ // - 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
+ // - 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
+ //
+ // In general:
+ // - fwdFrontier.X < revFrontier.X
+ // - fwdFrontier.Y < revFrontier.Y
+ //
+	// unless it is time for the algorithm to terminate.
+ fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
+ revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
+ fwdFrontier := fwdPath.point // Forward search frontier
+ revFrontier := revPath.point // Reverse search frontier
+
+ // Search budget bounds the cost of searching for better paths.
+ // The longest sequence of non-matching symbols that can be tolerated is
+ // approximately the square-root of the search budget.
+ searchBudget := 4 * (nx + ny) // O(n)
+
+ // Running the tests with the "cmp_debug" build tag prints a visualization
+ // of the algorithm running in real-time. This is educational for
+ // understanding how the algorithm works. See debug_enable.go.
+ f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
+
+ // The algorithm below is a greedy, meet-in-the-middle algorithm for
+ // computing sub-optimal edit-scripts between two lists.
+ //
+ // The algorithm is approximately as follows:
+ // - Searching for differences switches back-and-forth between
+ // a search that starts at the beginning (the top-left corner), and
+ // a search that starts at the end (the bottom-right corner).
+	//   The goal of the search is to connect with the search
+ // from the opposite corner.
+ // - As we search, we build a path in a greedy manner,
+ // where the first match seen is added to the path (this is sub-optimal,
+ // but provides a decent result in practice). When matches are found,
+ // we try the next pair of symbols in the lists and follow all matches
+ // as far as possible.
+	// - When searching for matches, we search along a diagonal going
+	//   through the "frontier" point. If no matches are found,
+ // we advance the frontier towards the opposite corner.
+ // - This algorithm terminates when either the X coordinates or the
+	//   Y coordinates of the forward and reverse frontier points intersect.
+
+ // This algorithm is correct even if searching only in the forward direction
+	// or in the reverse direction. We do both because it is commonly observed
+	// that two lists differ because elements were added to the front
+	// or the end of one of the lists.
+ //
+ // Non-deterministically start with either the forward or reverse direction
+ // to introduce some deliberate instability so that we have the flexibility
+ // to change this algorithm in the future.
+ if flags.Deterministic || randBool {
+ goto forwardSearch
+ } else {
+ goto reverseSearch
+ }
+
+forwardSearch:
+ {
+ // Forward search from the beginning.
+ if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
+ goto finishSearch
+ }
+ for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
+ // Search in a diagonal pattern for a match.
+ z := zigzag(i)
+ p := point{fwdFrontier.X + z, fwdFrontier.Y - z}
+ switch {
+ case p.X >= revPath.X || p.Y < fwdPath.Y:
+ stop1 = true // Hit top-right corner
+ case p.Y >= revPath.Y || p.X < fwdPath.X:
+ stop2 = true // Hit bottom-left corner
+ case f(p.X, p.Y).Equal():
+ // Match found, so connect the path to this point.
+ fwdPath.connect(p, f)
+ fwdPath.append(Identity)
+ // Follow sequence of matches as far as possible.
+ for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
+ if !f(fwdPath.X, fwdPath.Y).Equal() {
+ break
+ }
+ fwdPath.append(Identity)
+ }
+ fwdFrontier = fwdPath.point
+ stop1, stop2 = true, true
+ default:
+ searchBudget-- // Match not found
+ }
+ debug.Update()
+ }
+ // Advance the frontier towards reverse point.
+ if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y {
+ fwdFrontier.X++
+ } else {
+ fwdFrontier.Y++
+ }
+ goto reverseSearch
+ }
+
+reverseSearch:
+ {
+ // Reverse search from the end.
+ if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
+ goto finishSearch
+ }
+ for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
+ // Search in a diagonal pattern for a match.
+ z := zigzag(i)
+ p := point{revFrontier.X - z, revFrontier.Y + z}
+ switch {
+ case fwdPath.X >= p.X || revPath.Y < p.Y:
+ stop1 = true // Hit bottom-left corner
+ case fwdPath.Y >= p.Y || revPath.X < p.X:
+ stop2 = true // Hit top-right corner
+ case f(p.X-1, p.Y-1).Equal():
+ // Match found, so connect the path to this point.
+ revPath.connect(p, f)
+ revPath.append(Identity)
+ // Follow sequence of matches as far as possible.
+ for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
+ if !f(revPath.X-1, revPath.Y-1).Equal() {
+ break
+ }
+ revPath.append(Identity)
+ }
+ revFrontier = revPath.point
+ stop1, stop2 = true, true
+ default:
+ searchBudget-- // Match not found
+ }
+ debug.Update()
+ }
+ // Advance the frontier towards forward point.
+ if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y {
+ revFrontier.X--
+ } else {
+ revFrontier.Y--
+ }
+ goto forwardSearch
+ }
+
+finishSearch:
+ // Join the forward and reverse paths and then append the reverse path.
+ fwdPath.connect(revPath.point, f)
+ for i := len(revPath.es) - 1; i >= 0; i-- {
+ t := revPath.es[i]
+ revPath.es = revPath.es[:i]
+ fwdPath.append(t)
+ }
+ debug.Finish()
+ return fwdPath.es
+}
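+
+// A minimal illustrative sketch (x and y are assumed to be comparable slices):
+//
+//	es := Difference(len(x), len(y), func(ix, iy int) Result {
+//		return BoolResult(x[ix] == y[iy])
+//	})
+//	_ = es.Dist() // number of edits needed to transform x into y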
+
+type path struct {
+ dir int // +1 if forward, -1 if reverse
+ point // Leading point of the EditScript path
+ es EditScript
+}
+
+// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types
+// to the edit-script to connect p.point to dst.
+func (p *path) connect(dst point, f EqualFunc) {
+ if p.dir > 0 {
+ // Connect in forward direction.
+ for dst.X > p.X && dst.Y > p.Y {
+ switch r := f(p.X, p.Y); {
+ case r.Equal():
+ p.append(Identity)
+ case r.Similar():
+ p.append(Modified)
+ case dst.X-p.X >= dst.Y-p.Y:
+ p.append(UniqueX)
+ default:
+ p.append(UniqueY)
+ }
+ }
+ for dst.X > p.X {
+ p.append(UniqueX)
+ }
+ for dst.Y > p.Y {
+ p.append(UniqueY)
+ }
+ } else {
+ // Connect in reverse direction.
+ for p.X > dst.X && p.Y > dst.Y {
+ switch r := f(p.X-1, p.Y-1); {
+ case r.Equal():
+ p.append(Identity)
+ case r.Similar():
+ p.append(Modified)
+ case p.Y-dst.Y >= p.X-dst.X:
+ p.append(UniqueY)
+ default:
+ p.append(UniqueX)
+ }
+ }
+ for p.X > dst.X {
+ p.append(UniqueX)
+ }
+ for p.Y > dst.Y {
+ p.append(UniqueY)
+ }
+ }
+}
+
+func (p *path) append(t EditType) {
+ p.es = append(p.es, t)
+ switch t {
+ case Identity, Modified:
+ p.add(p.dir, p.dir)
+ case UniqueX:
+ p.add(p.dir, 0)
+ case UniqueY:
+ p.add(0, p.dir)
+ }
+ debug.Update()
+}
+
+type point struct{ X, Y int }
+
+func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy }
+
+// zigzag maps a consecutive sequence of integers to a zig-zag sequence.
+//
+// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...]
+func zigzag(x int) int {
+ if x&1 != 0 {
+ x = ^x
+ }
+ return x >> 1
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go
new file mode 100644
index 0000000..d8e459c
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go
@@ -0,0 +1,9 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flags
+
+// Deterministic controls whether the output of Diff should be deterministic.
+// This is only used for testing.
+var Deterministic bool
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go
new file mode 100644
index 0000000..d127d43
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go
@@ -0,0 +1,99 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package function provides functionality for identifying function types.
+package function
+
+import (
+ "reflect"
+ "regexp"
+ "runtime"
+ "strings"
+)
+
+type funcType int
+
+const (
+ _ funcType = iota
+
+ tbFunc // func(T) bool
+ ttbFunc // func(T, T) bool
+ trbFunc // func(T, R) bool
+ tibFunc // func(T, I) bool
+ trFunc // func(T) R
+
+ Equal = ttbFunc // func(T, T) bool
+ EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool
+ Transformer = trFunc // func(T) R
+ ValueFilter = ttbFunc // func(T, T) bool
+ Less = ttbFunc // func(T, T) bool
+ ValuePredicate = tbFunc // func(T) bool
+ KeyValuePredicate = trbFunc // func(T, R) bool
+)
+
+var boolType = reflect.TypeOf(true)
+
+// IsType reports whether the reflect.Type is of the specified function type.
+func IsType(t reflect.Type, ft funcType) bool {
+ if t == nil || t.Kind() != reflect.Func || t.IsVariadic() {
+ return false
+ }
+ ni, no := t.NumIn(), t.NumOut()
+ switch ft {
+ case tbFunc: // func(T) bool
+ if ni == 1 && no == 1 && t.Out(0) == boolType {
+ return true
+ }
+ case ttbFunc: // func(T, T) bool
+ if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType {
+ return true
+ }
+ case trbFunc: // func(T, R) bool
+ if ni == 2 && no == 1 && t.Out(0) == boolType {
+ return true
+ }
+ case tibFunc: // func(T, I) bool
+ if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType {
+ return true
+ }
+ case trFunc: // func(T) R
+ if ni == 1 && no == 1 {
+ return true
+ }
+ }
+ return false
+}
+
+var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`)
+
+// NameOf returns the name of the function value.
+func NameOf(v reflect.Value) string {
+ fnc := runtime.FuncForPC(v.Pointer())
+ if fnc == nil {
+ return ""
+ }
+ fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm"
+
+ // Method closures have a "-fm" suffix.
+ fullName = strings.TrimSuffix(fullName, "-fm")
+
+ var name string
+ for len(fullName) > 0 {
+ inParen := strings.HasSuffix(fullName, ")")
+ fullName = strings.TrimSuffix(fullName, ")")
+
+ s := lastIdentRx.FindString(fullName)
+ if s == "" {
+ break
+ }
+ name = s + "." + name
+ fullName = strings.TrimSuffix(fullName, s)
+
+ if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 {
+ fullName = fullName[:i]
+ }
+ fullName = strings.TrimSuffix(fullName, ".")
+ }
+ return strings.TrimSuffix(name, ".")
+}
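+
+// For example (illustrative):
+//
+//	NameOf(reflect.ValueOf(strings.TrimSpace)) // "strings.TrimSpace"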
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go
new file mode 100644
index 0000000..7b498bb
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go
@@ -0,0 +1,164 @@
+// Copyright 2020, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package value
+
+import (
+ "reflect"
+ "strconv"
+)
+
+var anyType = reflect.TypeOf((*interface{})(nil)).Elem()
+
+// TypeString is nearly identical to reflect.Type.String,
+// but has an additional option to specify that full type names be used.
+func TypeString(t reflect.Type, qualified bool) string {
+ return string(appendTypeName(nil, t, qualified, false))
+}
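+
+// For example (an illustrative sketch, assuming "time" is imported):
+//
+//	TypeString(reflect.TypeOf(map[string]int(nil)), false) // "map[string]int"
+//	TypeString(reflect.TypeOf(time.Time{}), true)          // `"time".Time`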
+
+func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte {
+ // BUG: Go reflection provides no way to disambiguate two named types
+ // of the same name and within the same package,
+ // but declared within the namespace of different functions.
+
+ // Use the "any" alias instead of "interface{}" for better readability.
+ if t == anyType {
+ return append(b, "any"...)
+ }
+
+ // Named type.
+ if t.Name() != "" {
+ if qualified && t.PkgPath() != "" {
+ b = append(b, '"')
+ b = append(b, t.PkgPath()...)
+ b = append(b, '"')
+ b = append(b, '.')
+ b = append(b, t.Name()...)
+ } else {
+ b = append(b, t.String()...)
+ }
+ return b
+ }
+
+ // Unnamed type.
+ switch k := t.Kind(); k {
+ case reflect.Bool, reflect.String, reflect.UnsafePointer,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
+ reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
+ b = append(b, k.String()...)
+ case reflect.Chan:
+ if t.ChanDir() == reflect.RecvDir {
+ b = append(b, "<-"...)
+ }
+ b = append(b, "chan"...)
+ if t.ChanDir() == reflect.SendDir {
+ b = append(b, "<-"...)
+ }
+ b = append(b, ' ')
+ b = appendTypeName(b, t.Elem(), qualified, false)
+ case reflect.Func:
+ if !elideFunc {
+ b = append(b, "func"...)
+ }
+ b = append(b, '(')
+ for i := 0; i < t.NumIn(); i++ {
+ if i > 0 {
+ b = append(b, ", "...)
+ }
+ if i == t.NumIn()-1 && t.IsVariadic() {
+ b = append(b, "..."...)
+ b = appendTypeName(b, t.In(i).Elem(), qualified, false)
+ } else {
+ b = appendTypeName(b, t.In(i), qualified, false)
+ }
+ }
+ b = append(b, ')')
+ switch t.NumOut() {
+ case 0:
+ // Do nothing
+ case 1:
+ b = append(b, ' ')
+ b = appendTypeName(b, t.Out(0), qualified, false)
+ default:
+ b = append(b, " ("...)
+ for i := 0; i < t.NumOut(); i++ {
+ if i > 0 {
+ b = append(b, ", "...)
+ }
+ b = appendTypeName(b, t.Out(i), qualified, false)
+ }
+ b = append(b, ')')
+ }
+ case reflect.Struct:
+ b = append(b, "struct{ "...)
+ for i := 0; i < t.NumField(); i++ {
+ if i > 0 {
+ b = append(b, "; "...)
+ }
+ sf := t.Field(i)
+ if !sf.Anonymous {
+ if qualified && sf.PkgPath != "" {
+ b = append(b, '"')
+ b = append(b, sf.PkgPath...)
+ b = append(b, '"')
+ b = append(b, '.')
+ }
+ b = append(b, sf.Name...)
+ b = append(b, ' ')
+ }
+ b = appendTypeName(b, sf.Type, qualified, false)
+ if sf.Tag != "" {
+ b = append(b, ' ')
+ b = strconv.AppendQuote(b, string(sf.Tag))
+ }
+ }
+ if b[len(b)-1] == ' ' {
+ b = b[:len(b)-1]
+ } else {
+ b = append(b, ' ')
+ }
+ b = append(b, '}')
+ case reflect.Slice, reflect.Array:
+ b = append(b, '[')
+ if k == reflect.Array {
+ b = strconv.AppendUint(b, uint64(t.Len()), 10)
+ }
+ b = append(b, ']')
+ b = appendTypeName(b, t.Elem(), qualified, false)
+ case reflect.Map:
+ b = append(b, "map["...)
+ b = appendTypeName(b, t.Key(), qualified, false)
+ b = append(b, ']')
+ b = appendTypeName(b, t.Elem(), qualified, false)
+ case reflect.Ptr:
+ b = append(b, '*')
+ b = appendTypeName(b, t.Elem(), qualified, false)
+ case reflect.Interface:
+ b = append(b, "interface{ "...)
+ for i := 0; i < t.NumMethod(); i++ {
+ if i > 0 {
+ b = append(b, "; "...)
+ }
+ m := t.Method(i)
+ if qualified && m.PkgPath != "" {
+ b = append(b, '"')
+ b = append(b, m.PkgPath...)
+ b = append(b, '"')
+ b = append(b, '.')
+ }
+ b = append(b, m.Name...)
+ b = appendTypeName(b, m.Type, qualified, true)
+ }
+ if b[len(b)-1] == ' ' {
+ b = b[:len(b)-1]
+ } else {
+ b = append(b, ' ')
+ }
+ b = append(b, '}')
+ default:
+ panic("invalid kind: " + k.String())
+ }
+ return b
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go
new file mode 100644
index 0000000..e5dfff6
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go
@@ -0,0 +1,34 @@
+// Copyright 2018, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package value
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// Pointer is an opaque typed pointer and is guaranteed to be comparable.
+type Pointer struct {
+ p unsafe.Pointer
+ t reflect.Type
+}
+
+// PointerOf returns a Pointer from v, which must be a
+// reflect.Ptr, reflect.Slice, or reflect.Map.
+func PointerOf(v reflect.Value) Pointer {
+ // The proper representation of a pointer is unsafe.Pointer,
+ // which is necessary if the GC ever uses a moving collector.
+ return Pointer{unsafe.Pointer(v.Pointer()), v.Type()}
+}
+
+// IsNil reports whether the pointer is nil.
+func (p Pointer) IsNil() bool {
+ return p.p == nil
+}
+
+// Uintptr returns the pointer as a uintptr.
+func (p Pointer) Uintptr() uintptr {
+ return uintptr(p.p)
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
new file mode 100644
index 0000000..98533b0
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
@@ -0,0 +1,106 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package value
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+)
+
+// SortKeys sorts a list of map keys, deduplicating keys if necessary.
+// The type of each value must be comparable.
+func SortKeys(vs []reflect.Value) []reflect.Value {
+ if len(vs) == 0 {
+ return vs
+ }
+
+ // Sort the map keys.
+ sort.SliceStable(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) })
+
+ // Deduplicate keys (fails for NaNs).
+ vs2 := vs[:1]
+ for _, v := range vs[1:] {
+ if isLess(vs2[len(vs2)-1], v) {
+ vs2 = append(vs2, v)
+ }
+ }
+ return vs2
+}
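+
+// Illustrative sketch (not part of the upstream file): SortKeys yields a
+// deterministic iteration order over a map accessed via reflection, e.g.:
+//
+//	m := reflect.ValueOf(map[string]int{"b": 2, "a": 1})
+//	for _, k := range SortKeys(m.MapKeys()) {
+//		_ = m.MapIndex(k) // visits "a" before "b"
+//	}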
+
+// isLess is a generic function for sorting arbitrary map keys.
+// The inputs must be of the same type and must be comparable.
+func isLess(x, y reflect.Value) bool {
+ switch x.Type().Kind() {
+ case reflect.Bool:
+ return !x.Bool() && y.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return x.Int() < y.Int()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return x.Uint() < y.Uint()
+ case reflect.Float32, reflect.Float64:
+ // NOTE: This does not sort -0 as less than +0
+ // since Go maps treat -0 and +0 as equal keys.
+ fx, fy := x.Float(), y.Float()
+ return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy)
+ case reflect.Complex64, reflect.Complex128:
+ cx, cy := x.Complex(), y.Complex()
+ rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy)
+ if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) {
+ return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy)
+ }
+ return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry)
+ case reflect.Ptr, reflect.UnsafePointer, reflect.Chan:
+ return x.Pointer() < y.Pointer()
+ case reflect.String:
+ return x.String() < y.String()
+ case reflect.Array:
+ for i := 0; i < x.Len(); i++ {
+ if isLess(x.Index(i), y.Index(i)) {
+ return true
+ }
+ if isLess(y.Index(i), x.Index(i)) {
+ return false
+ }
+ }
+ return false
+ case reflect.Struct:
+ for i := 0; i < x.NumField(); i++ {
+ if isLess(x.Field(i), y.Field(i)) {
+ return true
+ }
+ if isLess(y.Field(i), x.Field(i)) {
+ return false
+ }
+ }
+ return false
+ case reflect.Interface:
+ vx, vy := x.Elem(), y.Elem()
+ if !vx.IsValid() || !vy.IsValid() {
+ return !vx.IsValid() && vy.IsValid()
+ }
+ tx, ty := vx.Type(), vy.Type()
+ if tx == ty {
+ return isLess(x.Elem(), y.Elem())
+ }
+ if tx.Kind() != ty.Kind() {
+ return vx.Kind() < vy.Kind()
+ }
+ if tx.String() != ty.String() {
+ return tx.String() < ty.String()
+ }
+ if tx.PkgPath() != ty.PkgPath() {
+ return tx.PkgPath() < ty.PkgPath()
+ }
+ // This can happen in rare situations, so we fall back to just comparing
+ // the unique pointer for a reflect.Type. This guarantees deterministic
+ // ordering within a program, but it is obviously not stable.
+ return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer()
+ default:
+ // Must be Func, Map, or Slice; which are not comparable.
+ panic(fmt.Sprintf("%T is not comparable", x.Type()))
+ }
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go
new file mode 100644
index 0000000..754496f
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/options.go
@@ -0,0 +1,554 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+ "strings"
+
+ "github.com/google/go-cmp/cmp/internal/function"
+)
+
+// Option configures for specific behavior of [Equal] and [Diff]. In particular,
+// the fundamental Option functions ([Ignore], [Transformer], and [Comparer])
+// configure how equality is determined.
+//
+// The fundamental options may be composed with filters ([FilterPath] and
+// [FilterValues]) to control the scope over which they are applied.
+//
+// The [github.com/google/go-cmp/cmp/cmpopts] package provides helper functions
+// for creating options that may be used with [Equal] and [Diff].
+type Option interface {
+ // filter applies all filters and returns the option that remains.
+ // Each option may only read s.curPath and call s.callTTBFunc.
+ //
+ // An Options is returned only if multiple comparers or transformers
+ // can apply simultaneously and will only contain values of those types
+ // or sub-Options containing values of those types.
+ filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption
+}
+
+// applicableOption represents the following types:
+//
+// Fundamental: ignore | validator | *comparer | *transformer
+// Grouping: Options
+type applicableOption interface {
+ Option
+
+ // apply executes the option, which may mutate s or panic.
+ apply(s *state, vx, vy reflect.Value)
+}
+
+// coreOption represents the following types:
+//
+// Fundamental: ignore | validator | *comparer | *transformer
+// Filters: *pathFilter | *valuesFilter
+type coreOption interface {
+ Option
+ isCore()
+}
+
+type core struct{}
+
+func (core) isCore() {}
+
+// Options is a list of [Option] values that also satisfies the [Option] interface.
+// Helper comparison packages may return an Options value when packing multiple
+// [Option] values into a single [Option]. When this package processes an Options,
+// it will be implicitly expanded into a flat list.
+//
+// Applying a filter on an Options is equivalent to applying that same filter
+// on all individual options held within.
+type Options []Option
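+
+// Illustrative sketch (not part of the upstream file; the DefaultOptions name
+// is hypothetical, and the two Comparers mirror examples shown in the
+// Exporter documentation below): a helper package might bundle several
+// options into one value, which Equal flattens implicitly:
+//
+//	func DefaultOptions() cmp.Option {
+//		return cmp.Options{
+//			cmp.Comparer(func(x, y reflect.Type) bool { return x == y }),
+//			cmp.Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() }),
+//		}
+//	}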
+
+func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) {
+ for _, opt := range opts {
+ switch opt := opt.filter(s, t, vx, vy); opt.(type) {
+ case ignore:
+ return ignore{} // Only ignore can short-circuit evaluation
+ case validator:
+ out = validator{} // Takes precedence over comparer or transformer
+ case *comparer, *transformer, Options:
+ switch out.(type) {
+ case nil:
+ out = opt
+ case validator:
+ // Keep validator
+ case *comparer, *transformer, Options:
+ out = Options{out, opt} // Conflicting comparers or transformers
+ }
+ }
+ }
+ return out
+}
+
+func (opts Options) apply(s *state, _, _ reflect.Value) {
+ const warning = "ambiguous set of applicable options"
+ const help = "consider using filters to ensure at most one Comparer or Transformer may apply"
+ var ss []string
+ for _, opt := range flattenOptions(nil, opts) {
+ ss = append(ss, fmt.Sprint(opt))
+ }
+ set := strings.Join(ss, "\n\t")
+ panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help))
+}
+
+func (opts Options) String() string {
+ var ss []string
+ for _, opt := range opts {
+ ss = append(ss, fmt.Sprint(opt))
+ }
+ return fmt.Sprintf("Options{%s}", strings.Join(ss, ", "))
+}
+
+// FilterPath returns a new [Option] where opt is only evaluated if filter f
+// returns true for the current [Path] in the value tree.
+//
+// This filter is called even if a slice element or map entry is missing and
+// provides an opportunity to ignore such cases. The filter function must be
+// symmetric such that the filter result is identical regardless of whether the
+// missing value is from x or y.
+//
+// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or
+// a previously filtered [Option].
+func FilterPath(f func(Path) bool, opt Option) Option {
+ if f == nil {
+ panic("invalid path filter function")
+ }
+ if opt := normalizeOption(opt); opt != nil {
+ return &pathFilter{fnc: f, opt: opt}
+ }
+ return nil
+}
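+
+// Illustrative usage sketch (not part of the upstream file; the ".Secret"
+// field name and the x, y values are hypothetical):
+//
+//	ignoreSecret := cmp.FilterPath(func(p cmp.Path) bool {
+//		return p.Last().String() == ".Secret"
+//	}, cmp.Ignore())
+//	equal := cmp.Equal(x, y, ignoreSecret) // differences in Secret are ignored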
+
+type pathFilter struct {
+ core
+ fnc func(Path) bool
+ opt Option
+}
+
+func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
+ if f.fnc(s.curPath) {
+ return f.opt.filter(s, t, vx, vy)
+ }
+ return nil
+}
+
+func (f pathFilter) String() string {
+ return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt)
+}
+
+// FilterValues returns a new [Option] where opt is only evaluated if filter f,
+// which is a function of the form "func(T, T) bool", returns true for the
+// current pair of values being compared. If either value is invalid or
+// the type of the values is not assignable to T, then this filter implicitly
+// returns false.
+//
+// The filter function must be
+// symmetric (i.e., agnostic to the order of the inputs) and
+// deterministic (i.e., produces the same result when given the same inputs).
+// If T is an interface, it is possible that f is called with two values with
+// different concrete types that both implement T.
+//
+// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or
+// a previously filtered [Option].
+func FilterValues(f interface{}, opt Option) Option {
+ v := reflect.ValueOf(f)
+ if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() {
+ panic(fmt.Sprintf("invalid values filter function: %T", f))
+ }
+ if opt := normalizeOption(opt); opt != nil {
+ vf := &valuesFilter{fnc: v, opt: opt}
+ if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
+ vf.typ = ti
+ }
+ return vf
+ }
+ return nil
+}
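+
+// Illustrative usage sketch (not part of the upstream file;
+// cmpopts.EquateNaNs offers a ready-made equivalent):
+//
+//	equateNaNs := cmp.FilterValues(func(x, y float64) bool {
+//		return math.IsNaN(x) && math.IsNaN(y)
+//	}, cmp.Comparer(func(_, _ float64) bool { return true }))
+//	cmp.Equal(math.NaN(), math.NaN(), equateNaNs) // true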
+
+type valuesFilter struct {
+ core
+ typ reflect.Type // T
+ fnc reflect.Value // func(T, T) bool
+ opt Option
+}
+
+func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
+ if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() {
+ return nil
+ }
+ if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) {
+ return f.opt.filter(s, t, vx, vy)
+ }
+ return nil
+}
+
+func (f valuesFilter) String() string {
+ return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt)
+}
+
+// Ignore is an [Option] that causes all comparisons to be ignored.
+// This value is intended to be combined with [FilterPath] or [FilterValues].
+// It is an error to pass an unfiltered Ignore option to [Equal].
+func Ignore() Option { return ignore{} }
+
+type ignore struct{ core }
+
+func (ignore) isFiltered() bool { return false }
+func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} }
+func (ignore) apply(s *state, _, _ reflect.Value) { s.report(true, reportByIgnore) }
+func (ignore) String() string { return "Ignore()" }
+
+// validator is a sentinel Option type to indicate that some options could not
+// be evaluated due to unexported fields, missing slice elements, or
+// missing map entries. Only in the unexported-field case are both values
+// affected; for missing elements or entries, exactly one value is invalid.
+type validator struct{ core }
+
+func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption {
+ if !vx.IsValid() || !vy.IsValid() {
+ return validator{}
+ }
+ if !vx.CanInterface() || !vy.CanInterface() {
+ return validator{}
+ }
+ return nil
+}
+func (validator) apply(s *state, vx, vy reflect.Value) {
+ // Implies missing slice element or map entry.
+ if !vx.IsValid() || !vy.IsValid() {
+ s.report(vx.IsValid() == vy.IsValid(), 0)
+ return
+ }
+
+ // Unable to Interface implies unexported field without visibility access.
+ if !vx.CanInterface() || !vy.CanInterface() {
+ help := "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported"
+ var name string
+ if t := s.curPath.Index(-2).Type(); t.Name() != "" {
+ // Named type with unexported fields.
+ name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType
+ if _, ok := reflect.New(t).Interface().(error); ok {
+ help = "consider using cmpopts.EquateErrors to compare error values"
+ } else if t.Comparable() {
+ help = "consider using cmpopts.EquateComparable to compare comparable Go types"
+ }
+ } else {
+ // Unnamed type with unexported fields. Derive PkgPath from field.
+ var pkgPath string
+ for i := 0; i < t.NumField() && pkgPath == ""; i++ {
+ pkgPath = t.Field(i).PkgPath
+ }
+ name = fmt.Sprintf("%q.(%v)", pkgPath, t.String()) // e.g., "path/to/package".(struct { a int })
+ }
+ panic(fmt.Sprintf("cannot handle unexported field at %#v:\n\t%v\n%s", s.curPath, name, help))
+ }
+
+ panic("not reachable")
+}
+
+// identRx represents a valid identifier according to the Go specification.
+const identRx = `[_\p{L}][_\p{L}\p{N}]*`
+
+var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`)
+
+// Transformer returns an [Option] that applies a transformation function that
+// converts values of a certain type into that of another.
+//
+// The transformer f must be a function "func(T) R" that converts values of
+// type T to those of type R and is implicitly filtered to input values
+// assignable to T. The transformer must not mutate T in any way.
+//
+// To help prevent some cases of infinite recursive cycles applying the
+// same transform to the output of itself (e.g., in the case where the
+// input and output types are the same), an implicit filter is added such that
+// a transformer is applicable only if that exact transformer is not already
+// in the tail of the [Path] since the last non-[Transform] step.
+// For situations where the implicit filter is still insufficient,
+// consider using [github.com/google/go-cmp/cmp/cmpopts.AcyclicTransformer],
+// which adds a filter to prevent the transformer from
+// being recursively applied upon itself.
+//
+// The name is a user-provided label that is used as the [Transform.Name] in the
+// transformation [PathStep] (and eventually shown in the [Diff] output).
+// The name must be a valid identifier or qualified identifier in Go syntax.
+// If empty, an arbitrary name is used.
+func Transformer(name string, f interface{}) Option {
+ v := reflect.ValueOf(f)
+ if !function.IsType(v.Type(), function.Transformer) || v.IsNil() {
+ panic(fmt.Sprintf("invalid transformer function: %T", f))
+ }
+ if name == "" {
+ name = function.NameOf(v)
+ if !identsRx.MatchString(name) {
+ name = "λ" // Lambda-symbol as placeholder name
+ }
+ } else if !identsRx.MatchString(name) {
+ panic(fmt.Sprintf("invalid name: %q", name))
+ }
+ tr := &transformer{name: name, fnc: reflect.ValueOf(f)}
+ if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
+ tr.typ = ti
+ }
+ return tr
+}
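+
+// Illustrative usage sketch (not part of the upstream file; the "Sort"
+// transform is an example, not a built-in):
+//
+//	trans := cmp.Transformer("Sort", func(in []int) []int {
+//		out := append([]int(nil), in...) // copy so the input is not mutated
+//		sort.Ints(out)
+//		return out
+//	})
+//	cmp.Equal([]int{3, 1, 2}, []int{2, 1, 3}, trans) // true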
+
+type transformer struct {
+ core
+ name string
+ typ reflect.Type // T
+ fnc reflect.Value // func(T) R
+}
+
+func (tr *transformer) isFiltered() bool { return tr.typ != nil }
+
+func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption {
+ for i := len(s.curPath) - 1; i >= 0; i-- {
+ if t, ok := s.curPath[i].(Transform); !ok {
+ break // Hit most recent non-Transform step
+ } else if tr == t.trans {
+ return nil // Cannot directly use same Transform
+ }
+ }
+ if tr.typ == nil || t.AssignableTo(tr.typ) {
+ return tr
+ }
+ return nil
+}
+
+func (tr *transformer) apply(s *state, vx, vy reflect.Value) {
+ step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}}
+ vvx := s.callTRFunc(tr.fnc, vx, step)
+ vvy := s.callTRFunc(tr.fnc, vy, step)
+ step.vx, step.vy = vvx, vvy
+ s.compareAny(step)
+}
+
+func (tr transformer) String() string {
+ return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc))
+}
+
+// Comparer returns an [Option] that determines whether two values are equal
+// to each other.
+//
+// The comparer f must be a function "func(T, T) bool" and is implicitly
+// filtered to input values assignable to T. If T is an interface, it is
+// possible that f is called with two values of different concrete types that
+// both implement T.
+//
+// The equality function must be:
+// - Symmetric: equal(x, y) == equal(y, x)
+// - Deterministic: equal(x, y) == equal(x, y)
+// - Pure: equal(x, y) does not modify x or y
+func Comparer(f interface{}) Option {
+ v := reflect.ValueOf(f)
+ if !function.IsType(v.Type(), function.Equal) || v.IsNil() {
+ panic(fmt.Sprintf("invalid comparer function: %T", f))
+ }
+ cm := &comparer{fnc: v}
+ if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
+ cm.typ = ti
+ }
+ return cm
+}
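+
+// Illustrative usage sketch (not part of the upstream file; the tolerance is
+// an arbitrary choice). The function below is symmetric, deterministic, and
+// pure, as required:
+//
+//	approx := cmp.Comparer(func(x, y float64) bool {
+//		return math.Abs(x-y) < 1e-9
+//	})
+//	x, y := 0.1, 0.2
+//	cmp.Equal(x+y, 0.3, approx) // true, despite floating-point rounding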
+
+type comparer struct {
+ core
+ typ reflect.Type // T
+ fnc reflect.Value // func(T, T) bool
+}
+
+func (cm *comparer) isFiltered() bool { return cm.typ != nil }
+
+func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption {
+ if cm.typ == nil || t.AssignableTo(cm.typ) {
+ return cm
+ }
+ return nil
+}
+
+func (cm *comparer) apply(s *state, vx, vy reflect.Value) {
+ eq := s.callTTBFunc(cm.fnc, vx, vy)
+ s.report(eq, reportByFunc)
+}
+
+func (cm comparer) String() string {
+ return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc))
+}
+
+// Exporter returns an [Option] that specifies whether [Equal] is allowed to
+// introspect into the unexported fields of certain struct types.
+//
+// Users of this option must understand that comparing on unexported fields
+// from external packages is not safe since changes in the internal
+// implementation of some external package may cause the result of [Equal]
+// to unexpectedly change. However, it may be valid to use this option on types
+// defined in an internal package where the semantic meaning of an unexported
+// field is in the control of the user.
+//
+// In many cases, a custom [Comparer] should be used instead that defines
+// equality as a function of the public API of a type rather than the underlying
+// unexported implementation.
+//
+// For example, the [reflect.Type] documentation defines equality to be determined
+// by the == operator on the interface (essentially performing a shallow pointer
+// comparison) and most attempts to compare *[regexp.Regexp] types are interested
+// in only checking that the regular expression strings are equal.
+// Both of these are accomplished using [Comparer] options:
+//
+// Comparer(func(x, y reflect.Type) bool { return x == y })
+// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() })
+//
+// In other cases, the [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]
+// option can be used to ignore all unexported fields on specified struct types.
+func Exporter(f func(reflect.Type) bool) Option {
+ return exporter(f)
+}
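+
+// Illustrative usage sketch (not part of the upstream file; the package-path
+// prefix is hypothetical):
+//
+//	allowInternal := cmp.Exporter(func(t reflect.Type) bool {
+//		return strings.HasPrefix(t.PkgPath(), "example.com/internal/")
+//	})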
+
+type exporter func(reflect.Type) bool
+
+func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
+ panic("not implemented")
+}
+
+// AllowUnexported returns an [Option] that allows [Equal] to forcibly introspect
+// unexported fields of the specified struct types.
+//
+// See [Exporter] for the proper use of this option.
+func AllowUnexported(types ...interface{}) Option {
+ m := make(map[reflect.Type]bool)
+ for _, typ := range types {
+ t := reflect.TypeOf(typ)
+ if t.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("invalid struct type: %T", typ))
+ }
+ m[t] = true
+ }
+ return exporter(func(t reflect.Type) bool { return m[t] })
+}
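+
+// Illustrative usage sketch (not part of the upstream file; the point type is
+// hypothetical):
+//
+//	type point struct{ x, y int }
+//	cmp.Equal(point{1, 2}, point{1, 2}, cmp.AllowUnexported(point{})) // true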
+
+// Result represents the comparison result for a single node and
+// is provided by cmp when calling Report (see [Reporter]).
+type Result struct {
+ _ [0]func() // Make Result incomparable
+ flags resultFlags
+}
+
+// Equal reports whether the node was determined to be equal or not.
+// As a special case, ignored nodes are considered equal.
+func (r Result) Equal() bool {
+ return r.flags&(reportEqual|reportByIgnore) != 0
+}
+
+// ByIgnore reports whether the node is equal because it was ignored.
+// This never reports true if [Result.Equal] reports false.
+func (r Result) ByIgnore() bool {
+ return r.flags&reportByIgnore != 0
+}
+
+// ByMethod reports whether the Equal method determined equality.
+func (r Result) ByMethod() bool {
+ return r.flags&reportByMethod != 0
+}
+
+// ByFunc reports whether a [Comparer] function determined equality.
+func (r Result) ByFunc() bool {
+ return r.flags&reportByFunc != 0
+}
+
+// ByCycle reports whether a reference cycle was detected.
+func (r Result) ByCycle() bool {
+ return r.flags&reportByCycle != 0
+}
+
+type resultFlags uint
+
+const (
+ _ resultFlags = (1 << iota) / 2
+
+ reportEqual
+ reportUnequal
+ reportByIgnore
+ reportByMethod
+ reportByFunc
+ reportByCycle
+)
+
+// Reporter is an [Option] that can be passed to [Equal]. When [Equal] traverses
+// the value trees, it calls PushStep as it descends into each node in the
+// tree and PopStep as it ascends out of the node. The leaves of the tree are
+// either compared (determined to be equal or not equal) or ignored and reported
+// as such by calling the Report method.
+func Reporter(r interface {
+ // PushStep is called when a tree-traversal operation is performed.
+ // The PathStep itself is only valid until the step is popped.
+ // The PathStep.Values are valid for the duration of the entire traversal
+ // and must not be mutated.
+ //
+ // Equal always calls PushStep at the start to provide an operation-less
+ // PathStep used to report the root values.
+ //
+ // Within a slice, the exact set of inserted, removed, or modified elements
+ // is unspecified and may change in future implementations.
+ // The entries of a map are iterated through in an unspecified order.
+ PushStep(PathStep)
+
+ // Report is called exactly once on leaf nodes to report whether the
+ // comparison identified the node as equal, unequal, or ignored.
+ // A leaf node is one that is immediately preceded by and followed by
+ // a pair of PushStep and PopStep calls.
+ Report(Result)
+
+ // PopStep ascends back up the value tree.
+ // There is always a matching pop call for every push call.
+ PopStep()
+}) Option {
+ return reporter{r}
+}
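+
+// Illustrative usage sketch (not part of the upstream file; adapted from the
+// package's documented custom-reporter example, with diffReporter as an
+// arbitrary name and x, y as hypothetical values):
+//
+//	type diffReporter struct {
+//		path  cmp.Path
+//		diffs []string
+//	}
+//
+//	func (r *diffReporter) PushStep(ps cmp.PathStep) { r.path = append(r.path, ps) }
+//	func (r *diffReporter) PopStep()                 { r.path = r.path[:len(r.path)-1] }
+//	func (r *diffReporter) Report(rs cmp.Result) {
+//		if !rs.Equal() {
+//			vx, vy := r.path.Last().Values()
+//			r.diffs = append(r.diffs, fmt.Sprintf("%#v:\n\t-: %+v\n\t+: %+v\n", r.path, vx, vy))
+//		}
+//	}
+//
+//	var r diffReporter
+//	cmp.Equal(x, y, cmp.Reporter(&r)) // r.diffs now lists each differing path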
+
+type reporter struct{ reporterIface }
+type reporterIface interface {
+ PushStep(PathStep)
+ Report(Result)
+ PopStep()
+}
+
+func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
+ panic("not implemented")
+}
+
+// normalizeOption normalizes the input options such that all Options groups
+// are flattened and groups with a single element are reduced to that element.
+// Only coreOptions and Options containing coreOptions are allowed.
+func normalizeOption(src Option) Option {
+ switch opts := flattenOptions(nil, Options{src}); len(opts) {
+ case 0:
+ return nil
+ case 1:
+ return opts[0]
+ default:
+ return opts
+ }
+}
+
+// flattenOptions copies all options in src to dst as a flat list.
+// Only coreOptions and Options containing coreOptions are allowed.
+func flattenOptions(dst, src Options) Options {
+ for _, opt := range src {
+ switch opt := opt.(type) {
+ case nil:
+ continue
+ case Options:
+ dst = flattenOptions(dst, opt)
+ case coreOption:
+ dst = append(dst, opt)
+ default:
+ panic(fmt.Sprintf("invalid option type: %T", opt))
+ }
+ }
+ return dst
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go
new file mode 100644
index 0000000..c3c1456
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/path.go
@@ -0,0 +1,390 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/google/go-cmp/cmp/internal/value"
+)
+
+// Path is a list of [PathStep] describing the sequence of operations to get
+// from some root type to the current position in the value tree.
+// The first Path element is always an operation-less [PathStep] that exists
+// simply to identify the initial type.
+//
+// When traversing structs with embedded structs, the embedded struct will
+// always be accessed as a field before traversing the fields of the
+// embedded struct themselves. That is, an exported field from the
+// embedded struct will never be accessed directly from the parent struct.
+type Path []PathStep
+
+// PathStep is a union-type for specific operations to traverse
+// a value's tree structure. Users of this package never need to implement
+// this interface, as values of these types are returned by this package.
+//
+// Implementations of this interface:
+// - [StructField]
+// - [SliceIndex]
+// - [MapIndex]
+// - [Indirect]
+// - [TypeAssertion]
+// - [Transform]
+type PathStep interface {
+ String() string
+
+ // Type is the resulting type after performing the path step.
+ Type() reflect.Type
+
+ // Values is the resulting values after performing the path step.
+ // The type of each valid value is guaranteed to be identical to Type.
+ //
+ // In some cases, one or both may be invalid or have restrictions:
+ // - For StructField, both are not interface-able if the current field
+ // is unexported and the struct type is not explicitly permitted by
+ // an Exporter to traverse unexported fields.
+ // - For SliceIndex, one may be invalid if an element is missing from
+ // either the x or y slice.
+ // - For MapIndex, one may be invalid if an entry is missing from
+ // either the x or y map.
+ //
+ // The provided values must not be mutated.
+ Values() (vx, vy reflect.Value)
+}
+
+var (
+ _ PathStep = StructField{}
+ _ PathStep = SliceIndex{}
+ _ PathStep = MapIndex{}
+ _ PathStep = Indirect{}
+ _ PathStep = TypeAssertion{}
+ _ PathStep = Transform{}
+)
+
+func (pa *Path) push(s PathStep) {
+ *pa = append(*pa, s)
+}
+
+func (pa *Path) pop() {
+ *pa = (*pa)[:len(*pa)-1]
+}
+
+// Last returns the last [PathStep] in the Path.
+// If the path is empty, this returns a non-nil [PathStep]
+// that reports a nil [PathStep.Type].
+func (pa Path) Last() PathStep {
+ return pa.Index(-1)
+}
+
+// Index returns the ith step in the Path and supports negative indexing.
+// A negative index starts counting from the tail of the Path such that -1
+// refers to the last step, -2 refers to the second-to-last step, and so on.
+// If index is invalid, this returns a non-nil [PathStep]
+// that reports a nil [PathStep.Type].
+func (pa Path) Index(i int) PathStep {
+ if i < 0 {
+ i = len(pa) + i
+ }
+ if i < 0 || i >= len(pa) {
+ return pathStep{}
+ }
+ return pa[i]
+}
+
+// String returns the simplified path to a node.
+// The simplified path only contains struct field accesses.
+//
+// For example:
+//
+// MyMap.MySlices.MyField
+func (pa Path) String() string {
+ var ss []string
+ for _, s := range pa {
+ if _, ok := s.(StructField); ok {
+ ss = append(ss, s.String())
+ }
+ }
+ return strings.TrimPrefix(strings.Join(ss, ""), ".")
+}
+
+// GoString returns the path to a specific node using Go syntax.
+//
+// For example:
+//
+// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField
+func (pa Path) GoString() string {
+ var ssPre, ssPost []string
+ var numIndirect int
+ for i, s := range pa {
+ var nextStep PathStep
+ if i+1 < len(pa) {
+ nextStep = pa[i+1]
+ }
+ switch s := s.(type) {
+ case Indirect:
+ numIndirect++
+ pPre, pPost := "(", ")"
+ switch nextStep.(type) {
+ case Indirect:
+ continue // Next step is indirection, so let them batch up
+ case StructField:
+ numIndirect-- // Automatic indirection on struct fields
+ case nil:
+ pPre, pPost = "", "" // Last step; no need for parenthesis
+ }
+ if numIndirect > 0 {
+ ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect))
+ ssPost = append(ssPost, pPost)
+ }
+ numIndirect = 0
+ continue
+ case Transform:
+ ssPre = append(ssPre, s.trans.name+"(")
+ ssPost = append(ssPost, ")")
+ continue
+ }
+ ssPost = append(ssPost, s.String())
+ }
+ for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 {
+ ssPre[i], ssPre[j] = ssPre[j], ssPre[i]
+ }
+ return strings.Join(ssPre, "") + strings.Join(ssPost, "")
+}
+
+type pathStep struct {
+ typ reflect.Type
+ vx, vy reflect.Value
+}
+
+func (ps pathStep) Type() reflect.Type { return ps.typ }
+func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy }
+func (ps pathStep) String() string {
+ if ps.typ == nil {
+ return ""
+ }
+ s := value.TypeString(ps.typ, false)
+ if s == "" || strings.ContainsAny(s, "{}\n") {
+ return "root" // Type too simple or complex to print
+ }
+ return fmt.Sprintf("{%s}", s)
+}
+
+// StructField is a [PathStep] that represents a struct field access
+// on a field called [StructField.Name].
+type StructField struct{ *structField }
+type structField struct {
+ pathStep
+ name string
+ idx int
+
+ // These fields are used for forcibly accessing an unexported field.
+ // pvx, pvy, and field are only valid if unexported is true.
+ unexported bool
+ mayForce bool // Forcibly allow visibility
+ paddr bool // Was parent addressable?
+ pvx, pvy reflect.Value // Parent values (always addressable)
+ field reflect.StructField // Field information
+}
+
+func (sf StructField) Type() reflect.Type { return sf.typ }
+func (sf StructField) Values() (vx, vy reflect.Value) {
+ if !sf.unexported {
+ return sf.vx, sf.vy // CanInterface reports true
+ }
+
+ // Forcibly obtain read-write access to an unexported struct field.
+ if sf.mayForce {
+ vx = retrieveUnexportedField(sf.pvx, sf.field, sf.paddr)
+ vy = retrieveUnexportedField(sf.pvy, sf.field, sf.paddr)
+ return vx, vy // CanInterface reports true
+ }
+ return sf.vx, sf.vy // CanInterface reports false
+}
+func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) }
+
+// Name is the field name.
+func (sf StructField) Name() string { return sf.name }
+
+// Index is the index of the field in the parent struct type.
+// See [reflect.Type.Field].
+func (sf StructField) Index() int { return sf.idx }
+
+// SliceIndex is a [PathStep] that represents an index operation on
+// a slice or array at some index [SliceIndex.Key].
+type SliceIndex struct{ *sliceIndex }
+type sliceIndex struct {
+ pathStep
+ xkey, ykey int
+ isSlice bool // False for reflect.Array
+}
+
+func (si SliceIndex) Type() reflect.Type { return si.typ }
+func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy }
+func (si SliceIndex) String() string {
+ switch {
+ case si.xkey == si.ykey:
+ return fmt.Sprintf("[%d]", si.xkey)
+ case si.ykey == -1:
+ // [5->?] means "I don't know where X[5] went"
+ return fmt.Sprintf("[%d->?]", si.xkey)
+ case si.xkey == -1:
+ // [?->3] means "I don't know where Y[3] came from"
+ return fmt.Sprintf("[?->%d]", si.ykey)
+ default:
+ // [5->3] means "X[5] moved to Y[3]"
+ return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey)
+ }
+}
+
+// Key is the index key; it may return -1 if in a split state.
+func (si SliceIndex) Key() int {
+ if si.xkey != si.ykey {
+ return -1
+ }
+ return si.xkey
+}
+
+// SplitKeys are the indexes for indexing into slices in the
+// x and y values, respectively. These indexes may differ due to the
+// insertion or removal of an element in one of the slices, causing
+// all of the indexes to be shifted. If an index is -1, then that
+// indicates that the element does not exist in the associated slice.
+//
+// [SliceIndex.Key] is guaranteed to return -1 if and only if the indexes
+// returned by SplitKeys are not the same. SplitKeys will never return -1 for
+// both indexes.
+func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey }
+
+// MapIndex is a [PathStep] that represents an index operation on a map at some index Key.
+type MapIndex struct{ *mapIndex }
+type mapIndex struct {
+ pathStep
+ key reflect.Value
+}
+
+func (mi MapIndex) Type() reflect.Type { return mi.typ }
+func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy }
+func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) }
+
+// Key is the value of the map key.
+func (mi MapIndex) Key() reflect.Value { return mi.key }
+
+// Indirect is a [PathStep] that represents pointer indirection on the parent type.
+type Indirect struct{ *indirect }
+type indirect struct {
+ pathStep
+}
+
+func (in Indirect) Type() reflect.Type { return in.typ }
+func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy }
+func (in Indirect) String() string { return "*" }
+
+// TypeAssertion is a [PathStep] that represents a type assertion on an interface.
+type TypeAssertion struct{ *typeAssertion }
+type typeAssertion struct {
+ pathStep
+}
+
+func (ta TypeAssertion) Type() reflect.Type { return ta.typ }
+func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy }
+func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) }
+
+// Transform is a [PathStep] that represents a transformation
+// from the parent type to the current type.
+type Transform struct{ *transform }
+type transform struct {
+ pathStep
+ trans *transformer
+}
+
+func (tf Transform) Type() reflect.Type { return tf.typ }
+func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy }
+func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) }
+
+// Name is the name of the [Transformer].
+func (tf Transform) Name() string { return tf.trans.name }
+
+// Func is the function pointer to the transformer function.
+func (tf Transform) Func() reflect.Value { return tf.trans.fnc }
+
+// Option returns the originally constructed [Transformer] option.
+// The == operator can be used to detect the exact option used.
+func (tf Transform) Option() Option { return tf.trans }
+
+// pointerPath represents a dual-stack of pointers encountered when
+// recursively traversing the x and y values. This data structure supports
+// detection of cycles and determining whether the cycles are equal.
+// In Go, cycles can occur via pointers, slices, and maps.
+//
+// The pointerPath uses a map to represent a stack, where descending into a
+// pointer pushes the address onto the stack and ascending from a pointer
+// pops the address from the stack. Thus, when traversing into a pointer from
+// reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles
+// by checking whether the pointer has already been visited. The cycle detection
+// uses a separate stack for the x and y values.
+//
+// If a cycle is detected we need to determine whether the two pointers
+// should be considered equal. The definition of equality chosen by Equal
+// requires two graphs to have the same structure. To determine this, both the
+// x and y values must have a cycle where the previous pointers were also
+// encountered together as a pair.
+//
+// Semantically, this is equivalent to augmenting Indirect, SliceIndex, and
+// MapIndex with pointer information for the x and y values.
+// Suppose px and py are two pointers to compare; we then search the
+// Path for whether px was ever encountered in the Path history of x, and
+// similarly so with py. If either side has a cycle, the comparison is only
+// equal if both px and py have a cycle resulting from the same PathStep.
+//
+// Using a map as a stack is more performant as we can perform cycle detection
+// in O(1) instead of O(N) where N is len(Path).
+type pointerPath struct {
+ // mx is keyed by x pointers, where the value is the associated y pointer.
+ mx map[value.Pointer]value.Pointer
+ // my is keyed by y pointers, where the value is the associated x pointer.
+ my map[value.Pointer]value.Pointer
+}
+
+func (p *pointerPath) Init() {
+ p.mx = make(map[value.Pointer]value.Pointer)
+ p.my = make(map[value.Pointer]value.Pointer)
+}
+
+// Push indicates intent to descend into pointers vx and vy where
+// visited reports whether either has been seen before. If visited before,
+// equal reports whether both pointers were encountered together.
+// Pop must be called if and only if the pointers were never visited.
+//
+// The pointers vx and vy must each be of kind reflect.Ptr, reflect.Slice, or
+// reflect.Map, and must be non-nil.
+func (p pointerPath) Push(vx, vy reflect.Value) (equal, visited bool) {
+ px := value.PointerOf(vx)
+ py := value.PointerOf(vy)
+ _, ok1 := p.mx[px]
+ _, ok2 := p.my[py]
+ if ok1 || ok2 {
+ equal = p.mx[px] == py && p.my[py] == px // Pointers paired together
+ return equal, true
+ }
+ p.mx[px] = py
+ p.my[py] = px
+ return false, false
+}
+
+// Pop ascends from pointers vx and vy.
+func (p pointerPath) Pop(vx, vy reflect.Value) {
+ delete(p.mx, value.PointerOf(vx))
+ delete(p.my, value.PointerOf(vy))
+}
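+
+// Illustrative sketch (not part of the upstream file) of the intended
+// Push/Pop pairing during traversal, with vx and vy as the pointers
+// currently being descended into:
+//
+//	if eq, visited := p.Push(vx, vy); visited {
+//		return eq // cycle detected; report and stop descending
+//	}
+//	defer p.Pop(vx, vy)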
+
+// isExported reports whether the identifier is exported.
+func isExported(id string) bool {
+ r, _ := utf8.DecodeRuneInString(id)
+ return unicode.IsUpper(r)
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/report.go b/vendor/github.com/google/go-cmp/cmp/report.go
new file mode 100644
index 0000000..f43cd12
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report.go
@@ -0,0 +1,54 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+// defaultReporter implements the reporter interface.
+//
+// As Equal serially calls the PushStep, Report, and PopStep methods, the
+// defaultReporter constructs a tree-based representation of the compared value
+// and the result of each comparison (see valueNode).
+//
+// When the String method is called, the FormatDiff method transforms the
+// valueNode tree into a textNode tree, which is a tree-based representation
+// of the textual output (see textNode).
+//
+// Lastly, the textNode.String method produces the final report as a string.
+type defaultReporter struct {
+ root *valueNode
+ curr *valueNode
+}
+
+func (r *defaultReporter) PushStep(ps PathStep) {
+ r.curr = r.curr.PushStep(ps)
+ if r.root == nil {
+ r.root = r.curr
+ }
+}
+func (r *defaultReporter) Report(rs Result) {
+ r.curr.Report(rs)
+}
+func (r *defaultReporter) PopStep() {
+ r.curr = r.curr.PopStep()
+}
+
+// String provides a full report of the differences detected as a structured
+// literal in pseudo-Go syntax. String may only be called after the entire tree
+// has been traversed.
+func (r *defaultReporter) String() string {
+ assert(r.root != nil && r.curr == nil)
+ if r.root.NumDiff == 0 {
+ return ""
+ }
+ ptrs := new(pointerReferences)
+ text := formatOptions{}.FormatDiff(r.root, ptrs)
+ resolveReferences(text)
+ return text.String()
+}
+
+func assert(ok bool) {
+ if !ok {
+ panic("assertion failure")
+ }
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go
new file mode 100644
index 0000000..2050bf6
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go
@@ -0,0 +1,433 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// numContextRecords is the number of surrounding equal records to print.
+const numContextRecords = 2
+
+type diffMode byte
+
+const (
+ diffUnknown diffMode = 0
+ diffIdentical diffMode = ' '
+ diffRemoved diffMode = '-'
+ diffInserted diffMode = '+'
+)
+
+type typeMode int
+
+const (
+ // emitType always prints the type.
+ emitType typeMode = iota
+ // elideType never prints the type.
+ elideType
+ // autoType prints the type only for composite kinds
+ // (i.e., structs, slices, arrays, and maps).
+ autoType
+)
+
+type formatOptions struct {
+ // DiffMode controls the output mode of FormatDiff.
+ //
+ // If diffUnknown, then produce a diff of the x and y values.
+ // If diffIdentical, then emit values as if they were equal.
+ // If diffRemoved, then only emit x values (ignoring y values).
+ // If diffInserted, then only emit y values (ignoring x values).
+ DiffMode diffMode
+
+ // TypeMode controls whether to print the type for the current node.
+ //
+ // As a general rule of thumb, we always print the type of the next node
+ // after an interface, and always elide the type of the next node after
+ // a slice or map node.
+ TypeMode typeMode
+
+ // formatValueOptions are options specific to printing reflect.Values.
+ formatValueOptions
+}
+
+func (opts formatOptions) WithDiffMode(d diffMode) formatOptions {
+ opts.DiffMode = d
+ return opts
+}
+func (opts formatOptions) WithTypeMode(t typeMode) formatOptions {
+ opts.TypeMode = t
+ return opts
+}
+func (opts formatOptions) WithVerbosity(level int) formatOptions {
+ opts.VerbosityLevel = level
+ opts.LimitVerbosity = true
+ return opts
+}
+func (opts formatOptions) verbosity() uint {
+ switch {
+ case opts.VerbosityLevel < 0:
+ return 0
+ case opts.VerbosityLevel > 16:
+ return 16 // some reasonable maximum to avoid shift overflow
+ default:
+ return uint(opts.VerbosityLevel)
+ }
+}
+
+const maxVerbosityPreset = 6
+
+// verbosityPreset modifies the verbosity settings given an index
+// between 0 and maxVerbosityPreset, inclusive.
+func verbosityPreset(opts formatOptions, i int) formatOptions {
+ opts.VerbosityLevel = int(opts.verbosity()) + 2*i
+ if i > 0 {
+ opts.AvoidStringer = true
+ }
+ if i >= maxVerbosityPreset {
+ opts.PrintAddresses = true
+ opts.QualifiedNames = true
+ }
+ return opts
+}
+
+// FormatDiff converts a valueNode tree into a textNode tree, where the latter
+// is a textual representation of the differences detected in the former.
+func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) {
+ if opts.DiffMode == diffIdentical {
+ opts = opts.WithVerbosity(1)
+ } else if opts.verbosity() < 3 {
+ opts = opts.WithVerbosity(3)
+ }
+
+ // Check whether we have specialized formatting for this node.
+ // This is not necessary, but helpful for producing more readable outputs.
+ if opts.CanFormatDiffSlice(v) {
+ return opts.FormatDiffSlice(v)
+ }
+
+ var parentKind reflect.Kind
+ if v.parent != nil && v.parent.TransformerName == "" {
+ parentKind = v.parent.Type.Kind()
+ }
+
+ // For leaf nodes, format the value based on the reflect.Values alone.
+ // As a special case, treat equal []byte as leaf nodes.
+ isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == byteType
+ isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0
+ if v.MaxDepth == 0 || isEqualBytes {
+ switch opts.DiffMode {
+ case diffUnknown, diffIdentical:
+ // Format Equal.
+ if v.NumDiff == 0 {
+ outx := opts.FormatValue(v.ValueX, parentKind, ptrs)
+ outy := opts.FormatValue(v.ValueY, parentKind, ptrs)
+ if v.NumIgnored > 0 && v.NumSame == 0 {
+ return textEllipsis
+ } else if outx.Len() < outy.Len() {
+ return outx
+ } else {
+ return outy
+ }
+ }
+
+ // Format unequal.
+ assert(opts.DiffMode == diffUnknown)
+ var list textList
+ outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, parentKind, ptrs)
+ outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, parentKind, ptrs)
+ for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ {
+ opts2 := verbosityPreset(opts, i).WithTypeMode(elideType)
+ outx = opts2.FormatValue(v.ValueX, parentKind, ptrs)
+ outy = opts2.FormatValue(v.ValueY, parentKind, ptrs)
+ }
+ if outx != nil {
+ list = append(list, textRecord{Diff: '-', Value: outx})
+ }
+ if outy != nil {
+ list = append(list, textRecord{Diff: '+', Value: outy})
+ }
+ return opts.WithTypeMode(emitType).FormatType(v.Type, list)
+ case diffRemoved:
+ return opts.FormatValue(v.ValueX, parentKind, ptrs)
+ case diffInserted:
+ return opts.FormatValue(v.ValueY, parentKind, ptrs)
+ default:
+ panic("invalid diff mode")
+ }
+ }
+
+ // Register slice element to support cycle detection.
+ if parentKind == reflect.Slice {
+ ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, true)
+ defer ptrs.Pop()
+ defer func() { out = wrapTrunkReferences(ptrRefs, out) }()
+ }
+
+ // Descend into the child value node.
+ if v.TransformerName != "" {
+ out := opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs)
+ out = &textWrap{Prefix: "Inverse(" + v.TransformerName + ", ", Value: out, Suffix: ")"}
+ return opts.FormatType(v.Type, out)
+ } else {
+ switch k := v.Type.Kind(); k {
+ case reflect.Struct, reflect.Array, reflect.Slice:
+ out = opts.formatDiffList(v.Records, k, ptrs)
+ out = opts.FormatType(v.Type, out)
+ case reflect.Map:
+ // Register map to support cycle detection.
+ ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false)
+ defer ptrs.Pop()
+
+ out = opts.formatDiffList(v.Records, k, ptrs)
+ out = wrapTrunkReferences(ptrRefs, out)
+ out = opts.FormatType(v.Type, out)
+ case reflect.Ptr:
+ // Register pointer to support cycle detection.
+ ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false)
+ defer ptrs.Pop()
+
+ out = opts.FormatDiff(v.Value, ptrs)
+ out = wrapTrunkReferences(ptrRefs, out)
+ out = &textWrap{Prefix: "&", Value: out}
+ case reflect.Interface:
+ out = opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs)
+ default:
+ panic(fmt.Sprintf("%v cannot have children", k))
+ }
+ return out
+ }
+}
+
+func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, ptrs *pointerReferences) textNode {
+ // Derive record name based on the data structure kind.
+ var name string
+ var formatKey func(reflect.Value) string
+ switch k {
+ case reflect.Struct:
+ name = "field"
+ opts = opts.WithTypeMode(autoType)
+ formatKey = func(v reflect.Value) string { return v.String() }
+ case reflect.Slice, reflect.Array:
+ name = "element"
+ opts = opts.WithTypeMode(elideType)
+ formatKey = func(reflect.Value) string { return "" }
+ case reflect.Map:
+ name = "entry"
+ opts = opts.WithTypeMode(elideType)
+ formatKey = func(v reflect.Value) string { return formatMapKey(v, false, ptrs) }
+ }
+
+ maxLen := -1
+ if opts.LimitVerbosity {
+ if opts.DiffMode == diffIdentical {
+ maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
+ } else {
+ maxLen = (1 << opts.verbosity()) << 1 // 2, 4, 8, 16, 32, 64, etc...
+ }
+ opts.VerbosityLevel--
+ }
+
+ // Handle unification.
+ switch opts.DiffMode {
+ case diffIdentical, diffRemoved, diffInserted:
+ var list textList
+ var deferredEllipsis bool // Add final "..." to indicate records were dropped
+ for _, r := range recs {
+ if len(list) == maxLen {
+ deferredEllipsis = true
+ break
+ }
+
+ // Elide struct fields that are zero value.
+ if k == reflect.Struct {
+ var isZero bool
+ switch opts.DiffMode {
+ case diffIdentical:
+ isZero = r.Value.ValueX.IsZero() || r.Value.ValueY.IsZero()
+ case diffRemoved:
+ isZero = r.Value.ValueX.IsZero()
+ case diffInserted:
+ isZero = r.Value.ValueY.IsZero()
+ }
+ if isZero {
+ continue
+ }
+ }
+ // Elide ignored nodes.
+ if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 {
+ deferredEllipsis = !(k == reflect.Slice || k == reflect.Array)
+ if !deferredEllipsis {
+ list.AppendEllipsis(diffStats{})
+ }
+ continue
+ }
+ if out := opts.FormatDiff(r.Value, ptrs); out != nil {
+ list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+ }
+ }
+ if deferredEllipsis {
+ list.AppendEllipsis(diffStats{})
+ }
+ return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+ case diffUnknown:
+ default:
+ panic("invalid diff mode")
+ }
+
+ // Handle differencing.
+ var numDiffs int
+ var list textList
+ var keys []reflect.Value // invariant: len(list) == len(keys)
+ groups := coalesceAdjacentRecords(name, recs)
+ maxGroup := diffStats{Name: name}
+ for i, ds := range groups {
+ if maxLen >= 0 && numDiffs >= maxLen {
+ maxGroup = maxGroup.Append(ds)
+ continue
+ }
+
+ // Handle equal records.
+ if ds.NumDiff() == 0 {
+ // Compute the number of leading and trailing records to print.
+ var numLo, numHi int
+ numEqual := ds.NumIgnored + ds.NumIdentical
+ for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 {
+ if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
+ break
+ }
+ numLo++
+ }
+ for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
+ if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
+ break
+ }
+ numHi++
+ }
+ if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 {
+ numHi++ // Avoid pointless coalescing of a single equal record
+ }
+
+ // Format the equal values.
+ for _, r := range recs[:numLo] {
+ out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs)
+ list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+ keys = append(keys, r.Key)
+ }
+ if numEqual > numLo+numHi {
+ ds.NumIdentical -= numLo + numHi
+ list.AppendEllipsis(ds)
+ for len(keys) < len(list) {
+ keys = append(keys, reflect.Value{})
+ }
+ }
+ for _, r := range recs[numEqual-numHi : numEqual] {
+ out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs)
+ list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+ keys = append(keys, r.Key)
+ }
+ recs = recs[numEqual:]
+ continue
+ }
+
+ // Handle unequal records.
+ for _, r := range recs[:ds.NumDiff()] {
+ switch {
+ case opts.CanFormatDiffSlice(r.Value):
+ out := opts.FormatDiffSlice(r.Value)
+ list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+ keys = append(keys, r.Key)
+ case r.Value.NumChildren == r.Value.MaxDepth:
+ outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs)
+ outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs)
+ for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ {
+ opts2 := verbosityPreset(opts, i)
+ outx = opts2.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs)
+ outy = opts2.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs)
+ }
+ if outx != nil {
+ list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx})
+ keys = append(keys, r.Key)
+ }
+ if outy != nil {
+ list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy})
+ keys = append(keys, r.Key)
+ }
+ default:
+ out := opts.FormatDiff(r.Value, ptrs)
+ list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+ keys = append(keys, r.Key)
+ }
+ }
+ recs = recs[ds.NumDiff():]
+ numDiffs += ds.NumDiff()
+ }
+ if maxGroup.IsZero() {
+ assert(len(recs) == 0)
+ } else {
+ list.AppendEllipsis(maxGroup)
+ for len(keys) < len(list) {
+ keys = append(keys, reflect.Value{})
+ }
+ }
+ assert(len(list) == len(keys))
+
+ // For maps, the default formatting logic uses fmt.Stringer, which may
+ // produce the same output for distinct keys. In that case, avoid calling
+ // String so that the keys can be disambiguated.
+ if k == reflect.Map {
+ var ambiguous bool
+ seenKeys := map[string]reflect.Value{}
+ for i, currKey := range keys {
+ if currKey.IsValid() {
+ strKey := list[i].Key
+ prevKey, seen := seenKeys[strKey]
+ if seen && prevKey.CanInterface() && currKey.CanInterface() {
+ ambiguous = prevKey.Interface() != currKey.Interface()
+ if ambiguous {
+ break
+ }
+ }
+ seenKeys[strKey] = currKey
+ }
+ }
+ if ambiguous {
+ for i, k := range keys {
+ if k.IsValid() {
+ list[i].Key = formatMapKey(k, true, ptrs)
+ }
+ }
+ }
+ }
+
+ return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+}
+
+// coalesceAdjacentRecords coalesces the list of records into groups of
+// adjacent equal or unequal counts.
+func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) {
+ var prevCase int // Arbitrary index into which case last occurred
+ lastStats := func(i int) *diffStats {
+ if prevCase != i {
+ groups = append(groups, diffStats{Name: name})
+ prevCase = i
+ }
+ return &groups[len(groups)-1]
+ }
+ for _, r := range recs {
+ switch rv := r.Value; {
+ case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0:
+ lastStats(1).NumIgnored++
+ case rv.NumDiff == 0:
+ lastStats(1).NumIdentical++
+ case rv.NumDiff > 0 && !rv.ValueY.IsValid():
+ lastStats(2).NumRemoved++
+ case rv.NumDiff > 0 && !rv.ValueX.IsValid():
+ lastStats(2).NumInserted++
+ default:
+ lastStats(2).NumModified++
+ }
+ }
+ return groups
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/report_references.go b/vendor/github.com/google/go-cmp/cmp/report_references.go
new file mode 100644
index 0000000..be31b33
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_references.go
@@ -0,0 +1,264 @@
+// Copyright 2020, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/google/go-cmp/cmp/internal/flags"
+ "github.com/google/go-cmp/cmp/internal/value"
+)
+
+const (
+ pointerDelimPrefix = "⟪"
+ pointerDelimSuffix = "⟫"
+)
+
+// formatPointer prints the address of the pointer.
+func formatPointer(p value.Pointer, withDelims bool) string {
+ v := p.Uintptr()
+ if flags.Deterministic {
+ v = 0xdeadf00f // Only used for stable testing purposes
+ }
+ if withDelims {
+ return pointerDelimPrefix + formatHex(uint64(v)) + pointerDelimSuffix
+ }
+ return formatHex(uint64(v))
+}
+
+// pointerReferences is a stack of pointers visited so far.
+type pointerReferences [][2]value.Pointer
+
+func (ps *pointerReferences) PushPair(vx, vy reflect.Value, d diffMode, deref bool) (pp [2]value.Pointer) {
+ if deref && vx.IsValid() {
+ vx = vx.Addr()
+ }
+ if deref && vy.IsValid() {
+ vy = vy.Addr()
+ }
+ switch d {
+ case diffUnknown, diffIdentical:
+ pp = [2]value.Pointer{value.PointerOf(vx), value.PointerOf(vy)}
+ case diffRemoved:
+ pp = [2]value.Pointer{value.PointerOf(vx), value.Pointer{}}
+ case diffInserted:
+ pp = [2]value.Pointer{value.Pointer{}, value.PointerOf(vy)}
+ }
+ *ps = append(*ps, pp)
+ return pp
+}
+
+func (ps *pointerReferences) Push(v reflect.Value) (p value.Pointer, seen bool) {
+ p = value.PointerOf(v)
+ for _, pp := range *ps {
+ if p == pp[0] || p == pp[1] {
+ return p, true
+ }
+ }
+ *ps = append(*ps, [2]value.Pointer{p, p})
+ return p, false
+}
+
+func (ps *pointerReferences) Pop() {
+ *ps = (*ps)[:len(*ps)-1]
+}
+
+// trunkReferences is metadata for a textNode indicating that the sub-tree
+// represents the value for either pointer in a pair of references.
+type trunkReferences struct{ pp [2]value.Pointer }
+
+// trunkReference is metadata for a textNode indicating that the sub-tree
+// represents the value for the given pointer reference.
+type trunkReference struct{ p value.Pointer }
+
+// leafReference is metadata for a textNode indicating that the value is
+// truncated as it refers to another part of the tree (i.e., a trunk).
+type leafReference struct{ p value.Pointer }
+
+func wrapTrunkReferences(pp [2]value.Pointer, s textNode) textNode {
+ switch {
+ case pp[0].IsNil():
+ return &textWrap{Value: s, Metadata: trunkReference{pp[1]}}
+ case pp[1].IsNil():
+ return &textWrap{Value: s, Metadata: trunkReference{pp[0]}}
+ case pp[0] == pp[1]:
+ return &textWrap{Value: s, Metadata: trunkReference{pp[0]}}
+ default:
+ return &textWrap{Value: s, Metadata: trunkReferences{pp}}
+ }
+}
+func wrapTrunkReference(p value.Pointer, printAddress bool, s textNode) textNode {
+ var prefix string
+ if printAddress {
+ prefix = formatPointer(p, true)
+ }
+ return &textWrap{Prefix: prefix, Value: s, Metadata: trunkReference{p}}
+}
+func makeLeafReference(p value.Pointer, printAddress bool) textNode {
+ out := &textWrap{Prefix: "(", Value: textEllipsis, Suffix: ")"}
+ var prefix string
+ if printAddress {
+ prefix = formatPointer(p, true)
+ }
+ return &textWrap{Prefix: prefix, Value: out, Metadata: leafReference{p}}
+}
+
+// resolveReferences walks the textNode tree searching for any leaf reference
+// metadata and resolves each against the corresponding trunk references.
+// Since pointer addresses in memory are not particularly readable to the user,
+// it replaces each pointer value with an arbitrary and unique reference ID.
+func resolveReferences(s textNode) {
+ var walkNodes func(textNode, func(textNode))
+ walkNodes = func(s textNode, f func(textNode)) {
+ f(s)
+ switch s := s.(type) {
+ case *textWrap:
+ walkNodes(s.Value, f)
+ case textList:
+ for _, r := range s {
+ walkNodes(r.Value, f)
+ }
+ }
+ }
+
+ // Collect all trunks and leaves with reference metadata.
+ var trunks, leaves []*textWrap
+ walkNodes(s, func(s textNode) {
+ if s, ok := s.(*textWrap); ok {
+ switch s.Metadata.(type) {
+ case leafReference:
+ leaves = append(leaves, s)
+ case trunkReference, trunkReferences:
+ trunks = append(trunks, s)
+ }
+ }
+ })
+
+ // No leaf references to resolve.
+ if len(leaves) == 0 {
+ return
+ }
+
+ // Collect the set of all leaf references to resolve.
+ leafPtrs := make(map[value.Pointer]bool)
+ for _, leaf := range leaves {
+ leafPtrs[leaf.Metadata.(leafReference).p] = true
+ }
+
+ // Collect the set of trunk pointers that are always paired together.
+ // This allows us to assign a single ID to both pointers for brevity.
+ // If a pointer in a pair ever occurs by itself or as a different pair,
+ // then the pair is broken.
+ pairedTrunkPtrs := make(map[value.Pointer]value.Pointer)
+ unpair := func(p value.Pointer) {
+ if !pairedTrunkPtrs[p].IsNil() {
+ pairedTrunkPtrs[pairedTrunkPtrs[p]] = value.Pointer{} // invalidate other half
+ }
+ pairedTrunkPtrs[p] = value.Pointer{} // invalidate this half
+ }
+ for _, trunk := range trunks {
+ switch p := trunk.Metadata.(type) {
+ case trunkReference:
+ unpair(p.p) // standalone pointer cannot be part of a pair
+ case trunkReferences:
+ p0, ok0 := pairedTrunkPtrs[p.pp[0]]
+ p1, ok1 := pairedTrunkPtrs[p.pp[1]]
+ switch {
+ case !ok0 && !ok1:
+ // Register the newly seen pair.
+ pairedTrunkPtrs[p.pp[0]] = p.pp[1]
+ pairedTrunkPtrs[p.pp[1]] = p.pp[0]
+ case ok0 && ok1 && p0 == p.pp[1] && p1 == p.pp[0]:
+ // Exact pair already seen; do nothing.
+ default:
+ // Pair conflicts with some other pair; break all pairs.
+ unpair(p.pp[0])
+ unpair(p.pp[1])
+ }
+ }
+ }
+
+ // Correlate each pointer referenced by leaves to a unique identifier,
+ // and print the IDs for each trunk that matches those pointers.
+ var nextID uint
+ ptrIDs := make(map[value.Pointer]uint)
+ newID := func() uint {
+ id := nextID
+ nextID++
+ return id
+ }
+ for _, trunk := range trunks {
+ switch p := trunk.Metadata.(type) {
+ case trunkReference:
+ if print := leafPtrs[p.p]; print {
+ id, ok := ptrIDs[p.p]
+ if !ok {
+ id = newID()
+ ptrIDs[p.p] = id
+ }
+ trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
+ }
+ case trunkReferences:
+ print0 := leafPtrs[p.pp[0]]
+ print1 := leafPtrs[p.pp[1]]
+ if print0 || print1 {
+ id0, ok0 := ptrIDs[p.pp[0]]
+ id1, ok1 := ptrIDs[p.pp[1]]
+ isPair := pairedTrunkPtrs[p.pp[0]] == p.pp[1] && pairedTrunkPtrs[p.pp[1]] == p.pp[0]
+ if isPair {
+ var id uint
+ assert(ok0 == ok1) // must be seen together or not at all
+ if ok0 {
+ assert(id0 == id1) // must have the same ID
+ id = id0
+ } else {
+ id = newID()
+ ptrIDs[p.pp[0]] = id
+ ptrIDs[p.pp[1]] = id
+ }
+ trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
+ } else {
+ if print0 && !ok0 {
+ id0 = newID()
+ ptrIDs[p.pp[0]] = id0
+ }
+ if print1 && !ok1 {
+ id1 = newID()
+ ptrIDs[p.pp[1]] = id1
+ }
+ switch {
+ case print0 && print1:
+ trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)+","+formatReference(id1))
+ case print0:
+ trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0))
+ case print1:
+ trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id1))
+ }
+ }
+ }
+ }
+ }
+
+ // Update all leaf references with the unique identifier.
+ for _, leaf := range leaves {
+ if id, ok := ptrIDs[leaf.Metadata.(leafReference).p]; ok {
+ leaf.Prefix = updateReferencePrefix(leaf.Prefix, formatReference(id))
+ }
+ }
+}
+
+func formatReference(id uint) string {
+ return fmt.Sprintf("ref#%d", id)
+}
+
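+// updateReferencePrefix merges ref into an existing pointer prefix.
+// For example (assuming the ⟪ and ⟫ delimiter constants): an empty prefix
+// becomes "⟪ref#0⟫", while an address prefix such as "⟪0x1234⟫" becomes
+// "⟪ref#0: 0x1234⟫".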
+func updateReferencePrefix(prefix, ref string) string {
+ if prefix == "" {
+ return pointerDelimPrefix + ref + pointerDelimSuffix
+ }
+ suffix := strings.TrimPrefix(prefix, pointerDelimPrefix)
+ return pointerDelimPrefix + ref + ": " + suffix
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go
new file mode 100644
index 0000000..e39f422
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go
@@ -0,0 +1,414 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/google/go-cmp/cmp/internal/value"
+)
+
+var (
+ anyType = reflect.TypeOf((*interface{})(nil)).Elem()
+ stringType = reflect.TypeOf((*string)(nil)).Elem()
+ bytesType = reflect.TypeOf((*[]byte)(nil)).Elem()
+ byteType = reflect.TypeOf((*byte)(nil)).Elem()
+)
+
+type formatValueOptions struct {
+ // AvoidStringer controls whether to avoid calling custom stringer
+ // methods like error.Error or fmt.Stringer.String.
+ AvoidStringer bool
+
+ // PrintAddresses controls whether to print the address of all pointers,
+ // slice elements, and maps.
+ PrintAddresses bool
+
+ // QualifiedNames controls whether FormatType uses the fully qualified name
+ // (including the full package path as opposed to just the package name).
+ QualifiedNames bool
+
+ // VerbosityLevel controls the amount of output to produce.
+ // A higher value produces more output. A value of zero or lower produces
+ // no output (represented using an ellipsis).
+ // If LimitVerbosity is false, then the level is treated as infinite.
+ VerbosityLevel int
+
+ // LimitVerbosity specifies that formatting should respect VerbosityLevel.
+ LimitVerbosity bool
+}
+
+// FormatType prints the type as if it were wrapping s.
+// This may return s as-is depending on the current type and TypeMode mode.
+func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode {
+ // Check whether to emit the type or not.
+ switch opts.TypeMode {
+ case autoType:
+ switch t.Kind() {
+ case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map:
+ if s.Equal(textNil) {
+ return s
+ }
+ default:
+ return s
+ }
+ if opts.DiffMode == diffIdentical {
+ return s // elide type for identical nodes
+ }
+ case elideType:
+ return s
+ }
+
+ // Determine the type label, applying special handling for unnamed types.
+ typeName := value.TypeString(t, opts.QualifiedNames)
+ if t.Name() == "" {
+ // According to Go grammar, certain type literals contain symbols that
+ // do not strongly bind to the next lexicographical token (e.g., *T).
+ switch t.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Ptr:
+ typeName = "(" + typeName + ")"
+ }
+ }
+ return &textWrap{Prefix: typeName, Value: wrapParens(s)}
+}
+
+// wrapParens wraps s with a set of parenthesis, but avoids it if the
+// wrapped node itself is already surrounded by a pair of parenthesis or braces.
+// It handles unwrapping one level of pointer-reference nodes.
+func wrapParens(s textNode) textNode {
+ var refNode *textWrap
+ if s2, ok := s.(*textWrap); ok {
+ // Unwrap a single pointer reference node.
+ switch s2.Metadata.(type) {
+ case leafReference, trunkReference, trunkReferences:
+ refNode = s2
+ if s3, ok := refNode.Value.(*textWrap); ok {
+ s2 = s3
+ }
+ }
+
+ // Already has delimiters that make parenthesis unnecessary.
+ hasParens := strings.HasPrefix(s2.Prefix, "(") && strings.HasSuffix(s2.Suffix, ")")
+ hasBraces := strings.HasPrefix(s2.Prefix, "{") && strings.HasSuffix(s2.Suffix, "}")
+ if hasParens || hasBraces {
+ return s
+ }
+ }
+ if refNode != nil {
+ refNode.Value = &textWrap{Prefix: "(", Value: refNode.Value, Suffix: ")"}
+ return s
+ }
+ return &textWrap{Prefix: "(", Value: s, Suffix: ")"}
+}
+
+// FormatValue prints the reflect.Value, taking extra care to avoid descending
+// into pointers already in ptrs. As pointers are visited, ptrs is also updated.
+func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, ptrs *pointerReferences) (out textNode) {
+ if !v.IsValid() {
+ return nil
+ }
+ t := v.Type()
+
+ // Check slice element for cycles.
+ if parentKind == reflect.Slice {
+ ptrRef, visited := ptrs.Push(v.Addr())
+ if visited {
+ return makeLeafReference(ptrRef, false)
+ }
+ defer ptrs.Pop()
+ defer func() { out = wrapTrunkReference(ptrRef, false, out) }()
+ }
+
+ // Check whether there is an Error or String method to call.
+ if !opts.AvoidStringer && v.CanInterface() {
+ // Avoid calling Error or String methods on nil receivers since many
+ // implementations crash when doing so.
+ if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() {
+ var prefix, strVal string
+ func() {
+ // Swallow and ignore any panics from String or Error.
+ defer func() { recover() }()
+ switch v := v.Interface().(type) {
+ case error:
+ strVal = v.Error()
+ prefix = "e"
+ case fmt.Stringer:
+ strVal = v.String()
+ prefix = "s"
+ }
+ }()
+ if prefix != "" {
+ return opts.formatString(prefix, strVal)
+ }
+ }
+ }
+
+ // Check whether to explicitly wrap the result with the type.
+ var skipType bool
+ defer func() {
+ if !skipType {
+ out = opts.FormatType(t, out)
+ }
+ }()
+
+ switch t.Kind() {
+ case reflect.Bool:
+ return textLine(fmt.Sprint(v.Bool()))
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return textLine(fmt.Sprint(v.Int()))
+ case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return textLine(fmt.Sprint(v.Uint()))
+ case reflect.Uint8:
+ if parentKind == reflect.Slice || parentKind == reflect.Array {
+ return textLine(formatHex(v.Uint()))
+ }
+ return textLine(fmt.Sprint(v.Uint()))
+ case reflect.Uintptr:
+ return textLine(formatHex(v.Uint()))
+ case reflect.Float32, reflect.Float64:
+ return textLine(fmt.Sprint(v.Float()))
+ case reflect.Complex64, reflect.Complex128:
+ return textLine(fmt.Sprint(v.Complex()))
+ case reflect.String:
+ return opts.formatString("", v.String())
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ return textLine(formatPointer(value.PointerOf(v), true))
+ case reflect.Struct:
+ var list textList
+ v := makeAddressable(v) // needed for retrieveUnexportedField
+ maxLen := v.NumField()
+ if opts.LimitVerbosity {
+ maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
+ opts.VerbosityLevel--
+ }
+ for i := 0; i < v.NumField(); i++ {
+ vv := v.Field(i)
+ if vv.IsZero() {
+ continue // Elide fields with zero values
+ }
+ if len(list) == maxLen {
+ list.AppendEllipsis(diffStats{})
+ break
+ }
+ sf := t.Field(i)
+ if !isExported(sf.Name) {
+ vv = retrieveUnexportedField(v, sf, true)
+ }
+ s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs)
+ list = append(list, textRecord{Key: sf.Name, Value: s})
+ }
+ return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+ case reflect.Slice:
+ if v.IsNil() {
+ return textNil
+ }
+
+ // Check whether this is a []byte of text data.
+ if t.Elem() == byteType {
+ b := v.Bytes()
+ isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) }
+ if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 {
+ out = opts.formatString("", string(b))
+ skipType = true
+ return opts.FormatType(t, out)
+ }
+ }
+
+ fallthrough
+ case reflect.Array:
+ maxLen := v.Len()
+ if opts.LimitVerbosity {
+ maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
+ opts.VerbosityLevel--
+ }
+ var list textList
+ for i := 0; i < v.Len(); i++ {
+ if len(list) == maxLen {
+ list.AppendEllipsis(diffStats{})
+ break
+ }
+ s := opts.WithTypeMode(elideType).FormatValue(v.Index(i), t.Kind(), ptrs)
+ list = append(list, textRecord{Value: s})
+ }
+
+ out = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+ if t.Kind() == reflect.Slice && opts.PrintAddresses {
+ header := fmt.Sprintf("ptr:%v, len:%d, cap:%d", formatPointer(value.PointerOf(v), false), v.Len(), v.Cap())
+ out = &textWrap{Prefix: pointerDelimPrefix + header + pointerDelimSuffix, Value: out}
+ }
+ return out
+ case reflect.Map:
+ if v.IsNil() {
+ return textNil
+ }
+
+ // Check pointer for cycles.
+ ptrRef, visited := ptrs.Push(v)
+ if visited {
+ return makeLeafReference(ptrRef, opts.PrintAddresses)
+ }
+ defer ptrs.Pop()
+
+ maxLen := v.Len()
+ if opts.LimitVerbosity {
+ maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
+ opts.VerbosityLevel--
+ }
+ var list textList
+ for _, k := range value.SortKeys(v.MapKeys()) {
+ if len(list) == maxLen {
+ list.AppendEllipsis(diffStats{})
+ break
+ }
+ sk := formatMapKey(k, false, ptrs)
+ sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), t.Kind(), ptrs)
+ list = append(list, textRecord{Key: sk, Value: sv})
+ }
+
+ out = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+ out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
+ return out
+ case reflect.Ptr:
+ if v.IsNil() {
+ return textNil
+ }
+
+ // Check pointer for cycles.
+ ptrRef, visited := ptrs.Push(v)
+ if visited {
+ out = makeLeafReference(ptrRef, opts.PrintAddresses)
+ return &textWrap{Prefix: "&", Value: out}
+ }
+ defer ptrs.Pop()
+
+ // Skip the name only if this is an unnamed pointer type.
+ // Otherwise taking the address of a value does not reproduce
+ // the named pointer type.
+ if v.Type().Name() == "" {
+ skipType = true // Let the underlying value print the type instead
+ }
+ out = opts.FormatValue(v.Elem(), t.Kind(), ptrs)
+ out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
+ out = &textWrap{Prefix: "&", Value: out}
+ return out
+ case reflect.Interface:
+ if v.IsNil() {
+ return textNil
+ }
+ // Interfaces accept different concrete types,
+ // so configure the underlying value to explicitly print the type.
+ return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs)
+ default:
+ panic(fmt.Sprintf("%v kind not handled", v.Kind()))
+ }
+}
+
+func (opts formatOptions) formatString(prefix, s string) textNode {
+ maxLen := len(s)
+ maxLines := strings.Count(s, "\n") + 1
+ if opts.LimitVerbosity {
+ maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc...
+ maxLines = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc...
+ }
+
+ // For multiline strings, use the triple-quote syntax,
+ // but only use it when printing removed or inserted nodes since
+ // we only want the extra verbosity for those cases.
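+ // For example (an illustrative sketch), a removed four-line string may be
+ // printed as a parenthesized block bracketed by `"""` markers with one
+ // record per line, rather than as a single escaped quoted literal.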
+ lines := strings.Split(strings.TrimSuffix(s, "\n"), "\n")
+ isTripleQuoted := len(lines) >= 4 && (opts.DiffMode == '-' || opts.DiffMode == '+')
+ for i := 0; i < len(lines) && isTripleQuoted; i++ {
+ lines[i] = strings.TrimPrefix(strings.TrimSuffix(lines[i], "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
+ isPrintable := func(r rune) bool {
+ return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
+ }
+ line := lines[i]
+ isTripleQuoted = !strings.HasPrefix(strings.TrimPrefix(line, prefix), `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" && len(line) <= maxLen
+ }
+ if isTripleQuoted {
+ var list textList
+ list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
+ for i, line := range lines {
+ if numElided := len(lines) - i; i == maxLines-1 && numElided > 1 {
+ comment := commentString(fmt.Sprintf("%d elided lines", numElided))
+ list = append(list, textRecord{Diff: opts.DiffMode, Value: textEllipsis, ElideComma: true, Comment: comment})
+ break
+ }
+ list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(line), ElideComma: true})
+ }
+ list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
+ return &textWrap{Prefix: "(", Value: list, Suffix: ")"}
+ }
+
+ // Format the string as a single-line quoted string.
+ if len(s) > maxLen+len(textEllipsis) {
+ return textLine(prefix + formatString(s[:maxLen]) + string(textEllipsis))
+ }
+ return textLine(prefix + formatString(s))
+}
+
+// formatMapKey formats v as if it were a map key.
+// The result is guaranteed to be a single line.
+func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) string {
+ var opts formatOptions
+ opts.DiffMode = diffIdentical
+ opts.TypeMode = elideType
+ opts.PrintAddresses = disambiguate
+ opts.AvoidStringer = disambiguate
+ opts.QualifiedNames = disambiguate
+ opts.VerbosityLevel = maxVerbosityPreset
+ opts.LimitVerbosity = true
+ s := opts.FormatValue(v, reflect.Map, ptrs).String()
+ return strings.TrimSpace(s)
+}
+
+// formatString prints s as a double-quoted or backtick-quoted string.
+func formatString(s string) string {
+ // Use the quoted string if it is the same length as a raw string literal.
+ // Otherwise, attempt to use the raw string form.
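+ // For example: formatString("plain") yields the double-quoted "plain",
+ // while formatString(`has "quotes"`) yields a backtick-quoted raw string,
+ // since escaping the inner quotes would lengthen the double-quoted form.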
+ qs := strconv.Quote(s)
+ if len(qs) == 1+len(s)+1 {
+ return qs
+ }
+
+ // Disallow newlines to ensure output is a single line.
+ // Only allow printable runes for readability purposes.
+ rawInvalid := func(r rune) bool {
+ return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t')
+ }
+ if utf8.ValidString(s) && strings.IndexFunc(s, rawInvalid) < 0 {
+ return "`" + s + "`"
+ }
+ return qs
+}
+
+// formatHex prints u as a hexadecimal integer in Go notation.
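+// For example: formatHex(0x1f) yields "0x1f", while formatHex(0x123) yields
+// "0x0123", zero-padding to an even number of digits within each size class.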
+func formatHex(u uint64) string {
+ var f string
+ switch {
+ case u <= 0xff:
+ f = "0x%02x"
+ case u <= 0xffff:
+ f = "0x%04x"
+ case u <= 0xffffff:
+ f = "0x%06x"
+ case u <= 0xffffffff:
+ f = "0x%08x"
+ case u <= 0xffffffffff:
+ f = "0x%010x"
+ case u <= 0xffffffffffff:
+ f = "0x%012x"
+ case u <= 0xffffffffffffff:
+ f = "0x%014x"
+ case u <= 0xffffffffffffffff:
+ f = "0x%016x"
+ }
+ return fmt.Sprintf(f, u)
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go
new file mode 100644
index 0000000..23e444f
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go
@@ -0,0 +1,614 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/google/go-cmp/cmp/internal/diff"
+)
+
+// CanFormatDiffSlice reports whether we support custom formatting for nodes
+// that are slices of primitive kinds or strings.
+func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool {
+ switch {
+ case opts.DiffMode != diffUnknown:
+ return false // Must be formatting in diff mode
+ case v.NumDiff == 0:
+ return false // No differences detected
+ case !v.ValueX.IsValid() || !v.ValueY.IsValid():
+ return false // Both values must be valid
+ case v.NumIgnored > 0:
+ return false // Some ignore option was used
+ case v.NumTransformed > 0:
+ return false // Some transform option was used
+ case v.NumCompared > 1:
+ return false // More than one comparison was used
+ case v.NumCompared == 1 && v.Type.Name() != "":
+ // The need for cmp to check applicability of options on every element
+ // in a slice is a significant performance detriment for large []byte.
+ // The workaround is to specify Comparer(bytes.Equal),
+ // which enables cmp to compare []byte more efficiently.
+ // If they differ, we still want to provide batched diffing.
+ // The logic disallows named types since they tend to have their own
+ // String method, with nicer formatting than what this provides.
+ return false
+ }
+
+ // Check whether this is an interface with the same concrete types.
+ t := v.Type
+ vx, vy := v.ValueX, v.ValueY
+ if t.Kind() == reflect.Interface && !vx.IsNil() && !vy.IsNil() && vx.Elem().Type() == vy.Elem().Type() {
+ vx, vy = vx.Elem(), vy.Elem()
+ t = vx.Type()
+ }
+
+ // Check whether we provide specialized diffing for this type.
+ switch t.Kind() {
+ case reflect.String:
+ case reflect.Array, reflect.Slice:
+ // Only slices of primitive types have specialized handling.
+ switch t.Elem().Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
+ reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
+ default:
+ return false
+ }
+
+ // Both slice values have to be non-empty.
+ if t.Kind() == reflect.Slice && (vx.Len() == 0 || vy.Len() == 0) {
+ return false
+ }
+
+ // If a sufficient number of elements already differ,
+ // use specialized formatting even if length requirement is not met.
+ if v.NumDiff > v.NumSame {
+ return true
+ }
+ default:
+ return false
+ }
+
+ // Use specialized string diffing for longer slices or strings.
+ const minLength = 32
+ return vx.Len() >= minLength && vy.Len() >= minLength
+}
+
+// FormatDiffSlice prints a diff for the slices (or strings) represented by v.
+// This provides custom-tailored logic to make printing of differences in
+// textual strings and slices of primitive kinds more readable.
+func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
+ assert(opts.DiffMode == diffUnknown)
+ t, vx, vy := v.Type, v.ValueX, v.ValueY
+ if t.Kind() == reflect.Interface {
+ vx, vy = vx.Elem(), vy.Elem()
+ t = vx.Type()
+ opts = opts.WithTypeMode(emitType)
+ }
+
+ // Auto-detect the type of the data.
+ var sx, sy string
+ var ssx, ssy []string
+ var isString, isMostlyText, isPureLinedText, isBinary bool
+ switch {
+ case t.Kind() == reflect.String:
+ sx, sy = vx.String(), vy.String()
+ isString = true
+ case t.Kind() == reflect.Slice && t.Elem() == byteType:
+ sx, sy = string(vx.Bytes()), string(vy.Bytes())
+ isString = true
+ case t.Kind() == reflect.Array:
+ // Arrays need to be addressable for slice operations to work.
+ vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem()
+ vx2.Set(vx)
+ vy2.Set(vy)
+ vx, vy = vx2, vy2
+ }
+ if isString {
+ var numTotalRunes, numValidRunes, numLines, lastLineIdx, maxLineLen int
+ for i, r := range sx + sy {
+ numTotalRunes++
+ if (unicode.IsPrint(r) || unicode.IsSpace(r)) && r != utf8.RuneError {
+ numValidRunes++
+ }
+ if r == '\n' {
+ if maxLineLen < i-lastLineIdx {
+ maxLineLen = i - lastLineIdx
+ }
+ lastLineIdx = i + 1
+ numLines++
+ }
+ }
+ isPureText := numValidRunes == numTotalRunes
+ isMostlyText = float64(numValidRunes) > math.Floor(0.90*float64(numTotalRunes))
+ isPureLinedText = isPureText && numLines >= 4 && maxLineLen <= 1024
+ isBinary = !isMostlyText
+
+ // Avoid diffing by lines if it produces a significantly more complex
+ // edit script than diffing by bytes.
+ if isPureLinedText {
+ ssx = strings.Split(sx, "\n")
+ ssy = strings.Split(sy, "\n")
+ esLines := diff.Difference(len(ssx), len(ssy), func(ix, iy int) diff.Result {
+ return diff.BoolResult(ssx[ix] == ssy[iy])
+ })
+ esBytes := diff.Difference(len(sx), len(sy), func(ix, iy int) diff.Result {
+ return diff.BoolResult(sx[ix] == sy[iy])
+ })
+ efficiencyLines := float64(esLines.Dist()) / float64(len(esLines))
+ efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes))
+ quotedLength := len(strconv.Quote(sx + sy))
+ unquotedLength := len(sx) + len(sy)
+ escapeExpansionRatio := float64(quotedLength) / float64(unquotedLength)
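+ // Use line-based diffing unless its edit script is far less efficient
+ // than byte-based diffing, or the text expands noticeably when quoted
+ // (in which case a single-line quoted form would be hard to read).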
+ isPureLinedText = efficiencyLines < 4*efficiencyBytes || escapeExpansionRatio > 1.1
+ }
+ }
+
+ // Format the string into printable records.
+ var list textList
+ var delim string
+ switch {
+ // If the text appears to be multi-lined text,
+ // then perform differencing across individual lines.
+ case isPureLinedText:
+ list = opts.formatDiffSlice(
+ reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line",
+ func(v reflect.Value, d diffMode) textRecord {
+ s := formatString(v.Index(0).String())
+ return textRecord{Diff: d, Value: textLine(s)}
+ },
+ )
+ delim = "\n"
+
+ // If possible, use a custom triple-quote (""") syntax for printing
+ // differences in a string literal. This format is more readable,
+ // but has edge-cases where differences are visually indistinguishable.
+ // This format is avoided under the following conditions:
+ // - A line starts with `"""`
+ // - A line starts with "..."
+ // - A line contains non-printable characters
+ // - Adjacent different lines differ only by whitespace
+ //
+ // For example:
+ //
+ // """
+ // ... // 3 identical lines
+ // foo
+ // bar
+ // - baz
+ // + BAZ
+ // """
+ isTripleQuoted := true
+ prevRemoveLines := map[string]bool{}
+ prevInsertLines := map[string]bool{}
+ var list2 textList
+ list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true})
+ for _, r := range list {
+ if !r.Value.Equal(textEllipsis) {
+ line, _ := strconv.Unquote(string(r.Value.(textLine)))
+ line = strings.TrimPrefix(strings.TrimSuffix(line, "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
+ normLine := strings.Map(func(r rune) rune {
+ if unicode.IsSpace(r) {
+ return -1 // drop whitespace to avoid visually indistinguishable output
+ }
+ return r
+ }, line)
+ isPrintable := func(r rune) bool {
+ return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
+ }
+ isTripleQuoted = !strings.HasPrefix(line, `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == ""
+ switch r.Diff {
+ case diffRemoved:
+ isTripleQuoted = isTripleQuoted && !prevInsertLines[normLine]
+ prevRemoveLines[normLine] = true
+ case diffInserted:
+ isTripleQuoted = isTripleQuoted && !prevRemoveLines[normLine]
+ prevInsertLines[normLine] = true
+ }
+ if !isTripleQuoted {
+ break
+ }
+ r.Value = textLine(line)
+ r.ElideComma = true
+ }
+ if !(r.Diff == diffRemoved || r.Diff == diffInserted) { // start a new non-adjacent difference group
+ prevRemoveLines = map[string]bool{}
+ prevInsertLines = map[string]bool{}
+ }
+ list2 = append(list2, r)
+ }
+ if r := list2[len(list2)-1]; r.Diff == diffIdentical && len(r.Value.(textLine)) == 0 {
+ list2 = list2[:len(list2)-1] // elide single empty line at the end
+ }
+ list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true})
+ if isTripleQuoted {
+ var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"}
+ switch t.Kind() {
+ case reflect.String:
+ if t != stringType {
+ out = opts.FormatType(t, out)
+ }
+ case reflect.Slice:
+ // Always emit type for slices since the triple-quote syntax
+ // looks like a string (not a slice).
+ opts = opts.WithTypeMode(emitType)
+ out = opts.FormatType(t, out)
+ }
+ return out
+ }
+
+ // If the text appears to be single-lined text,
+ // then perform differencing in approximately fixed-sized chunks.
+ // The output is printed as quoted strings.
+ case isMostlyText:
+ list = opts.formatDiffSlice(
+ reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte",
+ func(v reflect.Value, d diffMode) textRecord {
+ s := formatString(v.String())
+ return textRecord{Diff: d, Value: textLine(s)}
+ },
+ )
+
+ // If the text appears to be binary data,
+ // then perform differencing in approximately fixed-sized chunks.
+ // The output is inspired by hexdump.
+ case isBinary:
+ list = opts.formatDiffSlice(
+ reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte",
+ func(v reflect.Value, d diffMode) textRecord {
+ var ss []string
+ for i := 0; i < v.Len(); i++ {
+ ss = append(ss, formatHex(v.Index(i).Uint()))
+ }
+ s := strings.Join(ss, ", ")
+ comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String())))
+ return textRecord{Diff: d, Value: textLine(s), Comment: comment}
+ },
+ )
+
+ // For all other slices of primitive types,
+ // then perform differencing in approximately fixed-sized chunks.
+ // The size of each chunk depends on the width of the element kind.
+ default:
+ var chunkSize int
+ if t.Elem().Kind() == reflect.Bool {
+ chunkSize = 16
+ } else {
+ switch t.Elem().Bits() {
+ case 8:
+ chunkSize = 16
+ case 16:
+ chunkSize = 12
+ case 32:
+ chunkSize = 8
+ default:
+ chunkSize = 8
+ }
+ }
+ list = opts.formatDiffSlice(
+ vx, vy, chunkSize, t.Elem().Kind().String(),
+ func(v reflect.Value, d diffMode) textRecord {
+ var ss []string
+ for i := 0; i < v.Len(); i++ {
+ switch t.Elem().Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ ss = append(ss, fmt.Sprint(v.Index(i).Int()))
+ case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ ss = append(ss, fmt.Sprint(v.Index(i).Uint()))
+ case reflect.Uint8, reflect.Uintptr:
+ ss = append(ss, formatHex(v.Index(i).Uint()))
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
+ ss = append(ss, fmt.Sprint(v.Index(i).Interface()))
+ }
+ }
+ s := strings.Join(ss, ", ")
+ return textRecord{Diff: d, Value: textLine(s)}
+ },
+ )
+ }
+
+ // Wrap the output with appropriate type information.
+ var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+ if !isMostlyText {
+ // The "{...}" byte-sequence literal is not valid Go syntax for strings.
+ // Emit the type for extra clarity (e.g. "string{...}").
+ if t.Kind() == reflect.String {
+ opts = opts.WithTypeMode(emitType)
+ }
+ return opts.FormatType(t, out)
+ }
+ switch t.Kind() {
+ case reflect.String:
+ out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
+ if t != stringType {
+ out = opts.FormatType(t, out)
+ }
+ case reflect.Slice:
+ out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
+ if t != bytesType {
+ out = opts.FormatType(t, out)
+ }
+ }
+ return out
+}
+
+// formatASCII formats s as an ASCII string.
+// This is useful for printing binary strings in a semi-legible way.
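+// For example: formatASCII("\x00abc\x7f") yields ".abc.", replacing each
+// byte outside the printable ASCII range with a period.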
+func formatASCII(s string) string {
+ b := bytes.Repeat([]byte{'.'}, len(s))
+ for i := 0; i < len(s); i++ {
+ if ' ' <= s[i] && s[i] <= '~' {
+ b[i] = s[i]
+ }
+ }
+ return string(b)
+}
+
+func (opts formatOptions) formatDiffSlice(
+ vx, vy reflect.Value, chunkSize int, name string,
+ makeRec func(reflect.Value, diffMode) textRecord,
+) (list textList) {
+ eq := func(ix, iy int) bool {
+ return vx.Index(ix).Interface() == vy.Index(iy).Interface()
+ }
+ es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result {
+ return diff.BoolResult(eq(ix, iy))
+ })
+
+ appendChunks := func(v reflect.Value, d diffMode) int {
+ n0 := v.Len()
+ for v.Len() > 0 {
+ n := chunkSize
+ if n > v.Len() {
+ n = v.Len()
+ }
+ list = append(list, makeRec(v.Slice(0, n), d))
+ v = v.Slice(n, v.Len())
+ }
+ return n0 - v.Len()
+ }
+
+ var numDiffs int
+ maxLen := -1
+ if opts.LimitVerbosity {
+ maxLen = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc...
+ opts.VerbosityLevel--
+ }
+
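+ // Coalesce the edit script into printable groups: merge adjacent edits,
+ // absorb short identical runs between unequal groups, and push identical
+ // elements at the boundaries of unequal groups into the neighboring
+ // equal groups.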
+ groups := coalesceAdjacentEdits(name, es)
+ groups = coalesceInterveningIdentical(groups, chunkSize/4)
+ groups = cleanupSurroundingIdentical(groups, eq)
+ maxGroup := diffStats{Name: name}
+ for i, ds := range groups {
+ if maxLen >= 0 && numDiffs >= maxLen {
+ maxGroup = maxGroup.Append(ds)
+ continue
+ }
+
+ // Print equal.
+ if ds.NumDiff() == 0 {
+ // Compute the number of leading and trailing equal bytes to print.
+ var numLo, numHi int
+ numEqual := ds.NumIgnored + ds.NumIdentical
+ for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 {
+ numLo++
+ }
+ for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
+ numHi++
+ }
+ if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 {
+ numHi = numEqual - numLo // Avoid pointless coalescing of single equal row
+ }
+
+ // Print the equal bytes.
+ appendChunks(vx.Slice(0, numLo), diffIdentical)
+ if numEqual > numLo+numHi {
+ ds.NumIdentical -= numLo + numHi
+ list.AppendEllipsis(ds)
+ }
+ appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical)
+ vx = vx.Slice(numEqual, vx.Len())
+ vy = vy.Slice(numEqual, vy.Len())
+ continue
+ }
+
+ // Print unequal.
+ len0 := len(list)
+ nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved)
+ vx = vx.Slice(nx, vx.Len())
+ ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted)
+ vy = vy.Slice(ny, vy.Len())
+ numDiffs += len(list) - len0
+ }
+ if maxGroup.IsZero() {
+ assert(vx.Len() == 0 && vy.Len() == 0)
+ } else {
+ list.AppendEllipsis(maxGroup)
+ }
+ return list
+}
+
+// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent
+// equal or unequal counts.
+//
+// Example:
+//
+// Input: "..XXY...Y"
+// Output: [
+// {NumIdentical: 2},
+// {NumRemoved: 2, NumInserted: 1},
+// {NumIdentical: 3},
+// {NumInserted: 1},
+// ]
+func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) {
+ var prevMode byte
+ lastStats := func(mode byte) *diffStats {
+ if prevMode != mode {
+ groups = append(groups, diffStats{Name: name})
+ prevMode = mode
+ }
+ return &groups[len(groups)-1]
+ }
+ for _, e := range es {
+ switch e {
+ case diff.Identity:
+ lastStats('=').NumIdentical++
+ case diff.UniqueX:
+ lastStats('!').NumRemoved++
+ case diff.UniqueY:
+ lastStats('!').NumInserted++
+ case diff.Modified:
+ lastStats('!').NumModified++
+ }
+ }
+ return groups
+}
+
+// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize)
+// equal groups into adjacent unequal groups that currently result in a
+// dual inserted/removed printout. This acts as a high-pass filter to smooth
+// out high-frequency changes within the windowSize.
+//
+// Example:
+//
+// WindowSize: 16,
+// Input: [
+// {NumIdentical: 61}, // group 0
+// {NumRemoved: 3, NumInserted: 1}, // group 1
+// {NumIdentical: 6}, // ├── coalesce
+// {NumInserted: 2}, // ├── coalesce
+// {NumIdentical: 1}, // ├── coalesce
+// {NumRemoved: 9}, // └── coalesce
+// {NumIdentical: 64}, // group 2
+// {NumRemoved: 3, NumInserted: 1}, // group 3
+// {NumIdentical: 6}, // ├── coalesce
+// {NumInserted: 2}, // ├── coalesce
+// {NumIdentical: 1}, // ├── coalesce
+// {NumRemoved: 7}, // ├── coalesce
+// {NumIdentical: 1}, // ├── coalesce
+// {NumRemoved: 2}, // └── coalesce
+// {NumIdentical: 63}, // group 4
+// ]
+// Output: [
+// {NumIdentical: 61},
+// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3},
+// {NumIdentical: 64},
+// {NumIdentical: 8, NumRemoved: 12, NumInserted: 3},
+// {NumIdentical: 63},
+// ]
+func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats {
+ groups, groupsOrig := groups[:0], groups
+ for i, ds := range groupsOrig {
+ if len(groups) >= 2 && ds.NumDiff() > 0 {
+ prev := &groups[len(groups)-2] // Unequal group
+ curr := &groups[len(groups)-1] // Equal group
+ next := &groupsOrig[i] // Unequal group
+ hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0
+ hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0
+ if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize {
+ *prev = prev.Append(*curr).Append(*next)
+ groups = groups[:len(groups)-1] // Truncate off equal group
+ continue
+ }
+ }
+ groups = append(groups, ds)
+ }
+ return groups
+}
+
+// cleanupSurroundingIdentical scans through all unequal groups, and
+// moves any leading sequence of equal elements to the preceding equal group and
+// moves any trailing sequence of equal elements to the succeeding equal group.
+//
+// This is necessary since coalesceInterveningIdentical may coalesce edit groups
+// together such that leading/trailing spans of equal elements become possible.
+// Note that this can occur even with an optimal diffing algorithm.
+//
+// Example:
+//
+// Input: [
+// {NumIdentical: 61},
+// {NumIdentical: 1 , NumRemoved: 11, NumInserted: 2}, // assume 3 leading identical elements
+// {NumIdentical: 67},
+// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, // assume 10 trailing identical elements
+// {NumIdentical: 54},
+// ]
+// Output: [
+// {NumIdentical: 64}, // incremented by 3
+// {NumRemoved: 9},
+// {NumIdentical: 67},
+// {NumRemoved: 9},
+// {NumIdentical: 64}, // incremented by 10
+// ]
+func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats {
+ var ix, iy int // indexes into sequence x and y
+ for i, ds := range groups {
+ // Handle equal group.
+ if ds.NumDiff() == 0 {
+ ix += ds.NumIdentical
+ iy += ds.NumIdentical
+ continue
+ }
+
+ // Handle unequal group.
+ nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified
+ ny := ds.NumIdentical + ds.NumInserted + ds.NumModified
+ var numLeadingIdentical, numTrailingIdentical int
+ for j := 0; j < nx && j < ny && eq(ix+j, iy+j); j++ {
+ numLeadingIdentical++
+ }
+ for j := 0; j < nx && j < ny && eq(ix+nx-1-j, iy+ny-1-j); j++ {
+ numTrailingIdentical++
+ }
+ if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 {
+ if numLeadingIdentical > 0 {
+ // Remove leading identical span from this group and
+ // insert it into the preceding group.
+ if i-1 >= 0 {
+ groups[i-1].NumIdentical += numLeadingIdentical
+ } else {
+ // No preceding group exists, so prepend a new group,
+ // but do so after we finish iterating over all groups.
+ defer func() {
+ groups = append([]diffStats{{Name: groups[0].Name, NumIdentical: numLeadingIdentical}}, groups...)
+ }()
+ }
+ // Increment indexes since the preceding group would have handled this.
+ ix += numLeadingIdentical
+ iy += numLeadingIdentical
+ }
+ if numTrailingIdentical > 0 {
+ // Remove trailing identical span from this group and
+ // insert it into the succeeding group.
+ if i+1 < len(groups) {
+ groups[i+1].NumIdentical += numTrailingIdentical
+ } else {
+ // No succeeding group exists, so append a new group,
+ // but do so after we finish iterating over all groups.
+ defer func() {
+ groups = append(groups, diffStats{Name: groups[len(groups)-1].Name, NumIdentical: numTrailingIdentical})
+ }()
+ }
+ // Do not increment indexes since the succeeding group will handle this.
+ }
+
+ // Update this group since some identical elements were removed.
+ nx -= numIdentical
+ ny -= numIdentical
+ groups[i] = diffStats{Name: ds.Name, NumRemoved: nx, NumInserted: ny}
+ }
+ ix += nx
+ iy += ny
+ }
+ return groups
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go
new file mode 100644
index 0000000..388fcf5
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_text.go
@@ -0,0 +1,432 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "bytes"
+ "fmt"
+ "math/rand"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/google/go-cmp/cmp/internal/flags"
+)
+
+var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
+
+const maxColumnLength = 80
+
+type indentMode int
+
+func (n indentMode) appendIndent(b []byte, d diffMode) []byte {
+ // The output of Diff is documented as being unstable to provide future
+ // flexibility in changing the output for more humanly readable reports.
+ // This logic intentionally introduces instability to the exact output
+ // so that users can detect accidental reliance on stability early on,
+ // rather than much later when an actual change to the format occurs.
+ if flags.Deterministic || randBool {
+ // Use regular spaces (U+0020).
+ switch d {
+ case diffUnknown, diffIdentical:
+ b = append(b, " "...)
+ case diffRemoved:
+ b = append(b, "- "...)
+ case diffInserted:
+ b = append(b, "+ "...)
+ }
+ } else {
+ // Use non-breaking spaces (U+00a0).
+ switch d {
+ case diffUnknown, diffIdentical:
+ b = append(b, " "...)
+ case diffRemoved:
+ b = append(b, "- "...)
+ case diffInserted:
+ b = append(b, "+ "...)
+ }
+ }
+ return repeatCount(n).appendChar(b, '\t')
+}
+
+type repeatCount int
+
+func (n repeatCount) appendChar(b []byte, c byte) []byte {
+ for ; n > 0; n-- {
+ b = append(b, c)
+ }
+ return b
+}
+
+// textNode is a simplified tree-based representation of structured text.
+// Possible node types are textWrap, textList, or textLine.
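+//
+// For example (an illustrative sketch), a struct value may be represented as
+// a textWrap{Prefix: "MyStruct{", Suffix: "}"} whose Value is a textList of
+// textRecords, each pairing a field name with a textLine value.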
+type textNode interface {
+ // Len reports the length in bytes of a single-line version of the tree.
+ // Nested textRecord.Diff and textRecord.Comment fields are ignored.
+ Len() int
+ // Equal reports whether the two trees are structurally identical.
+ // Nested textRecord.Diff and textRecord.Comment fields are compared.
+ Equal(textNode) bool
+ // String returns the string representation of the text tree.
+ // It is not guaranteed that len(x.String()) == x.Len(),
+ // nor that x.String() == y.String() implies that x.Equal(y).
+ String() string
+
+ // formatCompactTo formats the contents of the tree as a single-line string
+ // to the provided buffer. Any nested textRecord.Diff and textRecord.Comment
+ // fields are ignored.
+ //
+ // However, not all nodes in the tree should be collapsed into a single line.
+ // If a node can be collapsed into a single line, it is replaced by a textLine
+ // node. Since the top-level node cannot replace itself, this also returns
+ // the current node itself.
+ //
+ // This does not mutate the receiver.
+ formatCompactTo([]byte, diffMode) ([]byte, textNode)
+ // formatExpandedTo formats the contents of the tree as a multi-line string
+ // to the provided buffer. In order for column alignment to operate well,
+ // formatCompactTo must be called before calling formatExpandedTo.
+ formatExpandedTo([]byte, diffMode, indentMode) []byte
+}
+
+// textWrap is a wrapper that concatenates a prefix and/or a suffix
+// to the underlying node.
+type textWrap struct {
+ Prefix string // e.g., "bytes.Buffer{"
+ Value textNode // textWrap | textList | textLine
+ Suffix string // e.g., "}"
+ Metadata interface{} // arbitrary metadata; has no effect on formatting
+}
+
+func (s *textWrap) Len() int {
+ return len(s.Prefix) + s.Value.Len() + len(s.Suffix)
+}
+func (s1 *textWrap) Equal(s2 textNode) bool {
+ if s2, ok := s2.(*textWrap); ok {
+ return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix
+ }
+ return false
+}
+func (s *textWrap) String() string {
+ var d diffMode
+ var n indentMode
+ _, s2 := s.formatCompactTo(nil, d)
+ b := n.appendIndent(nil, d) // Leading indent
+ b = s2.formatExpandedTo(b, d, n) // Main body
+ b = append(b, '\n') // Trailing newline
+ return string(b)
+}
+func (s *textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
+ n0 := len(b) // Original buffer length
+ b = append(b, s.Prefix...)
+ b, s.Value = s.Value.formatCompactTo(b, d)
+ b = append(b, s.Suffix...)
+ if _, ok := s.Value.(textLine); ok {
+ return b, textLine(b[n0:])
+ }
+ return b, s
+}
+func (s *textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
+ b = append(b, s.Prefix...)
+ b = s.Value.formatExpandedTo(b, d, n)
+ b = append(b, s.Suffix...)
+ return b
+}
+
+// textList is a comma-separated list of textWrap or textLine nodes.
+// The list may be formatted as multi-lines or single-line at the discretion
+// of the textList.formatCompactTo method.
+type textList []textRecord
+type textRecord struct {
+ Diff diffMode // e.g., 0 or '-' or '+'
+ Key string // e.g., "MyField"
+ Value textNode // textWrap | textLine
+ ElideComma bool // avoid trailing comma
+ Comment fmt.Stringer // e.g., "6 identical fields"
+}
+
+// AppendEllipsis appends a new ellipsis node to the list if none already
+// exists at the end. If ds is non-zero, its statistics are attached to the
+// ellipsis as a comment, coalescing with any previous diffStats.
+func (s *textList) AppendEllipsis(ds diffStats) {
+ hasStats := !ds.IsZero()
+ if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) {
+ if hasStats {
+ *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true, Comment: ds})
+ } else {
+ *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true})
+ }
+ return
+ }
+ if hasStats {
+ (*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds)
+ }
+}
+
+func (s textList) Len() (n int) {
+ for i, r := range s {
+ n += len(r.Key)
+ if r.Key != "" {
+ n += len(": ")
+ }
+ n += r.Value.Len()
+ if i < len(s)-1 {
+ n += len(", ")
+ }
+ }
+ return n
+}
+
+func (s1 textList) Equal(s2 textNode) bool {
+ if s2, ok := s2.(textList); ok {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i := range s1 {
+ r1, r2 := s1[i], s2[i]
+ if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+func (s textList) String() string {
+ return (&textWrap{Prefix: "{", Value: s, Suffix: "}"}).String()
+}
+
+func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
+ s = append(textList(nil), s...) // Avoid mutating original
+
+ // Determine whether we can collapse this list as a single line.
+ n0 := len(b) // Original buffer length
+ var multiLine bool
+ for i, r := range s {
+ if r.Diff == diffInserted || r.Diff == diffRemoved {
+ multiLine = true
+ }
+ b = append(b, r.Key...)
+ if r.Key != "" {
+ b = append(b, ": "...)
+ }
+ b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff)
+ if _, ok := s[i].Value.(textLine); !ok {
+ multiLine = true
+ }
+ if r.Comment != nil {
+ multiLine = true
+ }
+ if i < len(s)-1 {
+ b = append(b, ", "...)
+ }
+ }
+ // Force multi-lined output when printing a removed/inserted node that
+ // is sufficiently long.
+ if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > maxColumnLength {
+ multiLine = true
+ }
+ if !multiLine {
+ return b, textLine(b[n0:])
+ }
+ return b, s
+}
+
+func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
+ alignKeyLens := s.alignLens(
+ func(r textRecord) bool {
+ _, isLine := r.Value.(textLine)
+ return r.Key == "" || !isLine
+ },
+ func(r textRecord) int { return utf8.RuneCountInString(r.Key) },
+ )
+ alignValueLens := s.alignLens(
+ func(r textRecord) bool {
+ _, isLine := r.Value.(textLine)
+ return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil
+ },
+ func(r textRecord) int { return utf8.RuneCount(r.Value.(textLine)) },
+ )
+
+ // Format simple lists in a batched form.
+ // If the list is a sequence of only textLine values,
+ // then batch multiple values on a single line.
+ var isSimple bool
+ for _, r := range s {
+ _, isLine := r.Value.(textLine)
+ isSimple = r.Diff == 0 && r.Key == "" && isLine && r.Comment == nil
+ if !isSimple {
+ break
+ }
+ }
+ if isSimple {
+ n++
+ var batch []byte
+ emitBatch := func() {
+ if len(batch) > 0 {
+ b = n.appendIndent(append(b, '\n'), d)
+ b = append(b, bytes.TrimRight(batch, " ")...)
+ batch = batch[:0]
+ }
+ }
+ for _, r := range s {
+ line := r.Value.(textLine)
+ if len(batch)+len(line)+len(", ") > maxColumnLength {
+ emitBatch()
+ }
+ batch = append(batch, line...)
+ batch = append(batch, ", "...)
+ }
+ emitBatch()
+ n--
+ return n.appendIndent(append(b, '\n'), d)
+ }
+
+ // Format the list as a multi-lined output.
+ n++
+ for i, r := range s {
+ b = n.appendIndent(append(b, '\n'), d|r.Diff)
+ if r.Key != "" {
+ b = append(b, r.Key+": "...)
+ }
+ b = alignKeyLens[i].appendChar(b, ' ')
+
+ b = r.Value.formatExpandedTo(b, d|r.Diff, n)
+ if !r.ElideComma {
+ b = append(b, ',')
+ }
+ b = alignValueLens[i].appendChar(b, ' ')
+
+ if r.Comment != nil {
+ b = append(b, " // "+r.Comment.String()...)
+ }
+ }
+ n--
+
+ return n.appendIndent(append(b, '\n'), d)
+}
+
+func (s textList) alignLens(
+ skipFunc func(textRecord) bool,
+ lenFunc func(textRecord) int,
+) []repeatCount {
+ var startIdx, endIdx, maxLen int
+ lens := make([]repeatCount, len(s))
+ for i, r := range s {
+ if skipFunc(r) {
+ for j := startIdx; j < endIdx && j < len(s); j++ {
+ lens[j] = repeatCount(maxLen - lenFunc(s[j]))
+ }
+ startIdx, endIdx, maxLen = i+1, i+1, 0
+ } else {
+ if maxLen < lenFunc(r) {
+ maxLen = lenFunc(r)
+ }
+ endIdx = i + 1
+ }
+ }
+ for j := startIdx; j < endIdx && j < len(s); j++ {
+ lens[j] = repeatCount(maxLen - lenFunc(s[j]))
+ }
+ return lens
+}
+
+// textLine is a single-line segment of text and is always a leaf node
+// in the textNode tree.
+type textLine []byte
+
+var (
+ textNil = textLine("nil")
+ textEllipsis = textLine("...")
+)
+
+func (s textLine) Len() int {
+ return len(s)
+}
+func (s1 textLine) Equal(s2 textNode) bool {
+ if s2, ok := s2.(textLine); ok {
+ return bytes.Equal([]byte(s1), []byte(s2))
+ }
+ return false
+}
+func (s textLine) String() string {
+ return string(s)
+}
+func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
+ return append(b, s...), s
+}
+func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte {
+ return append(b, s...)
+}
+
+type diffStats struct {
+ Name string
+ NumIgnored int
+ NumIdentical int
+ NumRemoved int
+ NumInserted int
+ NumModified int
+}
+
+func (s diffStats) IsZero() bool {
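+ // Ignore the Name field so that only the numeric counters participate
+ // in the comparison against the zero value.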
+ s.Name = ""
+ return s == diffStats{}
+}
+
+func (s diffStats) NumDiff() int {
+ return s.NumRemoved + s.NumInserted + s.NumModified
+}
+
+func (s diffStats) Append(ds diffStats) diffStats {
+ assert(s.Name == ds.Name)
+ s.NumIgnored += ds.NumIgnored
+ s.NumIdentical += ds.NumIdentical
+ s.NumRemoved += ds.NumRemoved
+ s.NumInserted += ds.NumInserted
+ s.NumModified += ds.NumModified
+ return s
+}
+
+// String prints a humanly-readable summary of coalesced records.
+//
+// Example:
+//
+// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields"
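+// diffStats{Name: "entry", NumRemoved: 2}.String() => "2 removed entries"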
+func (s diffStats) String() string {
+ var ss []string
+ var sum int
+ labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"}
+ counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified}
+ for i, n := range counts {
+ if n > 0 {
+ ss = append(ss, fmt.Sprintf("%d %v", n, labels[i]))
+ }
+ sum += n
+ }
+
+ // Pluralize the name (adjusting for some obscure English grammar rules).
+ name := s.Name
+ if sum > 1 {
+ name += "s"
+ if strings.HasSuffix(name, "ys") {
+ name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries"
+ }
+ }
+
+ // Format the list according to English grammar (with Oxford comma).
+ switch n := len(ss); n {
+ case 0:
+ return ""
+ case 1, 2:
+ return strings.Join(ss, " and ") + " " + name
+ default:
+ return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name
+ }
+}
+
+type commentString string
+
+func (s commentString) String() string { return string(s) }
diff --git a/vendor/github.com/google/go-cmp/cmp/report_value.go b/vendor/github.com/google/go-cmp/cmp/report_value.go
new file mode 100644
index 0000000..668d470
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_value.go
@@ -0,0 +1,121 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import "reflect"
+
+// valueNode represents a single node within a report, which is a
+// structured representation of the value tree, containing information
+// regarding which nodes are equal or not.
+type valueNode struct {
+ parent *valueNode
+
+ Type reflect.Type
+ ValueX reflect.Value
+ ValueY reflect.Value
+
+ // NumSame is the number of leaf nodes that are equal.
+ // All descendants are equal only if NumDiff is 0.
+ NumSame int
+ // NumDiff is the number of leaf nodes that are not equal.
+ NumDiff int
+ // NumIgnored is the number of leaf nodes that are ignored.
+ NumIgnored int
+ // NumCompared is the number of leaf nodes that were compared
+ // using an Equal method or Comparer function.
+ NumCompared int
+ // NumTransformed is the number of non-leaf nodes that were transformed.
+ NumTransformed int
+ // NumChildren is the number of transitive descendants of this node.
+ // This counts from zero; thus, leaf nodes have no descendants.
+ NumChildren int
+ // MaxDepth is the maximum depth of the tree. This counts from zero;
+ // thus, leaf nodes have a depth of zero.
+ MaxDepth int
+
+ // Records is a list of struct fields, slice elements, or map entries.
+ Records []reportRecord // If populated, implies Value is not populated
+
+ // Value is the result of a transformation, pointer indirection, or
+ // type assertion.
+ Value *valueNode // If populated, implies Records is not populated
+
+ // TransformerName is the name of the transformer.
+ TransformerName string // If non-empty, implies Value is populated
+}
+type reportRecord struct {
+ Key reflect.Value // Invalid for slice element
+ Value *valueNode
+}
+
+func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) {
+ vx, vy := ps.Values()
+ child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy}
+ switch s := ps.(type) {
+ case StructField:
+ assert(parent.Value == nil)
+ parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child})
+ case SliceIndex:
+ assert(parent.Value == nil)
+ parent.Records = append(parent.Records, reportRecord{Value: child})
+ case MapIndex:
+ assert(parent.Value == nil)
+ parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child})
+ case Indirect:
+ assert(parent.Value == nil && parent.Records == nil)
+ parent.Value = child
+ case TypeAssertion:
+ assert(parent.Value == nil && parent.Records == nil)
+ parent.Value = child
+ case Transform:
+ assert(parent.Value == nil && parent.Records == nil)
+ parent.Value = child
+ parent.TransformerName = s.Name()
+ parent.NumTransformed++
+ default:
+ assert(parent == nil) // Must be the root step
+ }
+ return child
+}
+
+func (r *valueNode) Report(rs Result) {
+ assert(r.MaxDepth == 0) // May only be called on leaf nodes
+
+ if rs.ByIgnore() {
+ r.NumIgnored++
+ } else {
+ if rs.Equal() {
+ r.NumSame++
+ } else {
+ r.NumDiff++
+ }
+ }
+ assert(r.NumSame+r.NumDiff+r.NumIgnored == 1)
+
+ if rs.ByMethod() {
+ r.NumCompared++
+ }
+ if rs.ByFunc() {
+ r.NumCompared++
+ }
+ assert(r.NumCompared <= 1)
+}
+
+func (child *valueNode) PopStep() (parent *valueNode) {
+ if child.parent == nil {
+ return nil
+ }
+ parent = child.parent
+ parent.NumSame += child.NumSame
+ parent.NumDiff += child.NumDiff
+ parent.NumIgnored += child.NumIgnored
+ parent.NumCompared += child.NumCompared
+ parent.NumTransformed += child.NumTransformed
+ parent.NumChildren += child.NumChildren + 1
+ if parent.MaxDepth < child.MaxDepth+1 {
+ parent.MaxDepth = child.MaxDepth + 1
+ }
+ return parent
+}
diff --git a/vendor/github.com/google/pprof/AUTHORS b/vendor/github.com/google/pprof/AUTHORS
new file mode 100644
index 0000000..fd736cb
--- /dev/null
+++ b/vendor/github.com/google/pprof/AUTHORS
@@ -0,0 +1,7 @@
+# This is the official list of pprof authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+# Names should be added to this file as:
+# Name or Organization
+# The email address is not required for organizations.
+Google Inc.
\ No newline at end of file
diff --git a/vendor/github.com/google/pprof/CONTRIBUTORS b/vendor/github.com/google/pprof/CONTRIBUTORS
new file mode 100644
index 0000000..8c8c37d
--- /dev/null
+++ b/vendor/github.com/google/pprof/CONTRIBUTORS
@@ -0,0 +1,16 @@
+# People who have agreed to one of the CLAs and can contribute patches.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# https://developers.google.com/open-source/cla/individual
+# https://developers.google.com/open-source/cla/corporate
+#
+# Names should be added to this file as:
+# Name
+Raul Silvera
+Tipp Moseley
+Hyoun Kyu Cho
+Martin Spier
+Taco de Wolff
+Andrew Hunter
diff --git a/vendor/github.com/google/pprof/LICENSE b/vendor/github.com/google/pprof/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/google/pprof/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/google/pprof/profile/encode.go b/vendor/github.com/google/pprof/profile/encode.go
new file mode 100644
index 0000000..860bb30
--- /dev/null
+++ b/vendor/github.com/google/pprof/profile/encode.go
@@ -0,0 +1,591 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package profile
+
+import (
+ "errors"
+ "sort"
+ "strings"
+)
+
+func (p *Profile) decoder() []decoder {
+ return profileDecoder
+}
+
+// preEncode populates the unexported fields to be used by encode
+// (with suffix X) from the corresponding exported fields. The
+// exported fields are cleared up to facilitate testing.
+func (p *Profile) preEncode() {
+ strings := make(map[string]int)
+ addString(strings, "")
+
+ for _, st := range p.SampleType {
+ st.typeX = addString(strings, st.Type)
+ st.unitX = addString(strings, st.Unit)
+ }
+
+ for _, s := range p.Sample {
+ s.labelX = nil
+ var keys []string
+ for k := range s.Label {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ vs := s.Label[k]
+ for _, v := range vs {
+ s.labelX = append(s.labelX,
+ label{
+ keyX: addString(strings, k),
+ strX: addString(strings, v),
+ },
+ )
+ }
+ }
+ var numKeys []string
+ for k := range s.NumLabel {
+ numKeys = append(numKeys, k)
+ }
+ sort.Strings(numKeys)
+ for _, k := range numKeys {
+ keyX := addString(strings, k)
+ vs := s.NumLabel[k]
+ units := s.NumUnit[k]
+ for i, v := range vs {
+ var unitX int64
+ if len(units) != 0 {
+ unitX = addString(strings, units[i])
+ }
+ s.labelX = append(s.labelX,
+ label{
+ keyX: keyX,
+ numX: v,
+ unitX: unitX,
+ },
+ )
+ }
+ }
+ s.locationIDX = make([]uint64, len(s.Location))
+ for i, loc := range s.Location {
+ s.locationIDX[i] = loc.ID
+ }
+ }
+
+ for _, m := range p.Mapping {
+ m.fileX = addString(strings, m.File)
+ m.buildIDX = addString(strings, m.BuildID)
+ }
+
+ for _, l := range p.Location {
+ for i, ln := range l.Line {
+ if ln.Function != nil {
+ l.Line[i].functionIDX = ln.Function.ID
+ } else {
+ l.Line[i].functionIDX = 0
+ }
+ }
+ if l.Mapping != nil {
+ l.mappingIDX = l.Mapping.ID
+ } else {
+ l.mappingIDX = 0
+ }
+ }
+ for _, f := range p.Function {
+ f.nameX = addString(strings, f.Name)
+ f.systemNameX = addString(strings, f.SystemName)
+ f.filenameX = addString(strings, f.Filename)
+ }
+
+ p.dropFramesX = addString(strings, p.DropFrames)
+ p.keepFramesX = addString(strings, p.KeepFrames)
+
+ if pt := p.PeriodType; pt != nil {
+ pt.typeX = addString(strings, pt.Type)
+ pt.unitX = addString(strings, pt.Unit)
+ }
+
+ p.commentX = nil
+ for _, c := range p.Comments {
+ p.commentX = append(p.commentX, addString(strings, c))
+ }
+
+ p.defaultSampleTypeX = addString(strings, p.DefaultSampleType)
+
+ p.stringTable = make([]string, len(strings))
+ for s, i := range strings {
+ p.stringTable[i] = s
+ }
+}
+
+func (p *Profile) encode(b *buffer) {
+ for _, x := range p.SampleType {
+ encodeMessage(b, 1, x)
+ }
+ for _, x := range p.Sample {
+ encodeMessage(b, 2, x)
+ }
+ for _, x := range p.Mapping {
+ encodeMessage(b, 3, x)
+ }
+ for _, x := range p.Location {
+ encodeMessage(b, 4, x)
+ }
+ for _, x := range p.Function {
+ encodeMessage(b, 5, x)
+ }
+ encodeStrings(b, 6, p.stringTable)
+ encodeInt64Opt(b, 7, p.dropFramesX)
+ encodeInt64Opt(b, 8, p.keepFramesX)
+ encodeInt64Opt(b, 9, p.TimeNanos)
+ encodeInt64Opt(b, 10, p.DurationNanos)
+ if pt := p.PeriodType; pt != nil && (pt.typeX != 0 || pt.unitX != 0) {
+ encodeMessage(b, 11, p.PeriodType)
+ }
+ encodeInt64Opt(b, 12, p.Period)
+ encodeInt64s(b, 13, p.commentX)
+ encodeInt64(b, 14, p.defaultSampleTypeX)
+}
+
+var profileDecoder = []decoder{
+ nil, // 0
+ // repeated ValueType sample_type = 1
+ func(b *buffer, m message) error {
+ x := new(ValueType)
+ pp := m.(*Profile)
+ pp.SampleType = append(pp.SampleType, x)
+ return decodeMessage(b, x)
+ },
+ // repeated Sample sample = 2
+ func(b *buffer, m message) error {
+ x := new(Sample)
+ pp := m.(*Profile)
+ pp.Sample = append(pp.Sample, x)
+ return decodeMessage(b, x)
+ },
+ // repeated Mapping mapping = 3
+ func(b *buffer, m message) error {
+ x := new(Mapping)
+ pp := m.(*Profile)
+ pp.Mapping = append(pp.Mapping, x)
+ return decodeMessage(b, x)
+ },
+ // repeated Location location = 4
+ func(b *buffer, m message) error {
+ x := new(Location)
+ x.Line = b.tmpLines[:0] // Use shared space temporarily
+ pp := m.(*Profile)
+ pp.Location = append(pp.Location, x)
+ err := decodeMessage(b, x)
+ b.tmpLines = x.Line[:0]
+ // Copy to shrink size and detach from shared space.
+ x.Line = append([]Line(nil), x.Line...)
+ return err
+ },
+ // repeated Function function = 5
+ func(b *buffer, m message) error {
+ x := new(Function)
+ pp := m.(*Profile)
+ pp.Function = append(pp.Function, x)
+ return decodeMessage(b, x)
+ },
+ // repeated string string_table = 6
+ func(b *buffer, m message) error {
+ err := decodeStrings(b, &m.(*Profile).stringTable)
+ if err != nil {
+ return err
+ }
+ if m.(*Profile).stringTable[0] != "" {
+ return errors.New("string_table[0] must be ''")
+ }
+ return nil
+ },
+ // int64 drop_frames = 7
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).dropFramesX) },
+ // int64 keep_frames = 8
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).keepFramesX) },
+ // int64 time_nanos = 9
+ func(b *buffer, m message) error {
+ if m.(*Profile).TimeNanos != 0 {
+ return errConcatProfile
+ }
+ return decodeInt64(b, &m.(*Profile).TimeNanos)
+ },
+ // int64 duration_nanos = 10
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).DurationNanos) },
+ // ValueType period_type = 11
+ func(b *buffer, m message) error {
+ x := new(ValueType)
+ pp := m.(*Profile)
+ pp.PeriodType = x
+ return decodeMessage(b, x)
+ },
+ // int64 period = 12
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).Period) },
+ // repeated int64 comment = 13
+ func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) },
+ // int64 default_sample_type = 14
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) },
+}
+
+// postDecode takes the unexported fields populated by decode (with
+// suffix X) and populates the corresponding exported fields.
+// The unexported fields are cleared up to facilitate testing.
+func (p *Profile) postDecode() error {
+ var err error
+ mappings := make(map[uint64]*Mapping, len(p.Mapping))
+ mappingIds := make([]*Mapping, len(p.Mapping)+1)
+ for _, m := range p.Mapping {
+ m.File, err = getString(p.stringTable, &m.fileX, err)
+ m.BuildID, err = getString(p.stringTable, &m.buildIDX, err)
+ if m.ID < uint64(len(mappingIds)) {
+ mappingIds[m.ID] = m
+ } else {
+ mappings[m.ID] = m
+ }
+
+ // If this is a main Linux kernel mapping with a relocation symbol suffix
+ // ("[kernel.kallsyms]_text"), extract said suffix.
+ // It is fairly hacky to handle at this level, but the alternatives appear even worse.
+ const prefix = "[kernel.kallsyms]"
+ if strings.HasPrefix(m.File, prefix) {
+ m.KernelRelocationSymbol = m.File[len(prefix):]
+ }
+ }
+
+ functions := make(map[uint64]*Function, len(p.Function))
+ functionIds := make([]*Function, len(p.Function)+1)
+ for _, f := range p.Function {
+ f.Name, err = getString(p.stringTable, &f.nameX, err)
+ f.SystemName, err = getString(p.stringTable, &f.systemNameX, err)
+ f.Filename, err = getString(p.stringTable, &f.filenameX, err)
+ if f.ID < uint64(len(functionIds)) {
+ functionIds[f.ID] = f
+ } else {
+ functions[f.ID] = f
+ }
+ }
+
+ locations := make(map[uint64]*Location, len(p.Location))
+ locationIds := make([]*Location, len(p.Location)+1)
+ for _, l := range p.Location {
+ if id := l.mappingIDX; id < uint64(len(mappingIds)) {
+ l.Mapping = mappingIds[id]
+ } else {
+ l.Mapping = mappings[id]
+ }
+ l.mappingIDX = 0
+ for i, ln := range l.Line {
+ if id := ln.functionIDX; id != 0 {
+ l.Line[i].functionIDX = 0
+ if id < uint64(len(functionIds)) {
+ l.Line[i].Function = functionIds[id]
+ } else {
+ l.Line[i].Function = functions[id]
+ }
+ }
+ }
+ if l.ID < uint64(len(locationIds)) {
+ locationIds[l.ID] = l
+ } else {
+ locations[l.ID] = l
+ }
+ }
+
+ for _, st := range p.SampleType {
+ st.Type, err = getString(p.stringTable, &st.typeX, err)
+ st.Unit, err = getString(p.stringTable, &st.unitX, err)
+ }
+
+ // Pre-allocate space for all locations.
+ numLocations := 0
+ for _, s := range p.Sample {
+ numLocations += len(s.locationIDX)
+ }
+ locBuffer := make([]*Location, numLocations)
+
+ for _, s := range p.Sample {
+ if len(s.labelX) > 0 {
+ labels := make(map[string][]string, len(s.labelX))
+ numLabels := make(map[string][]int64, len(s.labelX))
+ numUnits := make(map[string][]string, len(s.labelX))
+ for _, l := range s.labelX {
+ var key, value string
+ key, err = getString(p.stringTable, &l.keyX, err)
+ if l.strX != 0 {
+ value, err = getString(p.stringTable, &l.strX, err)
+ labels[key] = append(labels[key], value)
+ } else if l.numX != 0 || l.unitX != 0 {
+ numValues := numLabels[key]
+ units := numUnits[key]
+ if l.unitX != 0 {
+ var unit string
+ unit, err = getString(p.stringTable, &l.unitX, err)
+ units = padStringArray(units, len(numValues))
+ numUnits[key] = append(units, unit)
+ }
+ numLabels[key] = append(numLabels[key], l.numX)
+ }
+ }
+ if len(labels) > 0 {
+ s.Label = labels
+ }
+ if len(numLabels) > 0 {
+ s.NumLabel = numLabels
+ for key, units := range numUnits {
+ if len(units) > 0 {
+ numUnits[key] = padStringArray(units, len(numLabels[key]))
+ }
+ }
+ s.NumUnit = numUnits
+ }
+ }
+
+ s.Location = locBuffer[:len(s.locationIDX)]
+ locBuffer = locBuffer[len(s.locationIDX):]
+ for i, lid := range s.locationIDX {
+ if lid < uint64(len(locationIds)) {
+ s.Location[i] = locationIds[lid]
+ } else {
+ s.Location[i] = locations[lid]
+ }
+ }
+ s.locationIDX = nil
+ }
+
+ p.DropFrames, err = getString(p.stringTable, &p.dropFramesX, err)
+ p.KeepFrames, err = getString(p.stringTable, &p.keepFramesX, err)
+
+ if pt := p.PeriodType; pt == nil {
+ p.PeriodType = &ValueType{}
+ }
+
+ if pt := p.PeriodType; pt != nil {
+ pt.Type, err = getString(p.stringTable, &pt.typeX, err)
+ pt.Unit, err = getString(p.stringTable, &pt.unitX, err)
+ }
+
+ for _, i := range p.commentX {
+ var c string
+ c, err = getString(p.stringTable, &i, err)
+ p.Comments = append(p.Comments, c)
+ }
+
+ p.commentX = nil
+ p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err)
+ p.stringTable = nil
+ return err
+}
+
+// padStringArray pads arr with enough empty strings to make arr
+// length l when arr's length is less than l.
+func padStringArray(arr []string, l int) []string {
+ if l <= len(arr) {
+ return arr
+ }
+ return append(arr, make([]string, l-len(arr))...)
+}
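+
+// Example (illustrative, not upstream documentation):
+// padStringArray([]string{"ms"}, 3) returns []string{"ms", "", ""},
+// keeping each NumLabel value index-aligned with its NumUnit entry.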
+
+func (p *ValueType) decoder() []decoder {
+ return valueTypeDecoder
+}
+
+func (p *ValueType) encode(b *buffer) {
+ encodeInt64Opt(b, 1, p.typeX)
+ encodeInt64Opt(b, 2, p.unitX)
+}
+
+var valueTypeDecoder = []decoder{
+ nil, // 0
+ // optional int64 type = 1
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).typeX) },
+ // optional int64 unit = 2
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).unitX) },
+}
+
+func (p *Sample) decoder() []decoder {
+ return sampleDecoder
+}
+
+func (p *Sample) encode(b *buffer) {
+ encodeUint64s(b, 1, p.locationIDX)
+ encodeInt64s(b, 2, p.Value)
+ for _, x := range p.labelX {
+ encodeMessage(b, 3, x)
+ }
+}
+
+var sampleDecoder = []decoder{
+ nil, // 0
+ // repeated uint64 location = 1
+ func(b *buffer, m message) error { return decodeUint64s(b, &m.(*Sample).locationIDX) },
+ // repeated int64 value = 2
+ func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Sample).Value) },
+ // repeated Label label = 3
+ func(b *buffer, m message) error {
+ s := m.(*Sample)
+ n := len(s.labelX)
+ s.labelX = append(s.labelX, label{})
+ return decodeMessage(b, &s.labelX[n])
+ },
+}
+
+func (p label) decoder() []decoder {
+ return labelDecoder
+}
+
+func (p label) encode(b *buffer) {
+ encodeInt64Opt(b, 1, p.keyX)
+ encodeInt64Opt(b, 2, p.strX)
+ encodeInt64Opt(b, 3, p.numX)
+ encodeInt64Opt(b, 4, p.unitX)
+}
+
+var labelDecoder = []decoder{
+ nil, // 0
+ // optional int64 key = 1
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).keyX) },
+ // optional int64 str = 2
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).strX) },
+ // optional int64 num = 3
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).numX) },
+ // optional int64 num_unit = 4
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).unitX) },
+}
+
+func (p *Mapping) decoder() []decoder {
+ return mappingDecoder
+}
+
+func (p *Mapping) encode(b *buffer) {
+ encodeUint64Opt(b, 1, p.ID)
+ encodeUint64Opt(b, 2, p.Start)
+ encodeUint64Opt(b, 3, p.Limit)
+ encodeUint64Opt(b, 4, p.Offset)
+ encodeInt64Opt(b, 5, p.fileX)
+ encodeInt64Opt(b, 6, p.buildIDX)
+ encodeBoolOpt(b, 7, p.HasFunctions)
+ encodeBoolOpt(b, 8, p.HasFilenames)
+ encodeBoolOpt(b, 9, p.HasLineNumbers)
+ encodeBoolOpt(b, 10, p.HasInlineFrames)
+}
+
+var mappingDecoder = []decoder{
+ nil, // 0
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).ID) }, // optional uint64 id = 1
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Start) }, // optional uint64 memory_start = 2
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Limit) }, // optional uint64 memory_limit = 3
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Offset) }, // optional uint64 file_offset = 4
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).fileX) }, // optional int64 filename = 5
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).buildIDX) }, // optional int64 build_id = 6
+ func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFunctions) }, // optional bool has_functions = 7
+ func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFilenames) }, // optional bool has_filenames = 8
+ func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasLineNumbers) }, // optional bool has_line_numbers = 9
+ func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasInlineFrames) }, // optional bool has_inline_frames = 10
+}
+
+func (p *Location) decoder() []decoder {
+ return locationDecoder
+}
+
+func (p *Location) encode(b *buffer) {
+ encodeUint64Opt(b, 1, p.ID)
+ encodeUint64Opt(b, 2, p.mappingIDX)
+ encodeUint64Opt(b, 3, p.Address)
+ for i := range p.Line {
+ encodeMessage(b, 4, &p.Line[i])
+ }
+ encodeBoolOpt(b, 5, p.IsFolded)
+}
+
+var locationDecoder = []decoder{
+ nil, // 0
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).ID) }, // optional uint64 id = 1;
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).mappingIDX) }, // optional uint64 mapping_id = 2;
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).Address) }, // optional uint64 address = 3;
+ func(b *buffer, m message) error { // repeated Line line = 4
+ pp := m.(*Location)
+ n := len(pp.Line)
+ pp.Line = append(pp.Line, Line{})
+ return decodeMessage(b, &pp.Line[n])
+ },
+ func(b *buffer, m message) error { return decodeBool(b, &m.(*Location).IsFolded) }, // optional bool is_folded = 5;
+}
+
+func (p *Line) decoder() []decoder {
+ return lineDecoder
+}
+
+func (p *Line) encode(b *buffer) {
+ encodeUint64Opt(b, 1, p.functionIDX)
+ encodeInt64Opt(b, 2, p.Line)
+ encodeInt64Opt(b, 3, p.Column)
+}
+
+var lineDecoder = []decoder{
+ nil, // 0
+ // optional uint64 function_id = 1
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) },
+ // optional int64 line = 2
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) },
+ // optional int64 column = 3
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Column) },
+}
+
+func (p *Function) decoder() []decoder {
+ return functionDecoder
+}
+
+func (p *Function) encode(b *buffer) {
+ encodeUint64Opt(b, 1, p.ID)
+ encodeInt64Opt(b, 2, p.nameX)
+ encodeInt64Opt(b, 3, p.systemNameX)
+ encodeInt64Opt(b, 4, p.filenameX)
+ encodeInt64Opt(b, 5, p.StartLine)
+}
+
+var functionDecoder = []decoder{
+ nil, // 0
+ // optional uint64 id = 1
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Function).ID) },
+ // optional int64 function_name = 2
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).nameX) },
+ // optional int64 function_system_name = 3
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).systemNameX) },
+ // optional int64 filename = 4
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).filenameX) },
+ // optional int64 start_line = 5
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).StartLine) },
+}
+
+func addString(strings map[string]int, s string) int64 {
+ i, ok := strings[s]
+ if !ok {
+ i = len(strings)
+ strings[s] = i
+ }
+ return int64(i)
+}
+
+func getString(strings []string, strng *int64, err error) (string, error) {
+ if err != nil {
+ return "", err
+ }
+ s := int(*strng)
+ if s < 0 || s >= len(strings) {
+ return "", errMalformed
+ }
+ *strng = 0
+ return strings[s], nil
+}
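+
+// Illustrative sketch (not part of the vendored file): addString and
+// getString implement the profile.proto string-table convention, in
+// which index 0 is always the empty string:
+//
+//	tbl := map[string]int{}
+//	addString(tbl, "")         // reserves index 0 for ""
+//	i := addString(tbl, "cpu") // i == 1; repeated calls return 1
+//
+// preEncode stores indices like i in the X-suffixed fields, and
+// postDecode resolves them back to strings via getString.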
diff --git a/vendor/github.com/google/pprof/profile/filter.go b/vendor/github.com/google/pprof/profile/filter.go
new file mode 100644
index 0000000..c794b93
--- /dev/null
+++ b/vendor/github.com/google/pprof/profile/filter.go
@@ -0,0 +1,274 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package profile
+
+// Implements methods to filter samples from profiles.
+
+import "regexp"
+
+// FilterSamplesByName filters the samples in a profile and only keeps
+// samples where at least one frame matches focus but none match ignore.
+// Returns true if the corresponding regexp matched at least one sample.
+func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) {
+ if focus == nil && ignore == nil && hide == nil && show == nil {
+ fm = true // Missing focus implies a match
+ return
+ }
+ focusOrIgnore := make(map[uint64]bool)
+ hidden := make(map[uint64]bool)
+ for _, l := range p.Location {
+ if ignore != nil && l.matchesName(ignore) {
+ im = true
+ focusOrIgnore[l.ID] = false
+ } else if focus == nil || l.matchesName(focus) {
+ fm = true
+ focusOrIgnore[l.ID] = true
+ }
+
+ if hide != nil && l.matchesName(hide) {
+ hm = true
+ l.Line = l.unmatchedLines(hide)
+ if len(l.Line) == 0 {
+ hidden[l.ID] = true
+ }
+ }
+ if show != nil {
+ l.Line = l.matchedLines(show)
+ if len(l.Line) == 0 {
+ hidden[l.ID] = true
+ } else {
+ hnm = true
+ }
+ }
+ }
+
+ s := make([]*Sample, 0, len(p.Sample))
+ for _, sample := range p.Sample {
+ if focusedAndNotIgnored(sample.Location, focusOrIgnore) {
+ if len(hidden) > 0 {
+ var locs []*Location
+ for _, loc := range sample.Location {
+ if !hidden[loc.ID] {
+ locs = append(locs, loc)
+ }
+ }
+ if len(locs) == 0 {
+ // Remove sample with no locations (by not adding it to s).
+ continue
+ }
+ sample.Location = locs
+ }
+ s = append(s, sample)
+ }
+ }
+ p.Sample = s
+
+ return
+}
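+
+// Usage sketch (assumed caller code, not part of this file): keep only
+// samples with a runtime.* frame while dropping GC worker frames.
+//
+//	focus := regexp.MustCompile(`^runtime\.`)
+//	ignore := regexp.MustCompile(`gcBgMarkWorker`)
+//	fm, im, _, _ := p.FilterSamplesByName(focus, ignore, nil, nil)
+//	// fm: focus matched at least one location; im: ignore matched one.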
+
+// ShowFrom drops all stack frames above the highest matching frame and returns
+// whether a match was found. If showFrom is nil it returns false and does not
+// modify the profile.
+//
+// Example: consider a sample with frames [A, B, C, B], where A is the root.
+// ShowFrom(nil) returns false and has frames [A, B, C, B].
+// ShowFrom(A) returns true and has frames [A, B, C, B].
+// ShowFrom(B) returns true and has frames [B, C, B].
+// ShowFrom(C) returns true and has frames [C, B].
+// ShowFrom(D) returns false and drops the sample because no frames remain.
+func (p *Profile) ShowFrom(showFrom *regexp.Regexp) (matched bool) {
+ if showFrom == nil {
+ return false
+ }
+ // showFromLocs stores location IDs that matched ShowFrom.
+ showFromLocs := make(map[uint64]bool)
+ // Apply to locations.
+ for _, loc := range p.Location {
+ if filterShowFromLocation(loc, showFrom) {
+ showFromLocs[loc.ID] = true
+ matched = true
+ }
+ }
+ // For all samples, strip locations after the highest matching one.
+ s := make([]*Sample, 0, len(p.Sample))
+ for _, sample := range p.Sample {
+ for i := len(sample.Location) - 1; i >= 0; i-- {
+ if showFromLocs[sample.Location[i].ID] {
+ sample.Location = sample.Location[:i+1]
+ s = append(s, sample)
+ break
+ }
+ }
+ }
+ p.Sample = s
+ return matched
+}
+
+// filterShowFromLocation tests a showFrom regex against a location, removes
+// lines after the last match and returns whether a match was found. If the
+// mapping is matched, then all lines are kept.
+func filterShowFromLocation(loc *Location, showFrom *regexp.Regexp) bool {
+ if m := loc.Mapping; m != nil && showFrom.MatchString(m.File) {
+ return true
+ }
+ if i := loc.lastMatchedLineIndex(showFrom); i >= 0 {
+ loc.Line = loc.Line[:i+1]
+ return true
+ }
+ return false
+}
+
+// lastMatchedLineIndex returns the index of the last line that matches a regex,
+// or -1 if no match is found.
+func (loc *Location) lastMatchedLineIndex(re *regexp.Regexp) int {
+ for i := len(loc.Line) - 1; i >= 0; i-- {
+ if fn := loc.Line[i].Function; fn != nil {
+ if re.MatchString(fn.Name) || re.MatchString(fn.Filename) {
+ return i
+ }
+ }
+ }
+ return -1
+}
+
+// FilterTagsByName filters the tags in a profile and only keeps
+// tags that match show and not hide.
+func (p *Profile) FilterTagsByName(show, hide *regexp.Regexp) (sm, hm bool) {
+ matchRemove := func(name string) bool {
+ matchShow := show == nil || show.MatchString(name)
+ matchHide := hide != nil && hide.MatchString(name)
+
+ if matchShow {
+ sm = true
+ }
+ if matchHide {
+ hm = true
+ }
+ return !matchShow || matchHide
+ }
+ for _, s := range p.Sample {
+ for lab := range s.Label {
+ if matchRemove(lab) {
+ delete(s.Label, lab)
+ }
+ }
+ for lab := range s.NumLabel {
+ if matchRemove(lab) {
+ delete(s.NumLabel, lab)
+ }
+ }
+ }
+ return
+}
+
+// matchesName returns whether the location matches the regular
+// expression. It checks any available function names, file names, and
+// mapping object filename.
+func (loc *Location) matchesName(re *regexp.Regexp) bool {
+ for _, ln := range loc.Line {
+ if fn := ln.Function; fn != nil {
+ if re.MatchString(fn.Name) || re.MatchString(fn.Filename) {
+ return true
+ }
+ }
+ }
+ if m := loc.Mapping; m != nil && re.MatchString(m.File) {
+ return true
+ }
+ return false
+}
+
+// unmatchedLines returns the lines in the location that do not match
+// the regular expression.
+func (loc *Location) unmatchedLines(re *regexp.Regexp) []Line {
+ if m := loc.Mapping; m != nil && re.MatchString(m.File) {
+ return nil
+ }
+ var lines []Line
+ for _, ln := range loc.Line {
+ if fn := ln.Function; fn != nil {
+ if re.MatchString(fn.Name) || re.MatchString(fn.Filename) {
+ continue
+ }
+ }
+ lines = append(lines, ln)
+ }
+ return lines
+}
+
+// matchedLines returns the lines in the location that match
+// the regular expression.
+func (loc *Location) matchedLines(re *regexp.Regexp) []Line {
+ if m := loc.Mapping; m != nil && re.MatchString(m.File) {
+ return loc.Line
+ }
+ var lines []Line
+ for _, ln := range loc.Line {
+ if fn := ln.Function; fn != nil {
+ if !re.MatchString(fn.Name) && !re.MatchString(fn.Filename) {
+ continue
+ }
+ }
+ lines = append(lines, ln)
+ }
+ return lines
+}
+
+// focusedAndNotIgnored looks up a slice of ids against a map of
+// focused/ignored locations. The map only contains locations that are
+// explicitly focused or ignored. Returns whether there is at least
+// one focused location but no ignored locations.
+func focusedAndNotIgnored(locs []*Location, m map[uint64]bool) bool {
+ var f bool
+ for _, loc := range locs {
+ if focus, focusOrIgnore := m[loc.ID]; focusOrIgnore {
+ if focus {
+ // Found focused location. Must keep searching in case there
+ // is an ignored one as well.
+ f = true
+ } else {
+ // Found ignored location. Can return false right away.
+ return false
+ }
+ }
+ }
+ return f
+}
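+
+// Example (illustrative): with m = map[uint64]bool{1: true, 2: false},
+// a sample touching location ID 2 returns false (ignored wins), one
+// touching only ID 1 returns true, and one touching neither returns
+// false because no focused location was seen.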
+
+// TagMatch selects tags for filtering
+type TagMatch func(s *Sample) bool
+
+// FilterSamplesByTag removes all samples from the profile, except
+// those that match the focus predicate and do not match the ignore
+// predicate.
+func (p *Profile) FilterSamplesByTag(focus, ignore TagMatch) (fm, im bool) {
+ samples := make([]*Sample, 0, len(p.Sample))
+ for _, s := range p.Sample {
+ focused, ignored := true, false
+ if focus != nil {
+ focused = focus(s)
+ }
+ if ignore != nil {
+ ignored = ignore(s)
+ }
+ fm = fm || focused
+ im = im || ignored
+ if focused && !ignored {
+ samples = append(samples, s)
+ }
+ }
+ p.Sample = samples
+ return
+}
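+
+// Usage sketch (assumed caller code; the "pod"/"api-server" label and
+// value are hypothetical): keep only samples whose "pod" label
+// contains the value "api-server".
+//
+//	keep := func(s *Sample) bool {
+//		for _, v := range s.Label["pod"] {
+//			if v == "api-server" {
+//				return true
+//			}
+//		}
+//		return false
+//	}
+//	p.FilterSamplesByTag(keep, nil)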
diff --git a/vendor/github.com/google/pprof/profile/index.go b/vendor/github.com/google/pprof/profile/index.go
new file mode 100644
index 0000000..bef1d60
--- /dev/null
+++ b/vendor/github.com/google/pprof/profile/index.go
@@ -0,0 +1,64 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package profile
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// SampleIndexByName returns the appropriate index for a sample index
+// value. If the value is numeric, it returns that number; otherwise it
+// looks up the text in the profile sample types.
+func (p *Profile) SampleIndexByName(sampleIndex string) (int, error) {
+ if sampleIndex == "" {
+ if dst := p.DefaultSampleType; dst != "" {
+ for i, t := range sampleTypes(p) {
+ if t == dst {
+ return i, nil
+ }
+ }
+ }
+ // By default select the last sample value
+ return len(p.SampleType) - 1, nil
+ }
+ if i, err := strconv.Atoi(sampleIndex); err == nil {
+ if i < 0 || i >= len(p.SampleType) {
+ return 0, fmt.Errorf("sample_index %s is outside the range [0..%d]", sampleIndex, len(p.SampleType)-1)
+ }
+ return i, nil
+ }
+
+ // Remove the inuse_ prefix to support legacy pprof options
+ // "inuse_space" and "inuse_objects" for profiles containing types
+ // "space" and "objects".
+ noInuse := strings.TrimPrefix(sampleIndex, "inuse_")
+ for i, t := range p.SampleType {
+ if t.Type == sampleIndex || t.Type == noInuse {
+ return i, nil
+ }
+ }
+
+ return 0, fmt.Errorf("sample_index %q must be one of: %v", sampleIndex, sampleTypes(p))
+}
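+
+// Usage sketch (assumed caller code): for a heap profile whose sample
+// types are [alloc_objects alloc_space inuse_objects inuse_space],
+// both calls below resolve to index 3.
+//
+//	i, _ := p.SampleIndexByName("inuse_space")
+//	j, _ := p.SampleIndexByName("3")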
+
+func sampleTypes(p *Profile) []string {
+ types := make([]string, len(p.SampleType))
+ for i, t := range p.SampleType {
+ types[i] = t.Type
+ }
+ return types
+}
diff --git a/vendor/github.com/google/pprof/profile/legacy_java_profile.go b/vendor/github.com/google/pprof/profile/legacy_java_profile.go
new file mode 100644
index 0000000..4580bab
--- /dev/null
+++ b/vendor/github.com/google/pprof/profile/legacy_java_profile.go
@@ -0,0 +1,315 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file implements parsers to convert java legacy profiles into
+// the profile.proto format.
+
+package profile
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ attributeRx = regexp.MustCompile(`([\w ]+)=([\w ]+)`)
+ javaSampleRx = regexp.MustCompile(` *(\d+) +(\d+) +@ +([ x0-9a-f]*)`)
+ javaLocationRx = regexp.MustCompile(`^\s*0x([[:xdigit:]]+)\s+(.*)\s*$`)
+ javaLocationFileLineRx = regexp.MustCompile(`^(.*)\s+\((.+):(-?[[:digit:]]+)\)$`)
+ javaLocationPathRx = regexp.MustCompile(`^(.*)\s+\((.*)\)$`)
+)
+
+// javaCPUProfile returns a new Profile from profilez data.
+// b is the profile bytes after the header, period is the profiling
+// period, and parse is a function to parse 8-byte chunks from the
+// profile in its native endianness.
+func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) {
+ p := &Profile{
+ Period: period * 1000,
+ PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"},
+ SampleType: []*ValueType{{Type: "samples", Unit: "count"}, {Type: "cpu", Unit: "nanoseconds"}},
+ }
+ var err error
+ var locs map[uint64]*Location
+ if b, locs, err = parseCPUSamples(b, parse, false, p); err != nil {
+ return nil, err
+ }
+
+ if err = parseJavaLocations(b, locs, p); err != nil {
+ return nil, err
+ }
+
+ // Strip out addresses for better merge.
+ if err = p.Aggregate(true, true, true, true, false, false); err != nil {
+ return nil, err
+ }
+
+ return p, nil
+}
+
+// parseJavaProfile returns a new profile from heapz or contentionz
+// data. b is the profile bytes after the header.
+func parseJavaProfile(b []byte) (*Profile, error) {
+ h := bytes.SplitAfterN(b, []byte("\n"), 2)
+ if len(h) < 2 {
+ return nil, errUnrecognized
+ }
+
+ p := &Profile{
+ PeriodType: &ValueType{},
+ }
+ header := string(bytes.TrimSpace(h[0]))
+
+ var err error
+ var pType string
+ switch header {
+ case "--- heapz 1 ---":
+ pType = "heap"
+ case "--- contentionz 1 ---":
+ pType = "contention"
+ default:
+ return nil, errUnrecognized
+ }
+
+ if b, err = parseJavaHeader(pType, h[1], p); err != nil {
+ return nil, err
+ }
+ var locs map[uint64]*Location
+ if b, locs, err = parseJavaSamples(pType, b, p); err != nil {
+ return nil, err
+ }
+ if err = parseJavaLocations(b, locs, p); err != nil {
+ return nil, err
+ }
+
+ // Strip out addresses for better merge.
+ if err = p.Aggregate(true, true, true, true, false, false); err != nil {
+ return nil, err
+ }
+
+ return p, nil
+}
+
+// parseJavaHeader parses the attribute section of a java profile and
+// populates a profile. Returns the remainder of the buffer after all
+// attributes.
+func parseJavaHeader(pType string, b []byte, p *Profile) ([]byte, error) {
+ nextNewLine := bytes.IndexByte(b, byte('\n'))
+ for nextNewLine != -1 {
+ line := string(bytes.TrimSpace(b[0:nextNewLine]))
+ if line != "" {
+ h := attributeRx.FindStringSubmatch(line)
+ if h == nil {
+ // Not a valid attribute, exit.
+ return b, nil
+ }
+
+ attribute, value := strings.TrimSpace(h[1]), strings.TrimSpace(h[2])
+ var err error
+ switch pType + "/" + attribute {
+ case "heap/format", "cpu/format", "contention/format":
+ if value != "java" {
+ return nil, errUnrecognized
+ }
+ case "heap/resolution":
+ p.SampleType = []*ValueType{
+ {Type: "inuse_objects", Unit: "count"},
+ {Type: "inuse_space", Unit: value},
+ }
+ case "contention/resolution":
+ p.SampleType = []*ValueType{
+ {Type: "contentions", Unit: "count"},
+ {Type: "delay", Unit: value},
+ }
+ case "contention/sampling period":
+ p.PeriodType = &ValueType{
+ Type: "contentions", Unit: "count",
+ }
+ if p.Period, err = strconv.ParseInt(value, 0, 64); err != nil {
+ return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err)
+ }
+ case "contention/ms since reset":
+ millis, err := strconv.ParseInt(value, 0, 64)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err)
+ }
+ p.DurationNanos = millis * 1000 * 1000
+ default:
+ return nil, errUnrecognized
+ }
+ }
+ // Grab next line.
+ b = b[nextNewLine+1:]
+ nextNewLine = bytes.IndexByte(b, byte('\n'))
+ }
+ return b, nil
+}
+
+// parseJavaSamples parses the samples from a java profile and
+// populates the Samples in a profile. Returns the remainder of the
+// buffer after the samples.
+func parseJavaSamples(pType string, b []byte, p *Profile) ([]byte, map[uint64]*Location, error) {
+ nextNewLine := bytes.IndexByte(b, byte('\n'))
+ locs := make(map[uint64]*Location)
+ for nextNewLine != -1 {
+ line := string(bytes.TrimSpace(b[0:nextNewLine]))
+ if line != "" {
+ sample := javaSampleRx.FindStringSubmatch(line)
+ if sample == nil {
+ // Not a valid sample, exit.
+ return b, locs, nil
+ }
+
+ // Java profiles have data/fields inverted compared to other
+ // profile types.
+ var err error
+ value1, value2, value3 := sample[2], sample[1], sample[3]
+ addrs, err := parseHexAddresses(value3)
+ if err != nil {
+ return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
+ }
+
+ var sloc []*Location
+ for _, addr := range addrs {
+ loc := locs[addr]
+ if locs[addr] == nil {
+ loc = &Location{
+ Address: addr,
+ }
+ p.Location = append(p.Location, loc)
+ locs[addr] = loc
+ }
+ sloc = append(sloc, loc)
+ }
+ s := &Sample{
+ Value: make([]int64, 2),
+ Location: sloc,
+ }
+
+ if s.Value[0], err = strconv.ParseInt(value1, 0, 64); err != nil {
+ return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err)
+ }
+ if s.Value[1], err = strconv.ParseInt(value2, 0, 64); err != nil {
+ return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err)
+ }
+
+ switch pType {
+ case "heap":
+ const javaHeapzSamplingRate = 524288 // 512K
+ if s.Value[0] == 0 {
+ return nil, nil, fmt.Errorf("parsing sample %s: second value must be non-zero", line)
+ }
+ s.NumLabel = map[string][]int64{"bytes": {s.Value[1] / s.Value[0]}}
+ s.Value[0], s.Value[1] = scaleHeapSample(s.Value[0], s.Value[1], javaHeapzSamplingRate)
+ case "contention":
+ if period := p.Period; period != 0 {
+ s.Value[0] = s.Value[0] * p.Period
+ s.Value[1] = s.Value[1] * p.Period
+ }
+ }
+ p.Sample = append(p.Sample, s)
+ }
+ // Grab next line.
+ b = b[nextNewLine+1:]
+ nextNewLine = bytes.IndexByte(b, byte('\n'))
+ }
+ return b, locs, nil
+}
+
+// parseJavaLocations parses the location information in a java
+// profile and populates the Locations in a profile. It uses the
+// location addresses from the profile as the ID of each location.
+func parseJavaLocations(b []byte, locs map[uint64]*Location, p *Profile) error {
+ r := bytes.NewBuffer(b)
+ fns := make(map[string]*Function)
+ for {
+ line, err := r.ReadString('\n')
+ if err != nil {
+ if err != io.EOF {
+ return err
+ }
+ if line == "" {
+ break
+ }
+ }
+
+ if line = strings.TrimSpace(line); line == "" {
+ continue
+ }
+
+ jloc := javaLocationRx.FindStringSubmatch(line)
+ if len(jloc) != 3 {
+ continue
+ }
+ addr, err := strconv.ParseUint(jloc[1], 16, 64)
+ if err != nil {
+ return fmt.Errorf("parsing sample %s: %v", line, err)
+ }
+ loc := locs[addr]
+ if loc == nil {
+ // Unused/unseen
+ continue
+ }
+ var lineFunc, lineFile string
+ var lineNo int64
+
+ if fileLine := javaLocationFileLineRx.FindStringSubmatch(jloc[2]); len(fileLine) == 4 {
+ // Found a line of the form: "function (file:line)"
+ lineFunc, lineFile = fileLine[1], fileLine[2]
+ if n, err := strconv.ParseInt(fileLine[3], 10, 64); err == nil && n > 0 {
+ lineNo = n
+ }
+ } else if filePath := javaLocationPathRx.FindStringSubmatch(jloc[2]); len(filePath) == 3 {
+ // If there's no file:line, it's a shared library path.
+ // The full path isn't interesting, so keep just the .so base name.
+ lineFunc, lineFile = filePath[1], filepath.Base(filePath[2])
+ } else if strings.Contains(jloc[2], "generated stub/JIT") {
+ lineFunc = "STUB"
+ } else {
+ // Treat whole line as the function name. This is used by the
+ // java agent for internal states such as "GC" or "VM".
+ lineFunc = jloc[2]
+ }
+ fn := fns[lineFunc]
+
+ if fn == nil {
+ fn = &Function{
+ Name: lineFunc,
+ SystemName: lineFunc,
+ Filename: lineFile,
+ }
+ fns[lineFunc] = fn
+ p.Function = append(p.Function, fn)
+ }
+ loc.Line = []Line{
+ {
+ Function: fn,
+ Line: lineNo,
+ },
+ }
+ loc.Address = 0
+ }
+
+ p.remapLocationIDs()
+ p.remapFunctionIDs()
+ p.remapMappingIDs()
+
+ return nil
+}
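+
+// Illustrative input (assumed, matching the regexps above): java
+// location lines take one of these shapes:
+//
+//	0x000000000000000a com.example.Foo.bar (Foo.java:42)
+//	0x000000000000000b frobnicate (/usr/lib/libfoo.so)
+//
+// The first form yields function, file and line; the second keeps only
+// the base name of the shared object as the file.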
diff --git a/vendor/github.com/google/pprof/profile/legacy_profile.go b/vendor/github.com/google/pprof/profile/legacy_profile.go
new file mode 100644
index 0000000..8d07fd6
--- /dev/null
+++ b/vendor/github.com/google/pprof/profile/legacy_profile.go
@@ -0,0 +1,1228 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file implements parsers to convert legacy profiles into the
+// profile.proto format.
+
+package profile
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ countStartRE = regexp.MustCompile(`\A(\S+) profile: total \d+\z`)
+ countRE = regexp.MustCompile(`\A(\d+) @(( 0x[0-9a-f]+)+)\z`)
+
+ heapHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] *@ *(heap[_a-z0-9]*)/?(\d*)`)
+ heapSampleRE = regexp.MustCompile(`(-?\d+): *(-?\d+) *\[ *(\d+): *(\d+) *] @([ x0-9a-f]*)`)
+
+ contentionSampleRE = regexp.MustCompile(`(\d+) *(\d+) @([ x0-9a-f]*)`)
+
+ hexNumberRE = regexp.MustCompile(`0x[0-9a-f]+`)
+
+ growthHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ growthz?`)
+
+ fragmentationHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ fragmentationz?`)
+
+ threadzStartRE = regexp.MustCompile(`--- threadz \d+ ---`)
+ threadStartRE = regexp.MustCompile(`--- Thread ([[:xdigit:]]+) \(name: (.*)/(\d+)\) stack: ---`)
+
+ // Regular expressions to parse process mappings. Support the format used by Linux /proc/.../maps and other tools.
+ // Recommended format:
+ // Start End object file name offset(optional) linker build id
+ // 0x40000-0x80000 /path/to/binary (@FF00) abc123456
+ spaceDigits = `\s+[[:digit:]]+`
+ hexPair = `\s+[[:xdigit:]]+:[[:xdigit:]]+`
+ oSpace = `\s*`
+ // Capturing expressions.
+ cHex = `(?:0x)?([[:xdigit:]]+)`
+ cHexRange = `\s*` + cHex + `[\s-]?` + oSpace + cHex + `:?`
+ cSpaceString = `(?:\s+(\S+))?`
+ cSpaceHex = `(?:\s+([[:xdigit:]]+))?`
+ cSpaceAtOffset = `(?:\s+\(@([[:xdigit:]]+)\))?`
+ cPerm = `(?:\s+([-rwxp]+))?`
+
+ procMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceHex + hexPair + spaceDigits + cSpaceString)
+ briefMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceString + cSpaceAtOffset + cSpaceHex)
+
+ // Regular expression to parse log data, of the form:
+ // ... file:line] msg...
+ logInfoRE = regexp.MustCompile(`^[^\[\]]+:[0-9]+]\s`)
+)
+
+func isSpaceOrComment(line string) bool {
+ trimmed := strings.TrimSpace(line)
+ return len(trimmed) == 0 || trimmed[0] == '#'
+}
+
+// parseGoCount parses a Go count profile (e.g., threadcreate or
+// goroutine) and returns a new Profile.
+func parseGoCount(b []byte) (*Profile, error) {
+ s := bufio.NewScanner(bytes.NewBuffer(b))
+ // Skip comments at the beginning of the file.
+ for s.Scan() && isSpaceOrComment(s.Text()) {
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+ m := countStartRE.FindStringSubmatch(s.Text())
+ if m == nil {
+ return nil, errUnrecognized
+ }
+ profileType := m[1]
+ p := &Profile{
+ PeriodType: &ValueType{Type: profileType, Unit: "count"},
+ Period: 1,
+ SampleType: []*ValueType{{Type: profileType, Unit: "count"}},
+ }
+ locations := make(map[uint64]*Location)
+ for s.Scan() {
+ line := s.Text()
+ if isSpaceOrComment(line) {
+ continue
+ }
+ if strings.HasPrefix(line, "---") {
+ break
+ }
+ m := countRE.FindStringSubmatch(line)
+ if m == nil {
+ return nil, errMalformed
+ }
+ n, err := strconv.ParseInt(m[1], 0, 64)
+ if err != nil {
+ return nil, errMalformed
+ }
+ fields := strings.Fields(m[2])
+ locs := make([]*Location, 0, len(fields))
+ for _, stk := range fields {
+ addr, err := strconv.ParseUint(stk, 0, 64)
+ if err != nil {
+ return nil, errMalformed
+ }
+ // Adjust all frames by -1 to land on top of the call instruction.
+ addr--
+ loc := locations[addr]
+ if loc == nil {
+ loc = &Location{
+ Address: addr,
+ }
+ locations[addr] = loc
+ p.Location = append(p.Location, loc)
+ }
+ locs = append(locs, loc)
+ }
+ p.Sample = append(p.Sample, &Sample{
+ Location: locs,
+ Value: []int64{n},
+ })
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ if err := parseAdditionalSections(s, p); err != nil {
+ return nil, err
+ }
+ return p, nil
+}
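+
+// Illustrative input (assumed, matching countStartRE and countRE):
+//
+//	goroutine profile: total 2
+//	1 @ 0x42a1c5 0x42a0b3
+//	1 @ 0x404b90
+//
+// Each count line becomes one Sample whose single value is the count.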
+
+// remapLocationIDs ensures there is a location for each address
+// referenced by a sample, and remaps the samples to point to the new
+// location ids.
+func (p *Profile) remapLocationIDs() {
+ seen := make(map[*Location]bool, len(p.Location))
+ var locs []*Location
+
+ for _, s := range p.Sample {
+ for _, l := range s.Location {
+ if seen[l] {
+ continue
+ }
+ l.ID = uint64(len(locs) + 1)
+ locs = append(locs, l)
+ seen[l] = true
+ }
+ }
+ p.Location = locs
+}
+
+func (p *Profile) remapFunctionIDs() {
+ seen := make(map[*Function]bool, len(p.Function))
+ var fns []*Function
+
+ for _, l := range p.Location {
+ for _, ln := range l.Line {
+ fn := ln.Function
+ if fn == nil || seen[fn] {
+ continue
+ }
+ fn.ID = uint64(len(fns) + 1)
+ fns = append(fns, fn)
+ seen[fn] = true
+ }
+ }
+ p.Function = fns
+}
+
+// remapMappingIDs matches location addresses with existing mappings
+// and updates them appropriately. This is O(N*M); if this ever shows
+// up as a bottleneck, evaluate sorting the mappings and doing a
+// binary search, which would make it O(N*log(M)).
+func (p *Profile) remapMappingIDs() {
+ // Some profile handlers will incorrectly set regions for the main
+ // executable if its section is remapped. Fix them through heuristics.
+
+ if len(p.Mapping) > 0 {
+ // Remove the initial mapping if named '/anon_hugepage' and has a
+ // consecutive adjacent mapping.
+ if m := p.Mapping[0]; strings.HasPrefix(m.File, "/anon_hugepage") {
+ if len(p.Mapping) > 1 && m.Limit == p.Mapping[1].Start {
+ p.Mapping = p.Mapping[1:]
+ }
+ }
+ }
+
+ // Subtract the offset from the start of the main mapping if it
+ // ends up at a recognizable start address.
+ if len(p.Mapping) > 0 {
+ const expectedStart = 0x400000
+ if m := p.Mapping[0]; m.Start-m.Offset == expectedStart {
+ m.Start = expectedStart
+ m.Offset = 0
+ }
+ }
+
+ // Associate each location that has an address with the corresponding
+ // mapping. Create a fake mapping if a suitable one isn't found.
+ var fake *Mapping
+nextLocation:
+ for _, l := range p.Location {
+ a := l.Address
+ if l.Mapping != nil || a == 0 {
+ continue
+ }
+ for _, m := range p.Mapping {
+ if m.Start <= a && a < m.Limit {
+ l.Mapping = m
+ continue nextLocation
+ }
+ }
+ // Work around legacy handlers failing to encode the first
+ // part of mappings split into adjacent ranges.
+ for _, m := range p.Mapping {
+ if m.Offset != 0 && m.Start-m.Offset <= a && a < m.Start {
+ m.Start -= m.Offset
+ m.Offset = 0
+ l.Mapping = m
+ continue nextLocation
+ }
+ }
+ // If there is still no mapping, create a fake one.
+ // This is important for the Go legacy handler, which produced
+ // no mappings.
+ if fake == nil {
+ fake = &Mapping{
+ ID: 1,
+ Limit: ^uint64(0),
+ }
+ p.Mapping = append(p.Mapping, fake)
+ }
+ l.Mapping = fake
+ }
+
+ // Reset all mapping IDs.
+ for i, m := range p.Mapping {
+ m.ID = uint64(i + 1)
+ }
+}
+
+var cpuInts = []func([]byte) (uint64, []byte){
+ get32l,
+ get32b,
+ get64l,
+ get64b,
+}
+
+func get32l(b []byte) (uint64, []byte) {
+ if len(b) < 4 {
+ return 0, nil
+ }
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24, b[4:]
+}
+
+func get32b(b []byte) (uint64, []byte) {
+ if len(b) < 4 {
+ return 0, nil
+ }
+ return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24, b[4:]
+}
+
+func get64l(b []byte) (uint64, []byte) {
+ if len(b) < 8 {
+ return 0, nil
+ }
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56, b[8:]
+}
+
+func get64b(b []byte) (uint64, []byte) {
+ if len(b) < 8 {
+ return 0, nil
+ }
+ return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56, b[8:]
+}
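+
+// Example (illustrative): get32l decodes little-endian and get32b
+// big-endian, so for b = []byte{0x01, 0x00, 0x00, 0x00}:
+//
+//	v, rest := get32l(b) // v == 1, len(rest) == 0
+//	w, _ := get32b(b)    // w == 0x01000000
+//
+// parseCPU tries each decoder in cpuInts until the header words make sense.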
+
+// parseCPU parses a profilez legacy profile and returns a newly
+// populated Profile.
+//
+// The general format for profilez samples is a sequence of words in
+// binary format. The first words are a header with the following data:
+//
+// 1st word -- 0
+// 2nd word -- 3
+// 3rd word -- 0 if a c++ application, 1 if a java application.
+// 4th word -- Sampling period (in microseconds).
+// 5th word -- Padding.
+func parseCPU(b []byte) (*Profile, error) {
+ var parse func([]byte) (uint64, []byte)
+ var n1, n2, n3, n4, n5 uint64
+ for _, parse = range cpuInts {
+ var tmp []byte
+ n1, tmp = parse(b)
+ n2, tmp = parse(tmp)
+ n3, tmp = parse(tmp)
+ n4, tmp = parse(tmp)
+ n5, tmp = parse(tmp)
+
+ if tmp != nil && n1 == 0 && n2 == 3 && n3 == 0 && n4 > 0 && n5 == 0 {
+ b = tmp
+ return cpuProfile(b, int64(n4), parse)
+ }
+ if tmp != nil && n1 == 0 && n2 == 3 && n3 == 1 && n4 > 0 && n5 == 0 {
+ b = tmp
+ return javaCPUProfile(b, int64(n4), parse)
+ }
+ }
+ return nil, errUnrecognized
+}
+
+// cpuProfile returns a new Profile from C++ profilez data.
+// b is the profile bytes after the header, period is the profiling
+// period, and parse is a function to parse 8-byte chunks from the
+// profile in its native endianness.
+func cpuProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) {
+ p := &Profile{
+ Period: period * 1000,
+ PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"},
+ SampleType: []*ValueType{
+ {Type: "samples", Unit: "count"},
+ {Type: "cpu", Unit: "nanoseconds"},
+ },
+ }
+ var err error
+ if b, _, err = parseCPUSamples(b, parse, true, p); err != nil {
+ return nil, err
+ }
+
+ // If *most* samples have the same second-to-the-bottom frame, it
+ // strongly suggests that it is an uninteresting artifact of
+ // measurement -- a stack frame pushed by the signal handler. The
+ // bottom frame is always correct as it is picked up from the signal
+ // structure, not the stack. Check if this is the case and if so,
+ // remove.
+
+ // Remove up to two frames.
+ maxiter := 2
+ // Allow one different sample for this many samples with the same
+ // second-to-last frame.
+ similarSamples := 32
+ margin := len(p.Sample) / similarSamples
+
+ for iter := 0; iter < maxiter; iter++ {
+ addr1 := make(map[uint64]int)
+ for _, s := range p.Sample {
+ if len(s.Location) > 1 {
+ a := s.Location[1].Address
+ addr1[a] = addr1[a] + 1
+ }
+ }
+
+ for id1, count := range addr1 {
+ if count >= len(p.Sample)-margin {
+ // Found uninteresting frame, strip it out from all samples
+ for _, s := range p.Sample {
+ if len(s.Location) > 1 && s.Location[1].Address == id1 {
+ s.Location = append(s.Location[:1], s.Location[2:]...)
+ }
+ }
+ break
+ }
+ }
+ }
+
+ if err := p.ParseMemoryMap(bytes.NewBuffer(b)); err != nil {
+ return nil, err
+ }
+
+ cleanupDuplicateLocations(p)
+ return p, nil
+}
+
+func cleanupDuplicateLocations(p *Profile) {
+ // The profile handler may duplicate the leaf frame, because it gets
+ // its address both from stack unwinding and from the signal
+ // context. Detect this and delete the duplicate, which has been
+ // adjusted by -1. The leaf address should not be adjusted as it is
+ // not a call.
+ for _, s := range p.Sample {
+ if len(s.Location) > 1 && s.Location[0].Address == s.Location[1].Address+1 {
+ s.Location = append(s.Location[:1], s.Location[2:]...)
+ }
+ }
+}
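+
+// Example (illustrative): a sample with addresses [0x1000, 0xfff,
+// 0x2000] carries a duplicated leaf (0xfff is 0x1000 adjusted by -1),
+// so the second entry is removed, leaving [0x1000, 0x2000].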
+
+// parseCPUSamples parses a collection of profilez samples from a
+// profile.
+//
+// profilez samples are a repeated sequence of stack frames of the
+// form:
+//
+// 1st word -- The number of times this stack was encountered.
+// 2nd word -- The size of the stack (StackSize).
+// 3rd word -- The first address on the stack.
+// ...
+// StackSize + 2 -- The last address on the stack
+//
+// The last stack trace is of the form:
+//
+// 1st word -- 0
+// 2nd word -- 1
+// 3rd word -- 0
+//
+// Addresses from stack traces may point to the next instruction after
+// each call. Optionally adjust by -1 to land somewhere on the actual
+// call (except for the leaf, which is not a call).
+func parseCPUSamples(b []byte, parse func(b []byte) (uint64, []byte), adjust bool, p *Profile) ([]byte, map[uint64]*Location, error) {
+ locs := make(map[uint64]*Location)
+ for len(b) > 0 {
+ var count, nstk uint64
+ count, b = parse(b)
+ nstk, b = parse(b)
+ if b == nil || nstk > uint64(len(b)/4) {
+ return nil, nil, errUnrecognized
+ }
+ var sloc []*Location
+ addrs := make([]uint64, nstk)
+ for i := 0; i < int(nstk); i++ {
+ addrs[i], b = parse(b)
+ }
+
+ if count == 0 && nstk == 1 && addrs[0] == 0 {
+ // End of data marker
+ break
+ }
+ for i, addr := range addrs {
+ if adjust && i > 0 {
+ addr--
+ }
+ loc := locs[addr]
+ if loc == nil {
+ loc = &Location{
+ Address: addr,
+ }
+ locs[addr] = loc
+ p.Location = append(p.Location, loc)
+ }
+ sloc = append(sloc, loc)
+ }
+ p.Sample = append(p.Sample,
+ &Sample{
+ Value: []int64{int64(count), int64(count) * p.Period},
+ Location: sloc,
+ })
+ }
+ // Reached the end of the samples, either via the EOD marker above or by exhausting the input.
+ return b, locs, nil
+}
+
+// parseHeap parses a heapz legacy or a growthz profile and
+// returns a newly populated Profile.
+func parseHeap(b []byte) (p *Profile, err error) {
+ s := bufio.NewScanner(bytes.NewBuffer(b))
+ if !s.Scan() {
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+ return nil, errUnrecognized
+ }
+ p = &Profile{}
+
+ sampling := ""
+ hasAlloc := false
+
+ line := s.Text()
+ p.PeriodType = &ValueType{Type: "space", Unit: "bytes"}
+ if header := heapHeaderRE.FindStringSubmatch(line); header != nil {
+ sampling, p.Period, hasAlloc, err = parseHeapHeader(line)
+ if err != nil {
+ return nil, err
+ }
+ } else if header = growthHeaderRE.FindStringSubmatch(line); header != nil {
+ p.Period = 1
+ } else if header = fragmentationHeaderRE.FindStringSubmatch(line); header != nil {
+ p.Period = 1
+ } else {
+ return nil, errUnrecognized
+ }
+
+ if hasAlloc {
+ // Put alloc before inuse so that default pprof selection
+ // will prefer inuse_space.
+ p.SampleType = []*ValueType{
+ {Type: "alloc_objects", Unit: "count"},
+ {Type: "alloc_space", Unit: "bytes"},
+ {Type: "inuse_objects", Unit: "count"},
+ {Type: "inuse_space", Unit: "bytes"},
+ }
+ } else {
+ p.SampleType = []*ValueType{
+ {Type: "objects", Unit: "count"},
+ {Type: "space", Unit: "bytes"},
+ }
+ }
+
+ locs := make(map[uint64]*Location)
+ for s.Scan() {
+ line := strings.TrimSpace(s.Text())
+
+ if isSpaceOrComment(line) {
+ continue
+ }
+
+ if isMemoryMapSentinel(line) {
+ break
+ }
+
+ value, blocksize, addrs, err := parseHeapSample(line, p.Period, sampling, hasAlloc)
+ if err != nil {
+ return nil, err
+ }
+
+ var sloc []*Location
+ for _, addr := range addrs {
+ // Addresses from stack traces point to the next instruction after
+ // each call. Adjust by -1 to land somewhere on the actual call.
+ addr--
+ loc := locs[addr]
+ if loc == nil {
+ loc = &Location{
+ Address: addr,
+ }
+ p.Location = append(p.Location, loc)
+ locs[addr] = loc
+ }
+ sloc = append(sloc, loc)
+ }
+
+ p.Sample = append(p.Sample, &Sample{
+ Value: value,
+ Location: sloc,
+ NumLabel: map[string][]int64{"bytes": {blocksize}},
+ })
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+ if err := parseAdditionalSections(s, p); err != nil {
+ return nil, err
+ }
+ return p, nil
+}
+
+func parseHeapHeader(line string) (sampling string, period int64, hasAlloc bool, err error) {
+ header := heapHeaderRE.FindStringSubmatch(line)
+ if header == nil {
+ return "", 0, false, errUnrecognized
+ }
+
+ if len(header[6]) > 0 {
+ if period, err = strconv.ParseInt(header[6], 10, 64); err != nil {
+ return "", 0, false, errUnrecognized
+ }
+ }
+
+ if (header[3] != header[1] && header[3] != "0") || (header[4] != header[2] && header[4] != "0") {
+ hasAlloc = true
+ }
+
+ switch header[5] {
+ case "heapz_v2", "heap_v2":
+ return "v2", period, hasAlloc, nil
+ case "heapprofile":
+ return "", 1, hasAlloc, nil
+ case "heap":
+ return "v2", period / 2, hasAlloc, nil
+ default:
+ return "", 0, false, errUnrecognized
+ }
+}
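+
+// For reference, a legacy Go runtime heap header looks like this
+// (illustrative counts):
+//
+//	heap profile: 4: 2678528 [21: 11807744] @ heap/1048576
+//
+// i.e. 4 in-use objects occupying 2678528 bytes, 21 allocated objects
+// occupying 11807744 bytes, variant "heap" with a reported rate of 1048576.
+// The Go runtime reports twice the actual sampling rate in this variant,
+// which is why the "heap" case above halves the period.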
+
+// parseHeapSample parses a single row from a heap profile into a new Sample.
+func parseHeapSample(line string, rate int64, sampling string, includeAlloc bool) (value []int64, blocksize int64, addrs []uint64, err error) {
+ sampleData := heapSampleRE.FindStringSubmatch(line)
+ if len(sampleData) != 6 {
+ return nil, 0, nil, fmt.Errorf("unexpected number of sample values: got %d, want 6", len(sampleData))
+ }
+
+ // This is a local-scoped helper function to avoid needing to pass
+ // around rate, sampling and many return parameters.
+ addValues := func(countString, sizeString string, label string) error {
+ count, err := strconv.ParseInt(countString, 10, 64)
+ if err != nil {
+ return fmt.Errorf("malformed sample: %s: %v", line, err)
+ }
+ size, err := strconv.ParseInt(sizeString, 10, 64)
+ if err != nil {
+ return fmt.Errorf("malformed sample: %s: %v", line, err)
+ }
+ if count == 0 && size != 0 {
+ return fmt.Errorf("%s count was 0 but %s bytes was %d", label, label, size)
+ }
+ if count != 0 {
+ blocksize = size / count
+ if sampling == "v2" {
+ count, size = scaleHeapSample(count, size, rate)
+ }
+ }
+ value = append(value, count, size)
+ return nil
+ }
+
+ if includeAlloc {
+ if err := addValues(sampleData[3], sampleData[4], "allocation"); err != nil {
+ return nil, 0, nil, err
+ }
+ }
+
+ if err := addValues(sampleData[1], sampleData[2], "inuse"); err != nil {
+ return nil, 0, nil, err
+ }
+
+ addrs, err = parseHexAddresses(sampleData[5])
+ if err != nil {
+ return nil, 0, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
+ }
+
+ return value, blocksize, addrs, nil
+}
+
+// parseHexAddresses extracts hex numbers from a string, attempts to convert
+// each to an unsigned 64-bit number and returns the resulting numbers as a
+// slice, or an error if the string contains hex numbers which are too large to
+// handle (which means a malformed profile).
+func parseHexAddresses(s string) ([]uint64, error) {
+ hexStrings := hexNumberRE.FindAllString(s, -1)
+ var addrs []uint64
+ for _, s := range hexStrings {
+ if addr, err := strconv.ParseUint(s, 0, 64); err == nil {
+ addrs = append(addrs, addr)
+ } else {
+ return nil, fmt.Errorf("failed to parse as hex 64-bit number: %s", s)
+ }
+ }
+ return addrs, nil
+}
+
+// scaleHeapSample adjusts the data from a heapz Sample to
+// account for its probability of appearing in the collected
+// data. heapz profiles are a sampling of the memory allocation
+// requests in a program. We estimate the unsampled value by dividing
+// each collected sample by its probability of appearing in the
+// profile. heapz v2 profiles rely on a Poisson process to determine
+// which samples to collect, based on the desired average collection
+// rate R. The probability of a sample of size S appearing in that
+// profile is 1-exp(-S/R).
+func scaleHeapSample(count, size, rate int64) (int64, int64) {
+ if count == 0 || size == 0 {
+ return 0, 0
+ }
+
+ if rate <= 1 {
+ // if rate==1 all samples were collected so no adjustment is needed.
+ // if rate<1 treat as unknown and skip scaling.
+ return count, size
+ }
+
+ avgSize := float64(size) / float64(count)
+ scale := 1 / (1 - math.Exp(-avgSize/float64(rate)))
+
+ return int64(float64(count) * scale), int64(float64(size) * scale)
+}
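+
+// A worked instance of the formula above (illustrative numbers): with
+// rate R = 524288 bytes and a single sampled allocation of S = 1024 bytes,
+// the probability of collection is 1-exp(-1024/524288) ≈ 0.00195, giving a
+// scale factor of about 512.5:
+//
+//	count, size := scaleHeapSample(1, 1024, 524288)
+//	// count == 512, size == 524800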
+
+// parseContention parses a mutex or contention profile. There are 2 cases:
+// "--- contentionz " for legacy C++ profiles (and backwards compatibility)
+// "--- mutex:" or "--- contention:" for profiles generated by the Go runtime.
+func parseContention(b []byte) (*Profile, error) {
+ s := bufio.NewScanner(bytes.NewBuffer(b))
+ if !s.Scan() {
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+ return nil, errUnrecognized
+ }
+
+ switch l := s.Text(); {
+ case strings.HasPrefix(l, "--- contentionz "):
+ case strings.HasPrefix(l, "--- mutex:"):
+ case strings.HasPrefix(l, "--- contention:"):
+ default:
+ return nil, errUnrecognized
+ }
+
+ p := &Profile{
+ PeriodType: &ValueType{Type: "contentions", Unit: "count"},
+ Period: 1,
+ SampleType: []*ValueType{
+ {Type: "contentions", Unit: "count"},
+ {Type: "delay", Unit: "nanoseconds"},
+ },
+ }
+
+ var cpuHz int64
+ // Parse text of the form "attribute = value" before the samples.
+ const delimiter = "="
+ for s.Scan() {
+ line := s.Text()
+ if line = strings.TrimSpace(line); isSpaceOrComment(line) {
+ continue
+ }
+ if strings.HasPrefix(line, "---") {
+ break
+ }
+ attr := strings.SplitN(line, delimiter, 2)
+ if len(attr) != 2 {
+ break
+ }
+ key, val := strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1])
+ var err error
+ switch key {
+ case "cycles/second":
+ if cpuHz, err = strconv.ParseInt(val, 0, 64); err != nil {
+ return nil, errUnrecognized
+ }
+ case "sampling period":
+ if p.Period, err = strconv.ParseInt(val, 0, 64); err != nil {
+ return nil, errUnrecognized
+ }
+ case "ms since reset":
+ ms, err := strconv.ParseInt(val, 0, 64)
+ if err != nil {
+ return nil, errUnrecognized
+ }
+ p.DurationNanos = ms * 1000 * 1000
+ case "format":
+ // CPP contentionz profiles don't have format.
+ return nil, errUnrecognized
+ case "resolution":
+ // CPP contentionz profiles don't have resolution.
+ return nil, errUnrecognized
+ case "discarded samples":
+ default:
+ return nil, errUnrecognized
+ }
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ locs := make(map[uint64]*Location)
+ for {
+ line := strings.TrimSpace(s.Text())
+ if strings.HasPrefix(line, "---") {
+ break
+ }
+ if !isSpaceOrComment(line) {
+ value, addrs, err := parseContentionSample(line, p.Period, cpuHz)
+ if err != nil {
+ return nil, err
+ }
+ var sloc []*Location
+ for _, addr := range addrs {
+ // Addresses from stack traces point to the next instruction after
+ // each call. Adjust by -1 to land somewhere on the actual call.
+ addr--
+ loc := locs[addr]
+ if loc == nil {
+ loc = &Location{
+ Address: addr,
+ }
+ p.Location = append(p.Location, loc)
+ locs[addr] = loc
+ }
+ sloc = append(sloc, loc)
+ }
+ p.Sample = append(p.Sample, &Sample{
+ Value: value,
+ Location: sloc,
+ })
+ }
+ if !s.Scan() {
+ break
+ }
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ if err := parseAdditionalSections(s, p); err != nil {
+ return nil, err
+ }
+
+ return p, nil
+}
+
+// parseContentionSample parses a single row from a contention profile
+// into a new Sample.
+func parseContentionSample(line string, period, cpuHz int64) (value []int64, addrs []uint64, err error) {
+ sampleData := contentionSampleRE.FindStringSubmatch(line)
+ if sampleData == nil {
+ return nil, nil, errUnrecognized
+ }
+
+ v1, err := strconv.ParseInt(sampleData[1], 10, 64)
+ if err != nil {
+ return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
+ }
+ v2, err := strconv.ParseInt(sampleData[2], 10, 64)
+ if err != nil {
+ return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
+ }
+
+ // Unsample values if period and cpuHz are available.
+ // - Delays are scaled to cycles and then to nanoseconds.
+ // - Contentions are scaled to cycles.
+ if period > 0 {
+ if cpuHz > 0 {
+ cpuGHz := float64(cpuHz) / 1e9
+ v1 = int64(float64(v1) * float64(period) / cpuGHz)
+ }
+ v2 = v2 * period
+ }
+
+ value = []int64{v2, v1}
+ addrs, err = parseHexAddresses(sampleData[3])
+ if err != nil {
+ return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
+ }
+
+ return value, addrs, nil
+}
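+
+// Illustrative arithmetic for the unsampling above (made-up values): with
+// "sampling period" = 100 and "cycles/second" = 2e9 (so cpuGHz = 2), a
+// sampled delay of v1 = 500 cycles becomes 500 * 100 / 2 = 25000ns, and a
+// sampled contention count of v2 = 3 becomes 3 * 100 = 300 contentions.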
+
+// parseThread parses a Threadz profile and returns a new Profile.
+func parseThread(b []byte) (*Profile, error) {
+ s := bufio.NewScanner(bytes.NewBuffer(b))
+ // Skip past comments and empty lines seeking a real header.
+ for s.Scan() && isSpaceOrComment(s.Text()) {
+ }
+
+ line := s.Text()
+ if m := threadzStartRE.FindStringSubmatch(line); m != nil {
+ // Advance over initial comments until first stack trace.
+ for s.Scan() {
+ if line = s.Text(); isMemoryMapSentinel(line) || strings.HasPrefix(line, "-") {
+ break
+ }
+ }
+ } else if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 {
+ return nil, errUnrecognized
+ }
+
+ p := &Profile{
+ SampleType: []*ValueType{{Type: "thread", Unit: "count"}},
+ PeriodType: &ValueType{Type: "thread", Unit: "count"},
+ Period: 1,
+ }
+
+ locs := make(map[uint64]*Location)
+ // Recognize each thread and populate profile samples.
+ for !isMemoryMapSentinel(line) {
+ if strings.HasPrefix(line, "---- no stack trace for") {
+ break
+ }
+ if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 {
+ return nil, errUnrecognized
+ }
+
+ var addrs []uint64
+ var err error
+ line, addrs, err = parseThreadSample(s)
+ if err != nil {
+ return nil, err
+ }
+ if len(addrs) == 0 {
+ // We got a --same as previous threads--. Bump counters.
+ if len(p.Sample) > 0 {
+ s := p.Sample[len(p.Sample)-1]
+ s.Value[0]++
+ }
+ continue
+ }
+
+ var sloc []*Location
+ for i, addr := range addrs {
+ // Addresses from stack traces point to the next instruction after
+ // each call. Adjust by -1 to land somewhere on the actual call
+ // (except for the leaf, which is not a call).
+ if i > 0 {
+ addr--
+ }
+ loc := locs[addr]
+ if loc == nil {
+ loc = &Location{
+ Address: addr,
+ }
+ p.Location = append(p.Location, loc)
+ locs[addr] = loc
+ }
+ sloc = append(sloc, loc)
+ }
+
+ p.Sample = append(p.Sample, &Sample{
+ Value: []int64{1},
+ Location: sloc,
+ })
+ }
+
+ if err := parseAdditionalSections(s, p); err != nil {
+ return nil, err
+ }
+
+ cleanupDuplicateLocations(p)
+ return p, nil
+}
+
+// parseThreadSample parses a symbolized or unsymbolized stack trace.
+// Returns the first line after the traceback, the sample (or nil if
+// it hits a 'same-as-previous' marker) and an error.
+func parseThreadSample(s *bufio.Scanner) (nextl string, addrs []uint64, err error) {
+ var line string
+ sameAsPrevious := false
+ for s.Scan() {
+ line = strings.TrimSpace(s.Text())
+ if line == "" {
+ continue
+ }
+
+ if strings.HasPrefix(line, "---") {
+ break
+ }
+ if strings.Contains(line, "same as previous thread") {
+ sameAsPrevious = true
+ continue
+ }
+
+ curAddrs, err := parseHexAddresses(line)
+ if err != nil {
+ return "", nil, fmt.Errorf("malformed sample: %s: %v", line, err)
+ }
+ addrs = append(addrs, curAddrs...)
+ }
+ if err := s.Err(); err != nil {
+ return "", nil, err
+ }
+ if sameAsPrevious {
+ return line, nil, nil
+ }
+ return line, addrs, nil
+}
+
+// parseAdditionalSections parses any additional sections in the
+// profile, ignoring any unrecognized sections.
+func parseAdditionalSections(s *bufio.Scanner, p *Profile) error {
+ for !isMemoryMapSentinel(s.Text()) && s.Scan() {
+ }
+ if err := s.Err(); err != nil {
+ return err
+ }
+ return p.ParseMemoryMapFromScanner(s)
+}
+
+// ParseProcMaps parses a memory map in the format of /proc/self/maps.
+// To associate locations with their corresponding mappings based on
+// address, call ParseMemoryMap on the profile instead.
+func ParseProcMaps(rd io.Reader) ([]*Mapping, error) {
+ s := bufio.NewScanner(rd)
+ return parseProcMapsFromScanner(s)
+}
+
+func parseProcMapsFromScanner(s *bufio.Scanner) ([]*Mapping, error) {
+ var mapping []*Mapping
+
+ var attrs []string
+ const delimiter = "="
+ r := strings.NewReplacer()
+ for s.Scan() {
+ line := r.Replace(removeLoggingInfo(s.Text()))
+ m, err := parseMappingEntry(line)
+ if err != nil {
+ if err == errUnrecognized {
+ // Recognize assignments of the form: attr=value, and replace
+ // $attr with value on subsequent mappings.
+ if attr := strings.SplitN(line, delimiter, 2); len(attr) == 2 {
+ attrs = append(attrs, "$"+strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1]))
+ r = strings.NewReplacer(attrs...)
+ }
+ // Ignore any unrecognized entries
+ continue
+ }
+ return nil, err
+ }
+ if m == nil {
+ continue
+ }
+ mapping = append(mapping, m)
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+ return mapping, nil
+}
+
+// removeLoggingInfo detects and removes log prefix entries generated
+// by the glog package. If no logging prefix is detected, the string
+// is returned unmodified.
+func removeLoggingInfo(line string) string {
+ if match := logInfoRE.FindStringIndex(line); match != nil {
+ return line[match[1]:]
+ }
+ return line
+}
+
+// ParseMemoryMap parses a memory map in the format of
+// /proc/self/maps, and overrides the mappings in the current profile.
+// It renumbers the samples and locations in the profile correspondingly.
+func (p *Profile) ParseMemoryMap(rd io.Reader) error {
+ return p.ParseMemoryMapFromScanner(bufio.NewScanner(rd))
+}
+
+// ParseMemoryMapFromScanner parses a memory map in the format of
+// /proc/self/maps or one of a variety of legacy formats, and overrides
+// the mappings in the current profile. It renumbers the samples and
+// locations in the profile correspondingly.
+func (p *Profile) ParseMemoryMapFromScanner(s *bufio.Scanner) error {
+ mapping, err := parseProcMapsFromScanner(s)
+ if err != nil {
+ return err
+ }
+ p.Mapping = append(p.Mapping, mapping...)
+ p.massageMappings()
+ p.remapLocationIDs()
+ p.remapFunctionIDs()
+ p.remapMappingIDs()
+ return nil
+}
+
+func parseMappingEntry(l string) (*Mapping, error) {
+ var start, end, perm, file, offset, buildID string
+ if me := procMapsRE.FindStringSubmatch(l); len(me) == 6 {
+ start, end, perm, offset, file = me[1], me[2], me[3], me[4], me[5]
+ } else if me := briefMapsRE.FindStringSubmatch(l); len(me) == 7 {
+ start, end, perm, file, offset, buildID = me[1], me[2], me[3], me[4], me[5], me[6]
+ } else {
+ return nil, errUnrecognized
+ }
+
+ var err error
+ mapping := &Mapping{
+ File: file,
+ BuildID: buildID,
+ }
+ if perm != "" && !strings.Contains(perm, "x") {
+ // Skip non-executable entries.
+ return nil, nil
+ }
+ if mapping.Start, err = strconv.ParseUint(start, 16, 64); err != nil {
+ return nil, errUnrecognized
+ }
+ if mapping.Limit, err = strconv.ParseUint(end, 16, 64); err != nil {
+ return nil, errUnrecognized
+ }
+ if offset != "" {
+ if mapping.Offset, err = strconv.ParseUint(offset, 16, 64); err != nil {
+ return nil, errUnrecognized
+ }
+ }
+ return mapping, nil
+}
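+
+// A typical /proc/self/maps entry accepted above looks like this
+// (hypothetical addresses and file):
+//
+//	7f8a2c000000-7f8a2c1d4000 r-xp 00000000 08:01 1048602  /usr/lib/libc.so.6
+//
+// Only executable segments (an "x" in the permission string) are kept, and
+// the start, end and offset fields are parsed as hexadecimal.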
+
+var memoryMapSentinels = []string{
+ "--- Memory map: ---",
+ "MAPPED_LIBRARIES:",
+}
+
+// isMemoryMapSentinel returns true if the string contains one of the
+// known sentinels for memory map information.
+func isMemoryMapSentinel(line string) bool {
+ for _, s := range memoryMapSentinels {
+ if strings.Contains(line, s) {
+ return true
+ }
+ }
+ return false
+}
+
+func (p *Profile) addLegacyFrameInfo() {
+ switch {
+ case isProfileType(p, heapzSampleTypes):
+ p.DropFrames, p.KeepFrames = allocRxStr, allocSkipRxStr
+ case isProfileType(p, contentionzSampleTypes):
+ p.DropFrames, p.KeepFrames = lockRxStr, ""
+ default:
+ p.DropFrames, p.KeepFrames = cpuProfilerRxStr, ""
+ }
+}
+
+var heapzSampleTypes = [][]string{
+ {"allocations", "size"}, // early Go pprof profiles
+ {"objects", "space"},
+ {"inuse_objects", "inuse_space"},
+ {"alloc_objects", "alloc_space"},
+ {"alloc_objects", "alloc_space", "inuse_objects", "inuse_space"}, // Go pprof legacy profiles
+}
+var contentionzSampleTypes = [][]string{
+ {"contentions", "delay"},
+}
+
+func isProfileType(p *Profile, types [][]string) bool {
+ st := p.SampleType
+nextType:
+ for _, t := range types {
+ if len(st) != len(t) {
+ continue
+ }
+
+ for i := range st {
+ if st[i].Type != t[i] {
+ continue nextType
+ }
+ }
+ return true
+ }
+ return false
+}
+
+var allocRxStr = strings.Join([]string{
+ // POSIX entry points.
+ `calloc`,
+ `cfree`,
+ `malloc`,
+ `free`,
+ `memalign`,
+ `do_memalign`,
+ `(__)?posix_memalign`,
+ `pvalloc`,
+ `valloc`,
+ `realloc`,
+
+ // TC malloc.
+ `tcmalloc::.*`,
+ `tc_calloc`,
+ `tc_cfree`,
+ `tc_malloc`,
+ `tc_free`,
+ `tc_memalign`,
+ `tc_posix_memalign`,
+ `tc_pvalloc`,
+ `tc_valloc`,
+ `tc_realloc`,
+ `tc_new`,
+ `tc_delete`,
+ `tc_newarray`,
+ `tc_deletearray`,
+ `tc_new_nothrow`,
+ `tc_newarray_nothrow`,
+
+ // Memory-allocation routines on OS X.
+ `malloc_zone_malloc`,
+ `malloc_zone_calloc`,
+ `malloc_zone_valloc`,
+ `malloc_zone_realloc`,
+ `malloc_zone_memalign`,
+ `malloc_zone_free`,
+
+ // Go runtime
+ `runtime\..*`,
+
+ // Other misc. memory allocation routines
+ `BaseArena::.*`,
+ `(::)?do_malloc_no_errno`,
+ `(::)?do_malloc_pages`,
+ `(::)?do_malloc`,
+ `DoSampledAllocation`,
+ `MallocedMemBlock::MallocedMemBlock`,
+ `_M_allocate`,
+ `__builtin_(vec_)?delete`,
+ `__builtin_(vec_)?new`,
+ `__gnu_cxx::new_allocator::allocate`,
+ `__libc_malloc`,
+ `__malloc_alloc_template::allocate`,
+ `allocate`,
+ `cpp_alloc`,
+ `operator new(\[\])?`,
+ `simple_alloc::allocate`,
+}, `|`)
+
+var allocSkipRxStr = strings.Join([]string{
+ // Preserve Go runtime frames that appear in the middle/bottom of
+ // the stack.
+ `runtime\.panic`,
+ `runtime\.reflectcall`,
+ `runtime\.call[0-9]*`,
+}, `|`)
+
+var cpuProfilerRxStr = strings.Join([]string{
+ `ProfileData::Add`,
+ `ProfileData::prof_handler`,
+ `CpuProfiler::prof_handler`,
+ `__pthread_sighandler`,
+ `__restore`,
+}, `|`)
+
+var lockRxStr = strings.Join([]string{
+ `RecordLockProfileData`,
+ `(base::)?RecordLockProfileData.*`,
+ `(base::)?SubmitMutexProfileData.*`,
+ `(base::)?SubmitSpinLockProfileData.*`,
+ `(base::Mutex::)?AwaitCommon.*`,
+ `(base::Mutex::)?Unlock.*`,
+ `(base::Mutex::)?UnlockSlow.*`,
+ `(base::Mutex::)?ReaderUnlock.*`,
+ `(base::MutexLock::)?~MutexLock.*`,
+ `(Mutex::)?AwaitCommon.*`,
+ `(Mutex::)?Unlock.*`,
+ `(Mutex::)?UnlockSlow.*`,
+ `(Mutex::)?ReaderUnlock.*`,
+ `(MutexLock::)?~MutexLock.*`,
+ `(SpinLock::)?Unlock.*`,
+ `(SpinLock::)?SlowUnlock.*`,
+ `(SpinLockHolder::)?~SpinLockHolder.*`,
+}, `|`)
diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go
new file mode 100644
index 0000000..eee0132
--- /dev/null
+++ b/vendor/github.com/google/pprof/profile/merge.go
@@ -0,0 +1,669 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package profile
+
+import (
+ "encoding/binary"
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// Compact performs garbage collection on a profile to remove any
+// unreferenced fields. This is useful to reduce the size of a profile
+// after samples or locations have been removed.
+func (p *Profile) Compact() *Profile {
+ p, _ = Merge([]*Profile{p})
+ return p
+}
+
+// Merge merges all the profiles in profs into a single Profile.
+// Returns a new profile independent of the input profiles. The merged
+// profile is compacted to eliminate unused samples, locations,
+// functions and mappings. Profiles must have identical profile sample
+// and period types or the merge will fail. profile.Period of the
+// resulting profile will be the maximum of all profiles, and
+// profile.TimeNanos will be the earliest nonzero one. Merges are
+// associative with the caveat of the first profile having some
+// specialization in how headers are combined. There may be other
+// subtleties now or in the future regarding associativity.
+func Merge(srcs []*Profile) (*Profile, error) {
+ if len(srcs) == 0 {
+ return nil, fmt.Errorf("no profiles to merge")
+ }
+ p, err := combineHeaders(srcs)
+ if err != nil {
+ return nil, err
+ }
+
+ pm := &profileMerger{
+ p: p,
+ samples: make(map[sampleKey]*Sample, len(srcs[0].Sample)),
+ locations: make(map[locationKey]*Location, len(srcs[0].Location)),
+ functions: make(map[functionKey]*Function, len(srcs[0].Function)),
+ mappings: make(map[mappingKey]*Mapping, len(srcs[0].Mapping)),
+ }
+
+ for _, src := range srcs {
+ // Clear the profile-specific hash tables
+ pm.locationsByID = makeLocationIDMap(len(src.Location))
+ pm.functionsByID = make(map[uint64]*Function, len(src.Function))
+ pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping))
+
+ if len(pm.mappings) == 0 && len(src.Mapping) > 0 {
+ // The Mapping list has the property that the first mapping
+ // represents the main binary. Take the first Mapping we see,
+ // otherwise the operations below will add mappings in an
+ // arbitrary order.
+ pm.mapMapping(src.Mapping[0])
+ }
+
+ for _, s := range src.Sample {
+ if !isZeroSample(s) {
+ pm.mapSample(s)
+ }
+ }
+ }
+
+ for _, s := range p.Sample {
+ if isZeroSample(s) {
+ // If there are any zero samples, re-merge the profile to GC
+ // them.
+ return Merge([]*Profile{p})
+ }
+ }
+
+ return p, nil
+}
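+
+// A minimal usage sketch for Merge (file names are hypothetical):
+//
+//	f1, _ := os.Open("cpu1.pb.gz")
+//	f2, _ := os.Open("cpu2.pb.gz")
+//	p1, _ := profile.Parse(f1)
+//	p2, _ := profile.Parse(f2)
+//	merged, err := profile.Merge([]*profile.Profile{p1, p2})
+//	if err != nil {
+//		// the profiles had incompatible sample or period types
+//	}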
+
+// Normalize normalizes the source profile by multiplying each value in the
+// profile by the ratio of the sum of the base profile's values for that
+// sample type to the sum of the source profile's values for that sample type.
+func (p *Profile) Normalize(pb *Profile) error {
+ if err := p.compatible(pb); err != nil {
+ return err
+ }
+
+ baseVals := make([]int64, len(p.SampleType))
+ for _, s := range pb.Sample {
+ for i, v := range s.Value {
+ baseVals[i] += v
+ }
+ }
+
+ srcVals := make([]int64, len(p.SampleType))
+ for _, s := range p.Sample {
+ for i, v := range s.Value {
+ srcVals[i] += v
+ }
+ }
+
+ normScale := make([]float64, len(baseVals))
+ for i := range baseVals {
+ if srcVals[i] == 0 {
+ normScale[i] = 0.0
+ } else {
+ normScale[i] = float64(baseVals[i]) / float64(srcVals[i])
+ }
+ }
+ p.ScaleN(normScale)
+ return nil
+}
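+
+// For example (made-up totals): if the base profile's values for a sample
+// type sum to 200 and the source profile's values for that type sum to 100,
+// the ratio is 2.0 and every source value of that type is doubled; a zero
+// source total yields a ratio of 0, zeroing that sample type instead.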
+
+func isZeroSample(s *Sample) bool {
+ for _, v := range s.Value {
+ if v != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+type profileMerger struct {
+ p *Profile
+
+ // Memoization tables within a profile.
+ locationsByID locationIDMap
+ functionsByID map[uint64]*Function
+ mappingsByID map[uint64]mapInfo
+
+ // Memoization tables for profile entities.
+ samples map[sampleKey]*Sample
+ locations map[locationKey]*Location
+ functions map[functionKey]*Function
+ mappings map[mappingKey]*Mapping
+}
+
+type mapInfo struct {
+ m *Mapping
+ offset int64
+}
+
+func (pm *profileMerger) mapSample(src *Sample) *Sample {
+ // Check memoization table
+ k := pm.sampleKey(src)
+ if ss, ok := pm.samples[k]; ok {
+ for i, v := range src.Value {
+ ss.Value[i] += v
+ }
+ return ss
+ }
+
+ // Make new sample.
+ s := &Sample{
+ Location: make([]*Location, len(src.Location)),
+ Value: make([]int64, len(src.Value)),
+ Label: make(map[string][]string, len(src.Label)),
+ NumLabel: make(map[string][]int64, len(src.NumLabel)),
+ NumUnit: make(map[string][]string, len(src.NumLabel)),
+ }
+ for i, l := range src.Location {
+ s.Location[i] = pm.mapLocation(l)
+ }
+ for k, v := range src.Label {
+ vv := make([]string, len(v))
+ copy(vv, v)
+ s.Label[k] = vv
+ }
+ for k, v := range src.NumLabel {
+ u := src.NumUnit[k]
+ vv := make([]int64, len(v))
+ uu := make([]string, len(u))
+ copy(vv, v)
+ copy(uu, u)
+ s.NumLabel[k] = vv
+ s.NumUnit[k] = uu
+ }
+ copy(s.Value, src.Value)
+ pm.samples[k] = s
+ pm.p.Sample = append(pm.p.Sample, s)
+ return s
+}
+
+func (pm *profileMerger) sampleKey(sample *Sample) sampleKey {
+ // Accumulate contents into a string.
+ var buf strings.Builder
+ buf.Grow(64) // Heuristic to avoid extra allocs
+
+ // encode a number
+ putNumber := func(v uint64) {
+ var num [binary.MaxVarintLen64]byte
+ n := binary.PutUvarint(num[:], v)
+ buf.Write(num[:n])
+ }
+
+ // encode a string prefixed with its length.
+ putDelimitedString := func(s string) {
+ putNumber(uint64(len(s)))
+ buf.WriteString(s)
+ }
+
+ for _, l := range sample.Location {
+ // Get the location in the merged profile, which may have a different ID.
+ if loc := pm.mapLocation(l); loc != nil {
+ putNumber(loc.ID)
+ }
+ }
+ putNumber(0) // Delimiter
+
+ for _, l := range sortedKeys1(sample.Label) {
+ putDelimitedString(l)
+ values := sample.Label[l]
+ putNumber(uint64(len(values)))
+ for _, v := range values {
+ putDelimitedString(v)
+ }
+ }
+
+ for _, l := range sortedKeys2(sample.NumLabel) {
+ putDelimitedString(l)
+ values := sample.NumLabel[l]
+ putNumber(uint64(len(values)))
+ for _, v := range values {
+ putNumber(uint64(v))
+ }
+ units := sample.NumUnit[l]
+ putNumber(uint64(len(units)))
+ for _, v := range units {
+ putDelimitedString(v)
+ }
+ }
+
+ return sampleKey(buf.String())
+}
+
+type sampleKey string
+
+// sortedKeys1 returns the sorted keys found in a string->[]string map.
+//
+// Note: this is currently non-generic since github pprof runs golint,
+// which does not support generics. When that issue is fixed, it can
+// be merged with sortedKeys2 and made into a generic function.
+func sortedKeys1(m map[string][]string) []string {
+ if len(m) == 0 {
+ return nil
+ }
+ keys := make([]string, 0, len(m))
+ for k := range m {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+// sortedKeys2 returns the sorted keys found in a string->[]int64 map.
+//
+// Note: this is currently non-generic since github pprof runs golint,
+// which does not support generics. When that issue is fixed, it can
+// be merged with sortedKeys1 and made into a generic function.
+func sortedKeys2(m map[string][]int64) []string {
+ if len(m) == 0 {
+ return nil
+ }
+ keys := make([]string, 0, len(m))
+ for k := range m {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+func (pm *profileMerger) mapLocation(src *Location) *Location {
+ if src == nil {
+ return nil
+ }
+
+ if l := pm.locationsByID.get(src.ID); l != nil {
+ return l
+ }
+
+ mi := pm.mapMapping(src.Mapping)
+ l := &Location{
+ ID: uint64(len(pm.p.Location) + 1),
+ Mapping: mi.m,
+ Address: uint64(int64(src.Address) + mi.offset),
+ Line: make([]Line, len(src.Line)),
+ IsFolded: src.IsFolded,
+ }
+ for i, ln := range src.Line {
+ l.Line[i] = pm.mapLine(ln)
+ }
+ // Check memoization table. Must be done on the remapped location to
+ // account for the remapped mapping ID.
+ k := l.key()
+ if ll, ok := pm.locations[k]; ok {
+ pm.locationsByID.set(src.ID, ll)
+ return ll
+ }
+ pm.locationsByID.set(src.ID, l)
+ pm.locations[k] = l
+ pm.p.Location = append(pm.p.Location, l)
+ return l
+}
+
+// key generates locationKey to be used as a key for maps.
+func (l *Location) key() locationKey {
+ key := locationKey{
+ addr: l.Address,
+ isFolded: l.IsFolded,
+ }
+ if l.Mapping != nil {
+ // Normalizes address to handle address space randomization.
+ key.addr -= l.Mapping.Start
+ key.mappingID = l.Mapping.ID
+ }
+ // Each line contributes three entries: function ID, line number and column.
+ lines := make([]string, len(l.Line)*3)
+ for i, line := range l.Line {
+ if line.Function != nil {
+ lines[i*3] = strconv.FormatUint(line.Function.ID, 16)
+ }
+ lines[i*3+1] = strconv.FormatInt(line.Line, 16)
+ lines[i*3+2] = strconv.FormatInt(line.Column, 16)
+ }
+ key.lines = strings.Join(lines, "|")
+ return key
+}
+
+type locationKey struct {
+ addr, mappingID uint64
+ lines string
+ isFolded bool
+}
+
+func (pm *profileMerger) mapMapping(src *Mapping) mapInfo {
+ if src == nil {
+ return mapInfo{}
+ }
+
+ if mi, ok := pm.mappingsByID[src.ID]; ok {
+ return mi
+ }
+
+ // Check memoization tables.
+ mk := src.key()
+ if m, ok := pm.mappings[mk]; ok {
+ mi := mapInfo{m, int64(m.Start) - int64(src.Start)}
+ pm.mappingsByID[src.ID] = mi
+ return mi
+ }
+ m := &Mapping{
+ ID: uint64(len(pm.p.Mapping) + 1),
+ Start: src.Start,
+ Limit: src.Limit,
+ Offset: src.Offset,
+ File: src.File,
+ KernelRelocationSymbol: src.KernelRelocationSymbol,
+ BuildID: src.BuildID,
+ HasFunctions: src.HasFunctions,
+ HasFilenames: src.HasFilenames,
+ HasLineNumbers: src.HasLineNumbers,
+ HasInlineFrames: src.HasInlineFrames,
+ }
+ pm.p.Mapping = append(pm.p.Mapping, m)
+
+ // Update memoization tables.
+ pm.mappings[mk] = m
+ mi := mapInfo{m, 0}
+ pm.mappingsByID[src.ID] = mi
+ return mi
+}
+
+// key generates encoded strings of Mapping to be used as a key for
+// maps.
+func (m *Mapping) key() mappingKey {
+ // Normalize addresses to handle address space randomization.
+ // Round up to next 4K boundary to avoid minor discrepancies.
+ const mapsizeRounding = 0x1000
+
+ size := m.Limit - m.Start
+ size = size + mapsizeRounding - 1
+ size = size - (size % mapsizeRounding)
+ key := mappingKey{
+ size: size,
+ offset: m.Offset,
+ }
+
+ switch {
+ case m.BuildID != "":
+ key.buildIDOrFile = m.BuildID
+ case m.File != "":
+ key.buildIDOrFile = m.File
+ default:
+ // A mapping containing neither build ID nor file name is a fake mapping. A
+ // key with empty buildIDOrFile is used for fake mappings so that they are
+ // treated as the same mapping during merging.
+ }
+ return key
+}
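+
+// Rounding example for the key above (hypothetical mapping): Start=0x400000
+// and Limit=0x401234 give size 0x1234, which rounds up to 0x2000, so two
+// instances of the same binary whose mapped sizes differ only within a 4K
+// page produce the same key.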
+
+type mappingKey struct {
+ size, offset uint64
+ buildIDOrFile string
+}
+
+func (pm *profileMerger) mapLine(src Line) Line {
+ ln := Line{
+ Function: pm.mapFunction(src.Function),
+ Line: src.Line,
+ Column: src.Column,
+ }
+ return ln
+}
+
+func (pm *profileMerger) mapFunction(src *Function) *Function {
+ if src == nil {
+ return nil
+ }
+ if f, ok := pm.functionsByID[src.ID]; ok {
+ return f
+ }
+ k := src.key()
+ if f, ok := pm.functions[k]; ok {
+ pm.functionsByID[src.ID] = f
+ return f
+ }
+ f := &Function{
+ ID: uint64(len(pm.p.Function) + 1),
+ Name: src.Name,
+ SystemName: src.SystemName,
+ Filename: src.Filename,
+ StartLine: src.StartLine,
+ }
+ pm.functions[k] = f
+ pm.functionsByID[src.ID] = f
+ pm.p.Function = append(pm.p.Function, f)
+ return f
+}
+
+// key generates a struct to be used as a key for maps.
+func (f *Function) key() functionKey {
+ return functionKey{
+ f.StartLine,
+ f.Name,
+ f.SystemName,
+ f.Filename,
+ }
+}
+
+type functionKey struct {
+ startLine int64
+ name, systemName, fileName string
+}
+
+// combineHeaders checks that all profiles can be merged and returns
+// their combined profile.
+func combineHeaders(srcs []*Profile) (*Profile, error) {
+ for _, s := range srcs[1:] {
+ if err := srcs[0].compatible(s); err != nil {
+ return nil, err
+ }
+ }
+
+ var timeNanos, durationNanos, period int64
+ var comments []string
+ seenComments := map[string]bool{}
+ var defaultSampleType string
+ for _, s := range srcs {
+ if timeNanos == 0 || s.TimeNanos < timeNanos {
+ timeNanos = s.TimeNanos
+ }
+ durationNanos += s.DurationNanos
+ if period == 0 || period < s.Period {
+ period = s.Period
+ }
+ for _, c := range s.Comments {
+ if seen := seenComments[c]; !seen {
+ comments = append(comments, c)
+ seenComments[c] = true
+ }
+ }
+ if defaultSampleType == "" {
+ defaultSampleType = s.DefaultSampleType
+ }
+ }
+
+ p := &Profile{
+ SampleType: make([]*ValueType, len(srcs[0].SampleType)),
+
+ DropFrames: srcs[0].DropFrames,
+ KeepFrames: srcs[0].KeepFrames,
+
+ TimeNanos: timeNanos,
+ DurationNanos: durationNanos,
+ PeriodType: srcs[0].PeriodType,
+ Period: period,
+
+ Comments: comments,
+ DefaultSampleType: defaultSampleType,
+ }
+ copy(p.SampleType, srcs[0].SampleType)
+ return p, nil
+}
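+
+// A small worked case (made-up values): merging three profiles with
+// TimeNanos {0, 200, 100}, DurationNanos {5, 5, 5} and Period {10, 20, 10}
+// yields TimeNanos 100 (earliest nonzero), DurationNanos 15 (the sum) and
+// Period 20 (the maximum).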
+
+// compatible determines if two profiles can be compared/merged.
+// returns nil if the profiles are compatible; otherwise an error with
+// details on the incompatibility.
+func (p *Profile) compatible(pb *Profile) error {
+ if !equalValueType(p.PeriodType, pb.PeriodType) {
+ return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType)
+ }
+
+ if len(p.SampleType) != len(pb.SampleType) {
+ return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
+ }
+
+ for i := range p.SampleType {
+ if !equalValueType(p.SampleType[i], pb.SampleType[i]) {
+ return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
+ }
+ }
+ return nil
+}
+
+// equalValueType returns true if the two value types are semantically
+// equal. It ignores the internal fields used during encode/decode.
+func equalValueType(st1, st2 *ValueType) bool {
+ return st1.Type == st2.Type && st1.Unit == st2.Unit
+}
+
+// locationIDMap is like a map[uint64]*Location, but provides efficiency for
+// ids that are densely numbered, which is often the case.
+type locationIDMap struct {
+ dense []*Location // indexed by id for id < len(dense)
+ sparse map[uint64]*Location // indexed by id for id >= len(dense)
+}
+
+func makeLocationIDMap(n int) locationIDMap {
+ return locationIDMap{
+ dense: make([]*Location, n),
+ sparse: map[uint64]*Location{},
+ }
+}
+
+func (lm locationIDMap) get(id uint64) *Location {
+ if id < uint64(len(lm.dense)) {
+ return lm.dense[int(id)]
+ }
+ return lm.sparse[id]
+}
+
+func (lm locationIDMap) set(id uint64, loc *Location) {
+ if id < uint64(len(lm.dense)) {
+ lm.dense[id] = loc
+ return
+ }
+ lm.sparse[id] = loc
+}
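+
+// Sketch of the dense/sparse split: makeLocationIDMap(3) serves IDs below 3
+// straight from the slice and falls back to the map only for larger IDs, so
+// the common case of densely numbered locations needs no map lookups at all.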
+
+// CompatibilizeSampleTypes makes profiles compatible to be compared/merged.
+// It keeps only the sample types that appear in all profiles, dropping and
+// reordering the remaining sample types as necessary.
+//
+// If the sample type order differs between the given profiles, the order is
+// derived from the first profile.
+//
+// Profiles are modified in-place.
+//
+// It returns an error if the intersection of sample types is empty.
+func CompatibilizeSampleTypes(ps []*Profile) error {
+ sTypes := commonSampleTypes(ps)
+ if len(sTypes) == 0 {
+ return fmt.Errorf("profiles have empty common sample type list")
+ }
+ for _, p := range ps {
+ if err := compatibilizeSampleTypes(p, sTypes); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// commonSampleTypes returns the sample types that appear in all profiles, in
+// the order in which they appear in the first profile.
+func commonSampleTypes(ps []*Profile) []string {
+ if len(ps) == 0 {
+ return nil
+ }
+ sTypes := map[string]int{}
+ for _, p := range ps {
+ for _, st := range p.SampleType {
+ sTypes[st.Type]++
+ }
+ }
+ var res []string
+ for _, st := range ps[0].SampleType {
+ if sTypes[st.Type] == len(ps) {
+ res = append(res, st.Type)
+ }
+ }
+ return res
+}
+
+// compatibilizeSampleTypes drops sample types that are not present in the
+// sTypes list and reorders the remaining ones if needed.
+//
+// It sets DefaultSampleType to sTypes[0] if the current default is not in
+// the sTypes list.
+//
+// It assumes that all sample types from the sTypes list are present in the
+// given profile; otherwise it returns an error.
+func compatibilizeSampleTypes(p *Profile, sTypes []string) error {
+ if len(sTypes) == 0 {
+ return fmt.Errorf("sample type list is empty")
+ }
+ defaultSampleType := sTypes[0]
+ reMap, needToModify := make([]int, len(sTypes)), false
+ for i, st := range sTypes {
+ if st == p.DefaultSampleType {
+ defaultSampleType = p.DefaultSampleType
+ }
+ idx := searchValueType(p.SampleType, st)
+ if idx < 0 {
+ return fmt.Errorf("%q sample type is not found in profile", st)
+ }
+ reMap[i] = idx
+ if idx != i {
+ needToModify = true
+ }
+ }
+ if !needToModify && len(sTypes) == len(p.SampleType) {
+ return nil
+ }
+ p.DefaultSampleType = defaultSampleType
+ oldSampleTypes := p.SampleType
+ p.SampleType = make([]*ValueType, len(sTypes))
+ for i, idx := range reMap {
+ p.SampleType[i] = oldSampleTypes[idx]
+ }
+ values := make([]int64, len(sTypes))
+ for _, s := range p.Sample {
+ for i, idx := range reMap {
+ values[i] = s.Value[idx]
+ }
+ s.Value = s.Value[:len(values)]
+ copy(s.Value, values)
+ }
+ return nil
+}
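+
+// Worked example (hypothetical types): with sTypes = ["alloc_space",
+// "inuse_space"] and a profile whose SampleType is ["inuse_space",
+// "alloc_space", "inuse_objects"], reMap becomes [1, 0]; every sample's
+// Value shrinks from three entries to two, with alloc_space first.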
+
+func searchValueType(vts []*ValueType, s string) int {
+ for i, vt := range vts {
+ if vt.Type == s {
+ return i
+ }
+ }
+ return -1
+}
diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go
new file mode 100644
index 0000000..5551eb0
--- /dev/null
+++ b/vendor/github.com/google/pprof/profile/profile.go
@@ -0,0 +1,864 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package profile provides a representation of profile.proto and
+// methods to encode/decode profiles in this format.
+package profile
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "math"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+)
+
+// Profile is an in-memory representation of profile.proto.
+type Profile struct {
+ SampleType []*ValueType
+ DefaultSampleType string
+ Sample []*Sample
+ Mapping []*Mapping
+ Location []*Location
+ Function []*Function
+ Comments []string
+
+ DropFrames string
+ KeepFrames string
+
+ TimeNanos int64
+ DurationNanos int64
+ PeriodType *ValueType
+ Period int64
+
+ // The following fields are modified during encoding and copying,
+ // so are protected by a Mutex.
+ encodeMu sync.Mutex
+
+ commentX []int64
+ dropFramesX int64
+ keepFramesX int64
+ stringTable []string
+ defaultSampleTypeX int64
+}
+
+// ValueType corresponds to Profile.ValueType
+type ValueType struct {
+ Type string // cpu, wall, inuse_space, etc
+ Unit string // seconds, nanoseconds, bytes, etc
+
+ typeX int64
+ unitX int64
+}
+
+// Sample corresponds to Profile.Sample
+type Sample struct {
+ Location []*Location
+ Value []int64
+ // Label is a per-label-key map to values for string labels.
+ //
+ // In general, having multiple values for the given label key is strongly
+ // discouraged - see docs for the sample label field in profile.proto. The
+ // main reason this unlikely state is tracked here is to make the
+ // decoding->encoding roundtrip not lossy. But we expect that the value
+ // slices present in this map are always of length 1.
+ Label map[string][]string
+ // NumLabel is a per-label-key map to values for numeric labels. See a note
+ // above on handling multiple values for a label.
+ NumLabel map[string][]int64
+ // NumUnit is a per-label-key map to the unit names of corresponding numeric
+ // label values. The unit info may be missing even if the label is in
+ // NumLabel, see the docs in profile.proto for details. When the value
+ // slice is present and not nil, its length must be equal to the length of
+ // the corresponding value slice in NumLabel.
+ NumUnit map[string][]string
+
+ locationIDX []uint64
+ labelX []label
+}
+
+// label corresponds to Profile.Label
+type label struct {
+ keyX int64
+ // Exactly one of the two following values must be set
+ strX int64
+ numX int64 // Integer value for this label
+ // can be set if numX has value
+ unitX int64
+}
+
+// Mapping corresponds to Profile.Mapping
+type Mapping struct {
+ ID uint64
+ Start uint64
+ Limit uint64
+ Offset uint64
+ File string
+ BuildID string
+ HasFunctions bool
+ HasFilenames bool
+ HasLineNumbers bool
+ HasInlineFrames bool
+
+ fileX int64
+ buildIDX int64
+
+ // Name of the kernel relocation symbol ("_text" or "_stext"), extracted from File.
+ // For linux kernel mappings generated by some tools, correct symbolization depends
+ // on knowing which of the two possible relocation symbols was used for `Start`.
+ // This is given to us as a suffix in `File` (e.g. "[kernel.kallsyms]_stext").
+ //
+ // Note, this public field is not persisted in the proto. For the purposes of
+ // copying / merging / hashing profiles, it is considered subsumed by `File`.
+ KernelRelocationSymbol string
+}
+
+// Location corresponds to Profile.Location
+type Location struct {
+ ID uint64
+ Mapping *Mapping
+ Address uint64
+ Line []Line
+ IsFolded bool
+
+ mappingIDX uint64
+}
+
+// Line corresponds to Profile.Line
+type Line struct {
+ Function *Function
+ Line int64
+ Column int64
+
+ functionIDX uint64
+}
+
+// Function corresponds to Profile.Function
+type Function struct {
+ ID uint64
+ Name string
+ SystemName string
+ Filename string
+ StartLine int64
+
+ nameX int64
+ systemNameX int64
+ filenameX int64
+}
+
+// Parse parses a profile and checks for its validity. The input
+// may be a gzip-compressed encoded protobuf or one of many legacy
+// profile formats which may be unsupported in the future.
+func Parse(r io.Reader) (*Profile, error) {
+ data, err := io.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+ return ParseData(data)
+}
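+
+// A minimal usage sketch (the file name is hypothetical):
+//
+//	f, err := os.Open("profile.pb.gz")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer f.Close()
+//	p, err := profile.Parse(f)
+//	if err != nil {
+//		log.Fatal(err)
+//	}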
+
+// ParseData parses a profile from a buffer and checks for its
+// validity.
+func ParseData(data []byte) (*Profile, error) {
+ var p *Profile
+ var err error
+ if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b {
+ gz, err := gzip.NewReader(bytes.NewBuffer(data))
+ if err == nil {
+ data, err = io.ReadAll(gz)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("decompressing profile: %v", err)
+ }
+ }
+ if p, err = ParseUncompressed(data); err != nil && err != errNoData && err != errConcatProfile {
+ p, err = parseLegacy(data)
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf("parsing profile: %v", err)
+ }
+
+ if err := p.CheckValid(); err != nil {
+ return nil, fmt.Errorf("malformed profile: %v", err)
+ }
+ return p, nil
+}
+
+var errUnrecognized = fmt.Errorf("unrecognized profile format")
+var errMalformed = fmt.Errorf("malformed profile format")
+var errNoData = fmt.Errorf("empty input file")
+var errConcatProfile = fmt.Errorf("concatenated profiles detected")
+
+func parseLegacy(data []byte) (*Profile, error) {
+ parsers := []func([]byte) (*Profile, error){
+ parseCPU,
+ parseHeap,
+ parseGoCount, // goroutine, threadcreate
+ parseThread,
+ parseContention,
+ parseJavaProfile,
+ }
+
+ for _, parser := range parsers {
+ p, err := parser(data)
+ if err == nil {
+ p.addLegacyFrameInfo()
+ return p, nil
+ }
+ if err != errUnrecognized {
+ return nil, err
+ }
+ }
+ return nil, errUnrecognized
+}
+
+// ParseUncompressed parses an uncompressed protobuf into a profile.
+func ParseUncompressed(data []byte) (*Profile, error) {
+ if len(data) == 0 {
+ return nil, errNoData
+ }
+ p := &Profile{}
+ if err := unmarshal(data, p); err != nil {
+ return nil, err
+ }
+
+ if err := p.postDecode(); err != nil {
+ return nil, err
+ }
+
+ return p, nil
+}
+
+var libRx = regexp.MustCompile(`([.]so$|[.]so[._][0-9]+)`)
+
+// massageMappings applies heuristic-based changes to the profile
+// mappings to account for quirks of some environments.
+func (p *Profile) massageMappings() {
+ // Merge adjacent regions with matching names, checking that the offsets match
+ if len(p.Mapping) > 1 {
+ mappings := []*Mapping{p.Mapping[0]}
+ for _, m := range p.Mapping[1:] {
+ lm := mappings[len(mappings)-1]
+ if adjacent(lm, m) {
+ lm.Limit = m.Limit
+ if m.File != "" {
+ lm.File = m.File
+ }
+ if m.BuildID != "" {
+ lm.BuildID = m.BuildID
+ }
+ p.updateLocationMapping(m, lm)
+ continue
+ }
+ mappings = append(mappings, m)
+ }
+ p.Mapping = mappings
+ }
+
+ // Use heuristics to identify main binary and move it to the top of the list of mappings
+ for i, m := range p.Mapping {
+ file := strings.TrimSpace(strings.Replace(m.File, "(deleted)", "", -1))
+ if len(file) == 0 {
+ continue
+ }
+ if len(libRx.FindStringSubmatch(file)) > 0 {
+ continue
+ }
+ if file[0] == '[' {
+ continue
+ }
+ // Swap what we guess is main to position 0.
+ p.Mapping[0], p.Mapping[i] = p.Mapping[i], p.Mapping[0]
+ break
+ }
+
+ // Keep the mapping IDs neatly sorted
+ for i, m := range p.Mapping {
+ m.ID = uint64(i + 1)
+ }
+}
+
+// adjacent returns whether two mapping entries represent the same
+// mapping that has been split into two. It checks that their address
+// ranges are contiguous and that their offsets are consistent, when
+// those fields are available.
+func adjacent(m1, m2 *Mapping) bool {
+ if m1.File != "" && m2.File != "" {
+ if m1.File != m2.File {
+ return false
+ }
+ }
+ if m1.BuildID != "" && m2.BuildID != "" {
+ if m1.BuildID != m2.BuildID {
+ return false
+ }
+ }
+ if m1.Limit != m2.Start {
+ return false
+ }
+ if m1.Offset != 0 && m2.Offset != 0 {
+ offset := m1.Offset + (m1.Limit - m1.Start)
+ if offset != m2.Offset {
+ return false
+ }
+ }
+ return true
+}
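+
+// Concrete case (hypothetical values): m1 = [0x1000, 0x2000) with offset
+// 0x1000 and m2 = [0x2000, 0x3000) with offset 0x2000 for the same file are
+// adjacent: m1.Limit == m2.Start and 0x1000 + (0x2000-0x1000) == m2.Offset.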
+
+func (p *Profile) updateLocationMapping(from, to *Mapping) {
+ for _, l := range p.Location {
+ if l.Mapping == from {
+ l.Mapping = to
+ }
+ }
+}
+
+func serialize(p *Profile) []byte {
+ p.encodeMu.Lock()
+ p.preEncode()
+ b := marshal(p)
+ p.encodeMu.Unlock()
+ return b
+}
+
+// Write writes the profile as a gzip-compressed marshaled protobuf.
+func (p *Profile) Write(w io.Writer) error {
+ zw := gzip.NewWriter(w)
+ defer zw.Close()
+ _, err := zw.Write(serialize(p))
+ return err
+}
+
+// WriteUncompressed writes the profile as a marshaled protobuf.
+func (p *Profile) WriteUncompressed(w io.Writer) error {
+ _, err := w.Write(serialize(p))
+ return err
+}
+
+// CheckValid tests whether the profile is valid. Checks include, but are
+// not limited to:
+// - len(Profile.Sample[n].value) == len(Profile.value_unit)
+// - Sample.id has a corresponding Profile.Location
+func (p *Profile) CheckValid() error {
+ // Check that sample values are consistent
+ sampleLen := len(p.SampleType)
+ if sampleLen == 0 && len(p.Sample) != 0 {
+ return fmt.Errorf("missing sample type information")
+ }
+ for _, s := range p.Sample {
+ if s == nil {
+ return fmt.Errorf("profile has nil sample")
+ }
+ if len(s.Value) != sampleLen {
+ return fmt.Errorf("mismatch: sample has %d values vs. %d types", len(s.Value), len(p.SampleType))
+ }
+ for _, l := range s.Location {
+ if l == nil {
+ return fmt.Errorf("sample has nil location")
+ }
+ }
+ }
+
+ // Check that all mappings/locations/functions are in the tables
+ // Check that there are no duplicate ids
+ mappings := make(map[uint64]*Mapping, len(p.Mapping))
+ for _, m := range p.Mapping {
+ if m == nil {
+ return fmt.Errorf("profile has nil mapping")
+ }
+ if m.ID == 0 {
+ return fmt.Errorf("found mapping with reserved ID=0")
+ }
+ if mappings[m.ID] != nil {
+ return fmt.Errorf("multiple mappings with same id: %d", m.ID)
+ }
+ mappings[m.ID] = m
+ }
+ functions := make(map[uint64]*Function, len(p.Function))
+ for _, f := range p.Function {
+ if f == nil {
+ return fmt.Errorf("profile has nil function")
+ }
+ if f.ID == 0 {
+ return fmt.Errorf("found function with reserved ID=0")
+ }
+ if functions[f.ID] != nil {
+ return fmt.Errorf("multiple functions with same id: %d", f.ID)
+ }
+ functions[f.ID] = f
+ }
+ locations := make(map[uint64]*Location, len(p.Location))
+ for _, l := range p.Location {
+ if l == nil {
+ return fmt.Errorf("profile has nil location")
+ }
+ if l.ID == 0 {
+ return fmt.Errorf("found location with reserved id=0")
+ }
+ if locations[l.ID] != nil {
+ return fmt.Errorf("multiple locations with same id: %d", l.ID)
+ }
+ locations[l.ID] = l
+ if m := l.Mapping; m != nil {
+ if m.ID == 0 || mappings[m.ID] != m {
+ return fmt.Errorf("inconsistent mapping %p: %d", m, m.ID)
+ }
+ }
+ for _, ln := range l.Line {
+ f := ln.Function
+ if f == nil {
+ return fmt.Errorf("location id: %d has a line with nil function", l.ID)
+ }
+ if f.ID == 0 || functions[f.ID] != f {
+ return fmt.Errorf("inconsistent function %p: %d", f, f.ID)
+ }
+ }
+ }
+ return nil
+}
+
+// Aggregate merges the locations in the profile into equivalence
+// classes preserving the request attributes. It also updates the
+// samples to point to the merged locations.
+func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, columnnumber, address bool) error {
+ for _, m := range p.Mapping {
+ m.HasInlineFrames = m.HasInlineFrames && inlineFrame
+ m.HasFunctions = m.HasFunctions && function
+ m.HasFilenames = m.HasFilenames && filename
+ m.HasLineNumbers = m.HasLineNumbers && linenumber
+ }
+
+ // Aggregate functions
+ if !function || !filename {
+ for _, f := range p.Function {
+ if !function {
+ f.Name = ""
+ f.SystemName = ""
+ }
+ if !filename {
+ f.Filename = ""
+ }
+ }
+ }
+
+ // Aggregate locations
+ if !inlineFrame || !address || !linenumber || !columnnumber {
+ for _, l := range p.Location {
+ if !inlineFrame && len(l.Line) > 1 {
+ l.Line = l.Line[len(l.Line)-1:]
+ }
+ if !linenumber {
+ for i := range l.Line {
+ l.Line[i].Line = 0
+ l.Line[i].Column = 0
+ }
+ }
+ if !columnnumber {
+ for i := range l.Line {
+ l.Line[i].Column = 0
+ }
+ }
+ if !address {
+ l.Address = 0
+ }
+ }
+ }
+
+ return p.CheckValid()
+}
+
+// NumLabelUnits returns a map of numeric label keys to the units
+// associated with those keys and a map of those keys to any units
+// that were encountered but not used.
+// The unit for a given key is the first unit encountered for that key. If
+// multiple units are encountered for values paired with a particular key,
+// the first unit encountered is used and all other units are returned, in
+// sorted order, in the map of ignored units.
+// If no units are encountered for a particular key, the unit is inferred
+// from the key itself.
+func (p *Profile) NumLabelUnits() (map[string]string, map[string][]string) {
+ numLabelUnits := map[string]string{}
+ ignoredUnits := map[string]map[string]bool{}
+ encounteredKeys := map[string]bool{}
+
+ // Determine units based on numeric tags for each sample.
+ for _, s := range p.Sample {
+ for k := range s.NumLabel {
+ encounteredKeys[k] = true
+ for _, unit := range s.NumUnit[k] {
+ if unit == "" {
+ continue
+ }
+ if wantUnit, ok := numLabelUnits[k]; !ok {
+ numLabelUnits[k] = unit
+ } else if wantUnit != unit {
+ if v, ok := ignoredUnits[k]; ok {
+ v[unit] = true
+ } else {
+ ignoredUnits[k] = map[string]bool{unit: true}
+ }
+ }
+ }
+ }
+ }
+ // Infer units for keys without any units associated with
+ // numeric tag values.
+ for key := range encounteredKeys {
+ unit := numLabelUnits[key]
+ if unit == "" {
+ switch key {
+ case "alignment", "request":
+ numLabelUnits[key] = "bytes"
+ default:
+ numLabelUnits[key] = key
+ }
+ }
+ }
+
+ // Copy ignored units into more readable format
+ unitsIgnored := make(map[string][]string, len(ignoredUnits))
+ for key, values := range ignoredUnits {
+ units := make([]string, len(values))
+ i := 0
+ for unit := range values {
+ units[i] = unit
+ i++
+ }
+ sort.Strings(units)
+ unitsIgnored[key] = units
+ }
+
+ return numLabelUnits, unitsIgnored
+}
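+
+// Example behaviour (made-up labels): if samples carry units "bytes" and
+// "kilobytes" for the key "request", the first unit encountered wins and
+// the other is returned in the ignored map, e.g. {"request": "bytes"} and
+// {"request": ["kilobytes"]}; a key such as "alignment" that never carries
+// a unit is inferred as "bytes".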
+
+// String dumps a text representation of a profile. Intended mainly
+// for debugging purposes.
+func (p *Profile) String() string {
+ ss := make([]string, 0, len(p.Comments)+len(p.Sample)+len(p.Mapping)+len(p.Location))
+ for _, c := range p.Comments {
+ ss = append(ss, "Comment: "+c)
+ }
+ if pt := p.PeriodType; pt != nil {
+ ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit))
+ }
+ ss = append(ss, fmt.Sprintf("Period: %d", p.Period))
+ if p.TimeNanos != 0 {
+ ss = append(ss, fmt.Sprintf("Time: %v", time.Unix(0, p.TimeNanos)))
+ }
+ if p.DurationNanos != 0 {
+ ss = append(ss, fmt.Sprintf("Duration: %.4v", time.Duration(p.DurationNanos)))
+ }
+
+ ss = append(ss, "Samples:")
+ var sh1 string
+ for _, s := range p.SampleType {
+ dflt := ""
+ if s.Type == p.DefaultSampleType {
+ dflt = "[dflt]"
+ }
+ sh1 = sh1 + fmt.Sprintf("%s/%s%s ", s.Type, s.Unit, dflt)
+ }
+ ss = append(ss, strings.TrimSpace(sh1))
+ for _, s := range p.Sample {
+ ss = append(ss, s.string())
+ }
+
+ ss = append(ss, "Locations")
+ for _, l := range p.Location {
+ ss = append(ss, l.string())
+ }
+
+ ss = append(ss, "Mappings")
+ for _, m := range p.Mapping {
+ ss = append(ss, m.string())
+ }
+
+ return strings.Join(ss, "\n") + "\n"
+}
+
+// string dumps a text representation of a mapping. Intended mainly
+// for debugging purposes.
+func (m *Mapping) string() string {
+ bits := ""
+ if m.HasFunctions {
+ bits = bits + "[FN]"
+ }
+ if m.HasFilenames {
+ bits = bits + "[FL]"
+ }
+ if m.HasLineNumbers {
+ bits = bits + "[LN]"
+ }
+ if m.HasInlineFrames {
+ bits = bits + "[IN]"
+ }
+ return fmt.Sprintf("%d: %#x/%#x/%#x %s %s %s",
+ m.ID,
+ m.Start, m.Limit, m.Offset,
+ m.File,
+ m.BuildID,
+ bits)
+}
+
+// string dumps a text representation of a location. Intended mainly
+// for debugging purposes.
+func (l *Location) string() string {
+ ss := []string{}
+ locStr := fmt.Sprintf("%6d: %#x ", l.ID, l.Address)
+ if m := l.Mapping; m != nil {
+ locStr = locStr + fmt.Sprintf("M=%d ", m.ID)
+ }
+ if l.IsFolded {
+ locStr = locStr + "[F] "
+ }
+ if len(l.Line) == 0 {
+ ss = append(ss, locStr)
+ }
+ for li := range l.Line {
+ lnStr := "??"
+ if fn := l.Line[li].Function; fn != nil {
+ lnStr = fmt.Sprintf("%s %s:%d:%d s=%d",
+ fn.Name,
+ fn.Filename,
+ l.Line[li].Line,
+ l.Line[li].Column,
+ fn.StartLine)
+ if fn.Name != fn.SystemName {
+ lnStr = lnStr + "(" + fn.SystemName + ")"
+ }
+ }
+ ss = append(ss, locStr+lnStr)
+ // Do not print location details past the first line
+ locStr = " "
+ }
+ return strings.Join(ss, "\n")
+}
+
+// string dumps a text representation of a sample. Intended mainly
+// for debugging purposes.
+func (s *Sample) string() string {
+ ss := []string{}
+ var sv string
+ for _, v := range s.Value {
+ sv = fmt.Sprintf("%s %10d", sv, v)
+ }
+ sv = sv + ": "
+ for _, l := range s.Location {
+ sv = sv + fmt.Sprintf("%d ", l.ID)
+ }
+ ss = append(ss, sv)
+ const labelHeader = " "
+ if len(s.Label) > 0 {
+ ss = append(ss, labelHeader+labelsToString(s.Label))
+ }
+ if len(s.NumLabel) > 0 {
+ ss = append(ss, labelHeader+numLabelsToString(s.NumLabel, s.NumUnit))
+ }
+ return strings.Join(ss, "\n")
+}
+
+// labelsToString returns a string representation of a
+// map representing labels.
+func labelsToString(labels map[string][]string) string {
+ ls := []string{}
+ for k, v := range labels {
+ ls = append(ls, fmt.Sprintf("%s:%v", k, v))
+ }
+ sort.Strings(ls)
+ return strings.Join(ls, " ")
+}
+
+// numLabelsToString returns a string representation of a map
+// representing numeric labels.
+func numLabelsToString(numLabels map[string][]int64, numUnits map[string][]string) string {
+ ls := []string{}
+ for k, v := range numLabels {
+ units := numUnits[k]
+ var labelString string
+ if len(units) == len(v) {
+ values := make([]string, len(v))
+ for i, vv := range v {
+ values[i] = fmt.Sprintf("%d %s", vv, units[i])
+ }
+ labelString = fmt.Sprintf("%s:%v", k, values)
+ } else {
+ labelString = fmt.Sprintf("%s:%v", k, v)
+ }
+ ls = append(ls, labelString)
+ }
+ sort.Strings(ls)
+ return strings.Join(ls, " ")
+}
+
+// SetLabel sets the specified key to the specified value for all samples in the
+// profile.
+func (p *Profile) SetLabel(key string, value []string) {
+ for _, sample := range p.Sample {
+ if sample.Label == nil {
+ sample.Label = map[string][]string{key: value}
+ } else {
+ sample.Label[key] = value
+ }
+ }
+}
+
+// RemoveLabel removes all labels associated with the specified key for all
+// samples in the profile.
+func (p *Profile) RemoveLabel(key string) {
+ for _, sample := range p.Sample {
+ delete(sample.Label, key)
+ }
+}
+
+// HasLabel returns true if a sample has a label with the indicated key and value.
+func (s *Sample) HasLabel(key, value string) bool {
+ for _, v := range s.Label[key] {
+ if v == value {
+ return true
+ }
+ }
+ return false
+}
+
+// SetNumLabel sets the specified key to the specified value for all samples in the
+// profile. "unit" is a slice that describes the unit that each corresponding member
+// of "value" is measured in (e.g. bytes or seconds). If there is no relevant
+// unit for a given value, that member of "unit" should be the empty string.
+// "unit" must either have the same length as "value", or be nil.
+func (p *Profile) SetNumLabel(key string, value []int64, unit []string) {
+ for _, sample := range p.Sample {
+ if sample.NumLabel == nil {
+ sample.NumLabel = map[string][]int64{key: value}
+ } else {
+ sample.NumLabel[key] = value
+ }
+ if sample.NumUnit == nil {
+ sample.NumUnit = map[string][]string{key: unit}
+ } else {
+ sample.NumUnit[key] = unit
+ }
+ }
+}
+
+// RemoveNumLabel removes all numerical labels associated with the specified key for all
+// samples in the profile.
+func (p *Profile) RemoveNumLabel(key string) {
+ for _, sample := range p.Sample {
+ delete(sample.NumLabel, key)
+ delete(sample.NumUnit, key)
+ }
+}
+
+// DiffBaseSample returns true if a sample belongs to the diff base and false
+// otherwise.
+func (s *Sample) DiffBaseSample() bool {
+ return s.HasLabel("pprof::base", "true")
+}
+
+// Scale multiplies all sample values in a profile by a constant and keeps
+// only samples that have at least one non-zero value.
+func (p *Profile) Scale(ratio float64) {
+ if ratio == 1 {
+ return
+ }
+ ratios := make([]float64, len(p.SampleType))
+ for i := range p.SampleType {
+ ratios[i] = ratio
+ }
+ p.ScaleN(ratios)
+}
+
+// ScaleN multiplies each value in each sample by the corresponding ratio and
+// keeps only samples that have at least one non-zero value.
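+//
+// Illustrative sketch (not from the upstream source): for a profile whose
+// sample types are [samples, cpu-nanoseconds],
+//
+//	p.ScaleN([]float64{1, 0.5})
+//
+// leaves each sample count untouched and halves each cpu-nanoseconds value.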
+func (p *Profile) ScaleN(ratios []float64) error {
+ if len(p.SampleType) != len(ratios) {
+ return fmt.Errorf("mismatched scale ratios, got %d, want %d", len(ratios), len(p.SampleType))
+ }
+ allOnes := true
+ for _, r := range ratios {
+ if r != 1 {
+ allOnes = false
+ break
+ }
+ }
+ if allOnes {
+ return nil
+ }
+ fillIdx := 0
+ for _, s := range p.Sample {
+ keepSample := false
+ for i, v := range s.Value {
+ if ratios[i] != 1 {
+ val := int64(math.Round(float64(v) * ratios[i]))
+ s.Value[i] = val
+ keepSample = keepSample || val != 0
+ }
+ }
+ if keepSample {
+ p.Sample[fillIdx] = s
+ fillIdx++
+ }
+ }
+ p.Sample = p.Sample[:fillIdx]
+ return nil
+}
+
+// HasFunctions determines if all locations in this profile have
+// symbolized function information.
+func (p *Profile) HasFunctions() bool {
+ for _, l := range p.Location {
+ if l.Mapping != nil && !l.Mapping.HasFunctions {
+ return false
+ }
+ }
+ return true
+}
+
+// HasFileLines determines if all locations in this profile have
+// symbolized file and line number information.
+func (p *Profile) HasFileLines() bool {
+ for _, l := range p.Location {
+ if l.Mapping != nil && (!l.Mapping.HasFilenames || !l.Mapping.HasLineNumbers) {
+ return false
+ }
+ }
+ return true
+}
+
+// Unsymbolizable returns true if a mapping points to a binary for which
+// locations can't be symbolized in principle, at least for now. Examples are
+// "[vdso]", "[vsyscall]" and some others; see the code.
+func (m *Mapping) Unsymbolizable() bool {
+ name := filepath.Base(m.File)
+ return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") || m.File == "//anon"
+}
+
+// Copy makes a fully independent copy of a profile.
+func (p *Profile) Copy() *Profile {
+ pp := &Profile{}
+ if err := unmarshal(serialize(p), pp); err != nil {
+ panic(err)
+ }
+ if err := pp.postDecode(); err != nil {
+ panic(err)
+ }
+
+ return pp
+}
diff --git a/vendor/github.com/google/pprof/profile/proto.go b/vendor/github.com/google/pprof/profile/proto.go
new file mode 100644
index 0000000..a15696b
--- /dev/null
+++ b/vendor/github.com/google/pprof/profile/proto.go
@@ -0,0 +1,367 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file is a simple protocol buffer encoder and decoder.
+// The format is described at
+// https://developers.google.com/protocol-buffers/docs/encoding
+//
+// A protocol message must implement the message interface:
+// decoder() []decoder
+// encode(*buffer)
+//
+// The decoder method returns a slice indexed by field number that gives the
+// function to decode that field.
+// The encode method encodes its receiver into the given buffer.
+//
+// The two methods are simple enough to be implemented by hand rather than
+// by using a protocol compiler.
+//
+// See profile.go for examples of messages implementing this interface.
+//
+// There is no support for groups, message sets, or "has" bits.
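+//
+// As an illustrative sketch (not part of the upstream source), a minimal
+// message with a single int64 field numbered 1 could be implemented as:
+//
+//	type counter struct{ n int64 }
+//
+//	func (c *counter) decoder() []decoder {
+//		return []decoder{
+//			nil, // there is no field number 0
+//			func(b *buffer, m message) error { return decodeInt64(b, &m.(*counter).n) },
+//		}
+//	}
+//
+//	func (c *counter) encode(b *buffer) { encodeInt64Opt(b, 1, c.n) }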
+
+package profile
+
+import (
+ "errors"
+ "fmt"
+)
+
+type buffer struct {
+ field int // field tag
+ typ int // proto wire type code for field
+ u64 uint64
+ data []byte
+ tmp [16]byte
+ tmpLines []Line // temporary storage used while decoding "repeated Line".
+}
+
+type decoder func(*buffer, message) error
+
+type message interface {
+ decoder() []decoder
+ encode(*buffer)
+}
+
+func marshal(m message) []byte {
+ var b buffer
+ m.encode(&b)
+ return b.data
+}
+
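+// encodeVarint appends x to b.data using the protocol buffer base-128
+// varint encoding: seven bits per byte, least-significant group first,
+// with the high bit set on every byte except the last. For example,
+// 300 is encoded as the two bytes 0xAC 0x02.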
+func encodeVarint(b *buffer, x uint64) {
+ for x >= 128 {
+ b.data = append(b.data, byte(x)|0x80)
+ x >>= 7
+ }
+ b.data = append(b.data, byte(x))
+}
+
+func encodeLength(b *buffer, tag int, len int) {
+ encodeVarint(b, uint64(tag)<<3|2)
+ encodeVarint(b, uint64(len))
+}
+
+func encodeUint64(b *buffer, tag int, x uint64) {
+ // append varint to b.data
+ encodeVarint(b, uint64(tag)<<3)
+ encodeVarint(b, x)
+}
+
+func encodeUint64s(b *buffer, tag int, x []uint64) {
+ if len(x) > 2 {
+ // Use packed encoding
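+ // The values are written first and the tag/length header after
+ // them; the two regions are then rotated, using b.tmp as scratch
+ // space, so that the header ends up in front of the values.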
+ n1 := len(b.data)
+ for _, u := range x {
+ encodeVarint(b, u)
+ }
+ n2 := len(b.data)
+ encodeLength(b, tag, n2-n1)
+ n3 := len(b.data)
+ copy(b.tmp[:], b.data[n2:n3])
+ copy(b.data[n1+(n3-n2):], b.data[n1:n2])
+ copy(b.data[n1:], b.tmp[:n3-n2])
+ return
+ }
+ for _, u := range x {
+ encodeUint64(b, tag, u)
+ }
+}
+
+func encodeUint64Opt(b *buffer, tag int, x uint64) {
+ if x == 0 {
+ return
+ }
+ encodeUint64(b, tag, x)
+}
+
+func encodeInt64(b *buffer, tag int, x int64) {
+ u := uint64(x)
+ encodeUint64(b, tag, u)
+}
+
+func encodeInt64s(b *buffer, tag int, x []int64) {
+ if len(x) > 2 {
+ // Use packed encoding
+ n1 := len(b.data)
+ for _, u := range x {
+ encodeVarint(b, uint64(u))
+ }
+ n2 := len(b.data)
+ encodeLength(b, tag, n2-n1)
+ n3 := len(b.data)
+ copy(b.tmp[:], b.data[n2:n3])
+ copy(b.data[n1+(n3-n2):], b.data[n1:n2])
+ copy(b.data[n1:], b.tmp[:n3-n2])
+ return
+ }
+ for _, u := range x {
+ encodeInt64(b, tag, u)
+ }
+}
+
+func encodeInt64Opt(b *buffer, tag int, x int64) {
+ if x == 0 {
+ return
+ }
+ encodeInt64(b, tag, x)
+}
+
+func encodeString(b *buffer, tag int, x string) {
+ encodeLength(b, tag, len(x))
+ b.data = append(b.data, x...)
+}
+
+func encodeStrings(b *buffer, tag int, x []string) {
+ for _, s := range x {
+ encodeString(b, tag, s)
+ }
+}
+
+func encodeBool(b *buffer, tag int, x bool) {
+ if x {
+ encodeUint64(b, tag, 1)
+ } else {
+ encodeUint64(b, tag, 0)
+ }
+}
+
+func encodeBoolOpt(b *buffer, tag int, x bool) {
+ if x {
+ encodeBool(b, tag, x)
+ }
+}
+
+func encodeMessage(b *buffer, tag int, m message) {
+ n1 := len(b.data)
+ m.encode(b)
+ n2 := len(b.data)
+ encodeLength(b, tag, n2-n1)
+ n3 := len(b.data)
+ copy(b.tmp[:], b.data[n2:n3])
+ copy(b.data[n1+(n3-n2):], b.data[n1:n2])
+ copy(b.data[n1:], b.tmp[:n3-n2])
+}
+
+func unmarshal(data []byte, m message) (err error) {
+ b := buffer{data: data, typ: 2}
+ return decodeMessage(&b, m)
+}
+
+func le64(p []byte) uint64 {
+ return uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 | uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56
+}
+
+func le32(p []byte) uint32 {
+ return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
+}
+
+func decodeVarint(data []byte) (uint64, []byte, error) {
+ var u uint64
+ for i := 0; ; i++ {
+ if i >= 10 || i >= len(data) {
+ return 0, nil, errors.New("bad varint")
+ }
+ u |= uint64(data[i]&0x7F) << uint(7*i)
+ if data[i]&0x80 == 0 {
+ return u, data[i+1:], nil
+ }
+ }
+}
+
+func decodeField(b *buffer, data []byte) ([]byte, error) {
+ x, data, err := decodeVarint(data)
+ if err != nil {
+ return nil, err
+ }
+ b.field = int(x >> 3)
+ b.typ = int(x & 7)
+ b.data = nil
+ b.u64 = 0
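+ // Proto wire types: 0 = varint, 1 = 64-bit fixed, 2 = length-delimited,
+ // 5 = 32-bit fixed. The deprecated group types (3 and 4) are not supported.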
+ switch b.typ {
+ case 0:
+ b.u64, data, err = decodeVarint(data)
+ if err != nil {
+ return nil, err
+ }
+ case 1:
+ if len(data) < 8 {
+ return nil, errors.New("not enough data")
+ }
+ b.u64 = le64(data[:8])
+ data = data[8:]
+ case 2:
+ var n uint64
+ n, data, err = decodeVarint(data)
+ if err != nil {
+ return nil, err
+ }
+ if n > uint64(len(data)) {
+ return nil, errors.New("too much data")
+ }
+ b.data = data[:n]
+ data = data[n:]
+ case 5:
+ if len(data) < 4 {
+ return nil, errors.New("not enough data")
+ }
+ b.u64 = uint64(le32(data[:4]))
+ data = data[4:]
+ default:
+ return nil, fmt.Errorf("unknown wire type: %d", b.typ)
+ }
+
+ return data, nil
+}
+
+func checkType(b *buffer, typ int) error {
+ if b.typ != typ {
+ return errors.New("type mismatch")
+ }
+ return nil
+}
+
+func decodeMessage(b *buffer, m message) error {
+ if err := checkType(b, 2); err != nil {
+ return err
+ }
+ dec := m.decoder()
+ data := b.data
+ for len(data) > 0 {
+ // pull varint field# + type
+ var err error
+ data, err = decodeField(b, data)
+ if err != nil {
+ return err
+ }
+ if b.field >= len(dec) || dec[b.field] == nil {
+ continue
+ }
+ if err := dec[b.field](b, m); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func decodeInt64(b *buffer, x *int64) error {
+ if err := checkType(b, 0); err != nil {
+ return err
+ }
+ *x = int64(b.u64)
+ return nil
+}
+
+func decodeInt64s(b *buffer, x *[]int64) error {
+ if b.typ == 2 {
+ // Packed encoding
+ data := b.data
+ for len(data) > 0 {
+ var u uint64
+ var err error
+
+ if u, data, err = decodeVarint(data); err != nil {
+ return err
+ }
+ *x = append(*x, int64(u))
+ }
+ return nil
+ }
+ var i int64
+ if err := decodeInt64(b, &i); err != nil {
+ return err
+ }
+ *x = append(*x, i)
+ return nil
+}
+
+func decodeUint64(b *buffer, x *uint64) error {
+ if err := checkType(b, 0); err != nil {
+ return err
+ }
+ *x = b.u64
+ return nil
+}
+
+func decodeUint64s(b *buffer, x *[]uint64) error {
+ if b.typ == 2 {
+ data := b.data
+ // Packed encoding
+ for len(data) > 0 {
+ var u uint64
+ var err error
+
+ if u, data, err = decodeVarint(data); err != nil {
+ return err
+ }
+ *x = append(*x, u)
+ }
+ return nil
+ }
+ var u uint64
+ if err := decodeUint64(b, &u); err != nil {
+ return err
+ }
+ *x = append(*x, u)
+ return nil
+}
+
+func decodeString(b *buffer, x *string) error {
+ if err := checkType(b, 2); err != nil {
+ return err
+ }
+ *x = string(b.data)
+ return nil
+}
+
+func decodeStrings(b *buffer, x *[]string) error {
+ var s string
+ if err := decodeString(b, &s); err != nil {
+ return err
+ }
+ *x = append(*x, s)
+ return nil
+}
+
+func decodeBool(b *buffer, x *bool) error {
+ if err := checkType(b, 0); err != nil {
+ return err
+ }
+ if int64(b.u64) == 0 {
+ *x = false
+ } else {
+ *x = true
+ }
+ return nil
+}
diff --git a/vendor/github.com/google/pprof/profile/prune.go b/vendor/github.com/google/pprof/profile/prune.go
new file mode 100644
index 0000000..b2f9fd5
--- /dev/null
+++ b/vendor/github.com/google/pprof/profile/prune.go
@@ -0,0 +1,194 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Implements methods to remove frames from profiles.
+
+package profile
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+)
+
+var (
+ reservedNames = []string{"(anonymous namespace)", "operator()"}
+ bracketRx = func() *regexp.Regexp {
+ var quotedNames []string
+ for _, name := range append(reservedNames, "(") {
+ quotedNames = append(quotedNames, regexp.QuoteMeta(name))
+ }
+ return regexp.MustCompile(strings.Join(quotedNames, "|"))
+ }()
+)
+
+// simplifyFunc does some primitive simplification of function names.
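+//
+// For example (illustrative), ".runtime.mallocgc" loses its PPC ELF v1
+// leading dot and "std::vector<int>::push_back(int&&)" is trimmed to
+// "std::vector<int>::push_back".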
+func simplifyFunc(f string) string {
+ // Account for leading '.' on the PPC ELF v1 ABI.
+ funcName := strings.TrimPrefix(f, ".")
+ // Account for unsimplified names -- try to remove the argument list by trimming
+ // starting from the first '(', but skipping reserved names that have '('.
+ for _, ind := range bracketRx.FindAllStringSubmatchIndex(funcName, -1) {
+ foundReserved := false
+ for _, res := range reservedNames {
+ if funcName[ind[0]:ind[1]] == res {
+ foundReserved = true
+ break
+ }
+ }
+ if !foundReserved {
+ funcName = funcName[:ind[0]]
+ break
+ }
+ }
+ return funcName
+}
+
+// Prune removes all nodes beneath a node matching dropRx, and not
+// matching keepRx. If the root node of a Sample matches, the sample
+// will have an empty stack.
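+//
+// Illustrative sketch (not from the upstream source): remove everything
+// called beneath runtime internals except a hypothetical runtime.keepMe:
+//
+//	p.Prune(regexp.MustCompile(`^runtime\.`), regexp.MustCompile(`^runtime\.keepMe$`))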
+func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) {
+ prune := make(map[uint64]bool)
+ pruneBeneath := make(map[uint64]bool)
+
+ // simplifyFunc can be expensive, so cache results.
+ // Note that the same function name can be encountered many times due
+ // to different lines and addresses in the same function.
+ pruneCache := map[string]bool{} // Map from function to whether or not to prune
+ pruneFromHere := func(s string) bool {
+ if r, ok := pruneCache[s]; ok {
+ return r
+ }
+ funcName := simplifyFunc(s)
+ if dropRx.MatchString(funcName) {
+ if keepRx == nil || !keepRx.MatchString(funcName) {
+ pruneCache[s] = true
+ return true
+ }
+ }
+ pruneCache[s] = false
+ return false
+ }
+
+ for _, loc := range p.Location {
+ var i int
+ for i = len(loc.Line) - 1; i >= 0; i-- {
+ if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
+ if pruneFromHere(fn.Name) {
+ break
+ }
+ }
+ }
+
+ if i >= 0 {
+ // Found matching entry to prune.
+ pruneBeneath[loc.ID] = true
+
+ // Remove the matching location.
+ if i == len(loc.Line)-1 {
+ // Matched the top entry: prune the whole location.
+ prune[loc.ID] = true
+ } else {
+ loc.Line = loc.Line[i+1:]
+ }
+ }
+ }
+
+ // Prune locs from each Sample
+ for _, sample := range p.Sample {
+ // Scan from the root to the leaves to find the prune location.
+ // Do not prune frames before the first user frame, to avoid
+ // pruning everything.
+ foundUser := false
+ for i := len(sample.Location) - 1; i >= 0; i-- {
+ id := sample.Location[i].ID
+ if !prune[id] && !pruneBeneath[id] {
+ foundUser = true
+ continue
+ }
+ if !foundUser {
+ continue
+ }
+ if prune[id] {
+ sample.Location = sample.Location[i+1:]
+ break
+ }
+ if pruneBeneath[id] {
+ sample.Location = sample.Location[i:]
+ break
+ }
+ }
+ }
+}
+
+// RemoveUninteresting prunes and elides profiles using built-in
+// tables of uninteresting function names.
+func (p *Profile) RemoveUninteresting() error {
+ var keep, drop *regexp.Regexp
+ var err error
+
+ if p.DropFrames != "" {
+ if drop, err = regexp.Compile("^(" + p.DropFrames + ")$"); err != nil {
+ return fmt.Errorf("failed to compile regexp %s: %v", p.DropFrames, err)
+ }
+ if p.KeepFrames != "" {
+ if keep, err = regexp.Compile("^(" + p.KeepFrames + ")$"); err != nil {
+ return fmt.Errorf("failed to compile regexp %s: %v", p.KeepFrames, err)
+ }
+ }
+ p.Prune(drop, keep)
+ }
+ return nil
+}
+
+// PruneFrom removes all nodes beneath the lowest node matching dropRx, not including itself.
+//
+// Please see the example below to understand this method as well as
+// the difference from Prune method.
+//
+// A sample contains Location of [A,B,C,B,D] where D is the top frame and there's no inline.
+//
+// PruneFrom(A) returns [A,B,C,B,D] because there's no node beneath A.
+// Prune(A, nil) returns [B,C,B,D] by removing A itself.
+//
+// PruneFrom(B) returns [B,C,B,D] by removing all nodes beneath the first B when scanning from the bottom.
+// Prune(B, nil) returns [D] because a matching node is found by scanning from the root.
+func (p *Profile) PruneFrom(dropRx *regexp.Regexp) {
+ pruneBeneath := make(map[uint64]bool)
+
+ for _, loc := range p.Location {
+ for i := 0; i < len(loc.Line); i++ {
+ if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
+ funcName := simplifyFunc(fn.Name)
+ if dropRx.MatchString(funcName) {
+ // Found matching entry to prune.
+ pruneBeneath[loc.ID] = true
+ loc.Line = loc.Line[i:]
+ break
+ }
+ }
+ }
+ }
+
+ // Prune locs from each Sample
+ for _, sample := range p.Sample {
+ // Scan from the bottom leaf to the root to find the prune location.
+ for i, loc := range sample.Location {
+ if pruneBeneath[loc.ID] {
+ sample.Location = sample.Location[i:]
+ break
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/inconshreveable/mousetrap/LICENSE b/vendor/github.com/inconshreveable/mousetrap/LICENSE
new file mode 100644
index 0000000..5f920e9
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2022 Alan Shreve (@inconshreveable)
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/inconshreveable/mousetrap/README.md
new file mode 100644
index 0000000..7a950d1
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/README.md
@@ -0,0 +1,23 @@
+# mousetrap
+
+mousetrap is a tiny library that answers a single question.
+
+On a Windows machine, was the process invoked by someone double-clicking on
+the executable file while browsing in Explorer?
+
+### Motivation
+
+Windows developers unfamiliar with command line tools will often "double-click"
+the executable for a tool. Because most CLI tools print the help and then exit
+when invoked without arguments, this is often very frustrating for those users.
+
+mousetrap provides a way to detect these invocations so that you can provide
+more helpful behavior and instructions on how to run the CLI tool. To see what
+this looks like, both from an organizational and a technical perspective, see
+https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/
+
+### The interface
+
+The library exposes a single interface:
+
+ func StartedByExplorer() (bool)
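+
+A minimal usage sketch (illustrative; the message text is made up):
+
+    package main
+
+    import (
+        "fmt"
+        "time"
+
+        "github.com/inconshreveable/mousetrap"
+    )
+
+    func main() {
+        if mousetrap.StartedByExplorer() {
+            fmt.Println("This is a command line tool; please run it from cmd.exe or PowerShell.")
+            // Pause so the console window Explorer opened stays
+            // visible long enough for the message to be read.
+            time.Sleep(5 * time.Second)
+            return
+        }
+        // ... normal CLI behavior ...
+    }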
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go
new file mode 100644
index 0000000..06a91f0
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/trap_others.go
@@ -0,0 +1,16 @@
+//go:build !windows
+// +build !windows
+
+package mousetrap
+
+// StartedByExplorer returns true if the program was invoked by the user
+// double-clicking on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It can
+// only tell you whether it was launched from explorer.exe.
+//
+// On non-Windows platforms, it always returns false.
+func StartedByExplorer() bool {
+ return false
+}
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go
new file mode 100644
index 0000000..0c56880
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go
@@ -0,0 +1,42 @@
+package mousetrap
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) {
+ snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0)
+ if err != nil {
+ return nil, err
+ }
+ defer syscall.CloseHandle(snapshot)
+ var procEntry syscall.ProcessEntry32
+ procEntry.Size = uint32(unsafe.Sizeof(procEntry))
+ if err = syscall.Process32First(snapshot, &procEntry); err != nil {
+ return nil, err
+ }
+ for {
+ if procEntry.ProcessID == uint32(pid) {
+ return &procEntry, nil
+ }
+ err = syscall.Process32Next(snapshot, &procEntry)
+ if err != nil {
+ return nil, err
+ }
+ }
+}
+
+// StartedByExplorer returns true if the program was invoked by the user double-clicking
+// on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It can
+// only tell you whether it was launched from explorer.exe.
+func StartedByExplorer() bool {
+ pe, err := getProcessEntry(syscall.Getppid())
+ if err != nil {
+ return false
+ }
+ return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:])
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/.gitignore b/vendor/github.com/onsi/ginkgo/v2/.gitignore
new file mode 100644
index 0000000..18793c2
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/.gitignore
@@ -0,0 +1,7 @@
+.DS_Store
+TODO
+tmp/**/*
+*.coverprofile
+.vscode
+.idea/
+*.log
\ No newline at end of file
diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
new file mode 100644
index 0000000..76577dc
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
@@ -0,0 +1,1028 @@
+## 2.19.0
+
+### Features
+
+[Label Sets](https://onsi.github.io/ginkgo/#label-sets) allow for more expressive and flexible label filtering.
+
+## 2.18.0
+
+### Features
+- Add --silence-skips and --force-newlines [f010b65]
+- fail when no tests were run and --fail-on-empty was set [d80eebe]
+
+### Fixes
+- Fix table entry context edge case [42013d6]
+
+### Maintenance
+- Bump golang.org/x/tools from 0.20.0 to 0.21.0 (#1406) [fcf1fd7]
+- Bump github.com/onsi/gomega from 1.33.0 to 1.33.1 (#1399) [8bb14fd]
+- Bump golang.org/x/net from 0.24.0 to 0.25.0 (#1407) [04bfad7]
+
+## 2.17.3
+
+### Fixes
+`ginkgo watch` now ignores hidden files [bde6e00]
+
+## 2.17.2
+
+### Fixes
+- fix: close files [32259c8]
+- fix github output log level for skipped specs [780e7a3]
+
+### Maintenance
+- Bump github.com/google/pprof [d91fe4e]
+- Bump github.com/go-task/slim-sprig to v3 [8cb662e]
+- Bump golang.org/x/net in /integration/_fixtures/version_mismatch_fixture (#1391) [3134422]
+- Bump github-pages from 230 to 231 in /docs (#1384) [eca81b4]
+- Bump golang.org/x/tools from 0.19.0 to 0.20.0 (#1383) [760def8]
+- Bump golang.org/x/net from 0.23.0 to 0.24.0 (#1381) [4ce33f4]
+- Fix test for gomega version bump [f2fcd97]
+- Bump github.com/onsi/gomega from 1.30.0 to 1.33.0 (#1390) [fd622d2]
+- Bump golang.org/x/tools from 0.17.0 to 0.19.0 (#1368) [5474a26]
+- Bump github-pages from 229 to 230 in /docs (#1359) [e6d1170]
+- Bump google.golang.org/protobuf from 1.28.0 to 1.33.0 (#1374) [7f447b2]
+- Bump golang.org/x/net from 0.20.0 to 0.23.0 (#1380) [f15239a]
+
+## 2.17.1
+
+### Fixes
+- If the user sets --seed=0, make sure all parallel nodes get the same seed [af0330d]
+
+## 2.17.0
+
+### Features
+
+- add `--github-output` for nicer output in github actions [e8a2056]
+
+### Maintenance
+
+- fix typo in core_dsl.go [977bc6f]
+- Fix typo in docs [e297e7b]
+
+## 2.16.0
+
+### Features
+- add SpecContext to reporting nodes
+
+### Fixes
+- merge coverages instead of combining them (#1329) (#1340) [23f0cc5]
+- core_dsl: disable Getwd() with environment variable (#1357) [cd418b7]
+
+### Maintenance
+- docs/index.md: Typo [2cebe8d]
+- fix docs [06de431]
+- chore: test with Go 1.22 (#1352) [898cba9]
+- Bump golang.org/x/tools from 0.16.1 to 0.17.0 (#1336) [17ae120]
+- Bump golang.org/x/sys from 0.15.0 to 0.16.0 (#1327) [5a179ed]
+- Bump github.com/go-logr/logr from 1.3.0 to 1.4.1 (#1321) [a1e6b69]
+- Bump github-pages and jekyll-feed in /docs (#1351) [d52951d]
+- Fix docs for handling failures in goroutines (#1339) [4471b2e]
+
+## 2.15.0
+
+### Features
+
+- JUnit reports now interpret Label(owner:X) and set owner to X (see the sketch below). [8f3bd70]
+- include cancellation reason when cancelling spec context [96e915c]
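+
+As a hypothetical example (`route()` is an assumed helper, not part of the release), a spec declared as:
+
+```go
+It("routes packets across nodes", Label("owner:networking"), func() {
+	Expect(route()).To(Succeed())
+})
+```
+
+would have its JUnit test case's owner set to `networking`.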
+
+### Fixes
+
+- emit output of failed go tool cover invocation so users can try to debug things for themselves [c245d09]
+- fix outline when using nodot in ginkgo v2 [dca77c8]
+- Document areas where GinkgoT() behaves differently from testing.T [dbaf18f]
+- bugfix(docs): use Unsetenv instead of Clearenv (#1337) [6f67a14]
+
+### Maintenance
+
+- Bump to go 1.20 [4fcd0b3]
+
+## 2.14.0
+
+### Features
+You can now use `GinkgoTB()` when you need an instance of `testing.TB` to pass to a library.
+
+Prior to this release table testing only supported generating individual `It`s for each test entry. `DescribeTableSubtree` extends table testing support to entire testing subtrees - under the hood `DescribeTableSubtree` generates a new container for each entry and invokes your function to fill out the container. See the [docs](https://onsi.github.io/ginkgo/#generating-subtree-tables) to learn more.
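+
+A sketch of the new API (illustrative; `server` is an assumed test fixture):
+
+```go
+DescribeTableSubtree("handling GET requests", func(path string, expectedStatus int) {
+	var resp *http.Response
+
+	BeforeEach(func() {
+		var err error
+		resp, err = http.Get(server.URL + path)
+		Expect(err).NotTo(HaveOccurred())
+	})
+
+	It("returns the expected status code", func() {
+		Expect(resp.StatusCode).To(Equal(expectedStatus))
+	})
+},
+	Entry("an existing page", "/home", http.StatusOK),
+	Entry("a missing page", "/nope", http.StatusNotFound),
+)
+```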
+
+- Introduce DescribeTableSubtree [65ec56d]
+- add GinkgoTB() to docs [4a2c832]
+- Add GinkgoTB() function (#1333) [92b6744]
+
+### Fixes
+- Fix typo in internal/suite.go (#1332) [beb9507]
+- Fix typo in docs/index.md (#1319) [4ac3a13]
+- allow wasm to compile with ginkgo present (#1311) [b2e5bc5]
+
+### Maintenance
+- Bump golang.org/x/tools from 0.16.0 to 0.16.1 (#1316) [465a8ec]
+- Bump actions/setup-go from 4 to 5 (#1313) [eab0e40]
+- Bump github/codeql-action from 2 to 3 (#1317) [fbf9724]
+- Bump golang.org/x/crypto (#1318) [3ee80ee]
+- Bump golang.org/x/tools from 0.14.0 to 0.16.0 (#1306) [123e1d5]
+- Bump github.com/onsi/gomega from 1.29.0 to 1.30.0 (#1297) [558f6e0]
+- Bump golang.org/x/net from 0.17.0 to 0.19.0 (#1307) [84ff7f3]
+
+## 2.13.2
+
+### Fixes
+- Fix file handler leak (#1309) [e2e81c8]
+- Avoid allocations with `(*regexp.Regexp).MatchString` (#1302) [3b2a2a7]
+
+## 2.13.1
+
+### Fixes
+- #1296 fix(precompiled test suite): exec bit check omitted on Windows (#1301) [26eea01]
+
+### Maintenance
+- Bump github.com/go-logr/logr from 1.2.4 to 1.3.0 (#1291) [7161a9d]
+- Bump golang.org/x/sys from 0.13.0 to 0.14.0 (#1295) [7fc7b10]
+- Bump golang.org/x/tools from 0.12.0 to 0.14.0 (#1282) [74bbd65]
+- Bump github.com/onsi/gomega from 1.27.10 to 1.29.0 (#1290) [9373633]
+- Bump golang.org/x/net in /integration/_fixtures/version_mismatch_fixture (#1286) [6e3cf65]
+
+## 2.13.0
+
+### Features
+
+Add PreviewSpecs() to enable programmatic preview access to the suite report (fixes #1225)
+
+## 2.12.1
+
+### Fixes
+- Print logr prefix if it exists (#1275) [90d4846]
+
+### Maintenance
+- Bump actions/checkout from 3 to 4 (#1271) [555f543]
+- Bump golang.org/x/sys from 0.11.0 to 0.12.0 (#1270) [d867b7d]
+
+## 2.12.0
+
+### Features
+
+- feat: allow MustPassRepeatedly decorator to be set at suite level (#1266) [05de518]
+
+### Fixes
+
+- fix-errors-in-readme (#1244) [27c2f5d]
+
+### Maintenance
+
+Various chores/dependency bumps.
+
+## 2.11.0
+
+In prior versions of Ginkgo specs the CLI filter flags (e.g. `--focus`, `--label-filter`) would _override_ any programmatic focus. This behavior has proved surprising and confusing in at least the following ways:
+
+- users cannot combine programmatic filters and CLI filters to more efficiently select subsets of tests
+- CLI filters can override programmatic focus on CI systems resulting in an exit code of 0 despite the presence of (incorrectly!) committed focused specs.
+
+Going forward Ginkgo will AND all programmatic and CLI filters. Moreover, the presence of any programmatic focused tests will always result in a non-zero exit code.
+
+This change is technically a change in Ginkgo's external contract and may require some users to make changes to successfully adopt. Specifically: it's possible some users were intentionally using CLI filters to override programmatic focus. If this is you, please open an issue so we can explore solutions to the underlying problem you are trying to solve.
+
+### Fixes
+- Programmatic focus is no longer overwritten by CLI filters [d6bba86]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.27.7 to 1.27.8 (#1218) [4a70a38]
+- Bump golang.org/x/sys from 0.8.0 to 0.9.0 (#1219) [97eda4d]
+
+## 2.10.0
+
+### Features
+- feat(ginkgo/generators): add --tags flag (#1216) [a782a77]
+ adds a new --tags flag to ginkgo generate
+
+### Fixes
+- Fix broken link of MIGRATING_TO_V2.md (#1217) [548d78e]
+
+### Maintenance
+- Bump golang.org/x/tools from 0.9.1 to 0.9.3 (#1215) [2b76a5e]
+
+## 2.9.7
+
+### Fixes
+- fix race when multiple defercleanups are called in goroutines [07fc3a0]
+
+## 2.9.6
+
+### Fixes
+- fix: create parent directory before report files (#1212) [0ac65de]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.27.6 to 1.27.7 (#1202) [3e39231]
+
+## 2.9.5
+
+### Fixes
+- ensure the correct deterministic sort order is produced when ordered specs are generated by a helper function [7fa0b6b]
+
+### Maintenance
+- fix generators link (#1200) [9f9d8b9]
+- Bump golang.org/x/tools from 0.8.0 to 0.9.1 (#1196) [150e3f2]
+- fix spelling err in docs (#1199) [0013b1a]
+- Bump golang.org/x/sys from 0.7.0 to 0.8.0 (#1193) [9e9e3e5]
+
+## 2.9.4
+
+### Fixes
+- fix hang with ginkgo -p (#1192) [15d4bdc] - this addresses a _long_ standing issue related to Ginkgo hanging when a child process spawned by the test does not exit.
+
+- fix: fail fast may cause Serial spec or cleanup Node interrupted (#1178) [8dea88b] - prior to this there was a small gap in which specs on other processes might start even if one process has tried to abort the suite.
+
+
+### Maintenance
+- Document run order when multiple setup nodes are at the same nesting level [903be81]
+
+## 2.9.3
+
+### Features
+- Add RenderTimeline to GinkgoT() [c0c77b6]
+
+### Fixes
+- update Measure deprecation message. fixes #1176 [227c662]
+- add newlines to GinkgoLogr (#1170) (#1171) [0de0e7c]
+
+### Maintenance
+- Bump commonmarker from 0.23.8 to 0.23.9 in /docs (#1183) [8b925ab]
+- Bump nokogiri from 1.14.1 to 1.14.3 in /docs (#1184) [e3795a4]
+- Bump golang.org/x/tools from 0.7.0 to 0.8.0 (#1182) [b453793]
+- Bump actions/setup-go from 3 to 4 (#1164) [73ed75b]
+- Bump github.com/onsi/gomega from 1.27.4 to 1.27.6 (#1173) [0a2bc64]
+- Bump github.com/go-logr/logr from 1.2.3 to 1.2.4 (#1174) [f41c557]
+- Bump golang.org/x/sys from 0.6.0 to 0.7.0 (#1179) [8e423e5]
+
+## 2.9.2
+
+### Maintenance
+- Bump github.com/go-task/slim-sprig (#1167) [3fcc5bf]
+- Bump github.com/onsi/gomega from 1.27.3 to 1.27.4 (#1163) [6143ffe]
+
+## 2.9.1
+
+### Fixes
+This release fixes a longstanding issue where `ginkgo -coverpkg=./...` would not work. This is now resolved and fixes [#1161](https://github.com/onsi/ginkgo/issues/1161) and [#995](https://github.com/onsi/ginkgo/issues/995)
+- Support -coverpkg=./... [26ca1b5]
+- document coverpkg a bit more clearly [fc44c3b]
+
+### Maintenance
+- bump various dependencies
+- Improve Documentation and fix typo (#1158) [93de676]
+
+## 2.9.0
+
+### Features
+- AttachProgressReporter is an experimental feature that allows users to provide arbitrary information when a ProgressReport is requested [28801fe]
+
+- GinkgoT() has been expanded to include several Ginkgo-specific methods [2bd5a3b]
+
+ The intent is to enable the development of third-party libraries that integrate deeply with Ginkgo using `GinkgoT()` to access Ginkgo's functionality.
+
+## 2.8.4
+
+### Features
+- Add OmitSuiteSetupNodes to JunitReportConfig (#1147) [979fbc2]
+- Add a reference to ginkgolinter in docs.index.md (#1143) [8432589]
+
+### Fixes
+- rename tools hack to see if it fixes things for downstream users [a8bb39a]
+
+### Maintenance
+- Bump golang.org/x/text (#1144) [41b2a8a]
+- Bump github.com/onsi/gomega from 1.27.0 to 1.27.1 (#1142) [7c4f583]
+
+## 2.8.3
+
+Released to fix a security issue in the golang.org/x/net dependency
+
+### Maintenance
+
+- Bump golang.org/x/net from 0.6.0 to 0.7.0 (#1141) [fc1a02e]
+- remove tools.go hack from documentation [0718693]
+
+## 2.8.2
+
+Ginkgo now includes a `tools.go` file in the root directory of the `ginkgo` package. This should allow modules that simply `go get github.com/onsi/ginkgo/v2` to also pull in the CLI dependencies. This obviates the need for consumers of Ginkgo to have their own `tools.go` file and makes it simpler to ensure that the version of the `ginkgo` CLI being used matches the version of the library. You can simply run `go run github.com/onsi/ginkgo/v2/ginkgo` to run the version of the cli associated with your package's go.mod.
+
+### Maintenance
+
+- Bump github.com/onsi/gomega from 1.26.0 to 1.27.0 (#1139) [5767b0a]
+- Fix minor typos (#1138) [e1e9723]
+- Fix link in V2 Migration Guide (#1137) [a588f60]
+
+## 2.8.1
+
+### Fixes
+- lock around default report output to avoid triggering the race detector when calling By from goroutines [2d5075a]
+- don't run ReportEntries through sprintf [febbe38]
+
+### Maintenance
+- Bump golang.org/x/tools from 0.5.0 to 0.6.0 (#1135) [11a4860]
+- test: update matrix for Go 1.20 (#1130) [4890a62]
+- Bump golang.org/x/sys from 0.4.0 to 0.5.0 (#1133) [a774638]
+- Bump github.com/onsi/gomega from 1.25.0 to 1.26.0 (#1120) [3f233bd]
+- Bump github-pages from 227 to 228 in /docs (#1131) [f9b8649]
+- Bump activesupport from 6.0.6 to 6.0.6.1 in /docs (#1127) [6f8c042]
+- Update index.md with instructions on how to upgrade Ginkgo [833a75e]
+
+## 2.8.0
+
+### Features
+
+- Introduce GinkgoHelper() to track and exclude helper functions from potential CodeLocations [e19f556]
+
+Modeled after `testing.T.Helper()`. Now, rather than write code like:
+
+```go
+func helper(model Model) {
+ Expect(model).WithOffset(1).To(BeValid())
+ Expect(model.SerialNumber).WithOffset(1).To(MatchRegexp("[a-f0-9]*"))
+}
+```
+
+you can stop tracking offsets (which makes nesting and composing helpers nearly impossible) and simply write:
+
+```go
+func helper(model Model) {
+ GinkgoHelper()
+ Expect(model).To(BeValid())
+ Expect(model.SerialNumber).To(MatchRegexp("[a-f0-9]*"))
+}
+```
+
+- Introduce GinkgoLabelFilter() and Label().MatchesLabelFilter() to make it possible to programmatically match filters (fixes #1119) [2f6597c]
+
+You can now write code like this:
+
+```go
+BeforeSuite(func() {
+ if Label("slow").MatchesLabelFilter(GinkgoLabelFilter()) {
+ // do slow setup
+ }
+
+ if Label("fast").MatchesLabelFilter(GinkgoLabelFilter()) {
+ // do fast setup
+ }
+})
+```
+
+to programmatically check whether a given set of labels will match the configured `--label-filter`.
+
+### Maintenance
+
+- Bump webrick from 1.7.0 to 1.8.1 in /docs (#1125) [ea4966e]
+- codeql: add ruby language (#1124) [9dd275b]
+- dependabot: add bundler package-ecosystem for docs (#1123) [14e7bdd]
+
+## 2.7.1
+
+### Fixes
+- Bring back SuiteConfig.EmitSpecProgress to avoid compilation issue for consumers that set it manually [d2a1cb0]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.24.2 to 1.25.0 (#1118) [cafece6]
+- Bump golang.org/x/tools from 0.4.0 to 0.5.0 (#1111) [eda66c2]
+- Bump golang.org/x/sys from 0.3.0 to 0.4.0 (#1112) [ac5ccaa]
+- Bump github.com/onsi/gomega from 1.24.1 to 1.24.2 (#1097) [eee6480]
+
+## 2.7.0
+
+### Features
+- Introduce ContinueOnFailure for Ordered containers [e0123ca] - Ordered containers that are also decorated with ContinueOnFailure will not stop running specs after the first spec fails.
+- Support for bootstrap commands to use custom data for templates (#1110) [7a2b242]
+- Support for labels and pending decorator in ginkgo outline output (#1113) [e6e3b98]
+- Color aliases for custom color support (#1101) [49fab7a]
+
+### Fixes
+- correctly ensure deterministic spec order, even if specs are generated by iterating over a map [89dda20]
+- Fix a bug where timed-out specs were not correctly treated as failures when determining whether or not to run AfterAlls in an Ordered container.
+- Ensure go test coverprofile outputs to the expected location (#1105) [b0bd77b]
+
+## 2.6.1
+
+### Features
+- Override formatter colors from envvars - this is a new feature but an alternative approach involving config files might be taken in the future (#1095) [60240d1]
+
+### Fixes
+- GinkgoRecover now supports ignoring panics that match a specific, hidden, interface [301f3e2]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.24.0 to 1.24.1 (#1077) [3643823]
+- Bump golang.org/x/tools from 0.2.0 to 0.4.0 (#1090) [f9f856e]
+- Bump nokogiri from 1.13.9 to 1.13.10 in /docs (#1091) [0d7087e]
+
+## 2.6.0
+
+### Features
+- `ReportBeforeSuite` provides access to the suite report before the suite begins.
+- Add junit config option for omitting leafnodetype (#1088) [956e6d2]
+- Add support to customize junit report config to omit spec labels (#1087) [de44005]
+
+### Fixes
+- Fix stack trace pruning so that it has a chance of working on windows [2165648]
+
+## 2.5.1
+
+### Fixes
+- skipped tests only show as 'S' when running with -v [3ab38ae]
+- Fix typo in docs/index.md (#1082) [55fc58d]
+- Fix typo in docs/index.md (#1081) [8a14f1f]
+- Fix link notation in docs/index.md (#1080) [2669612]
+- Fix typo in `--progress` deprecation message (#1076) [b4b7edc]
+
+### Maintenance
+- chore: Included githubactions in the dependabot config (#976) [baea341]
+- Bump golang.org/x/sys from 0.1.0 to 0.2.0 (#1075) [9646297]
+
+## 2.5.0
+
+### Ginkgo output now includes a timeline-view of the spec
+
+This commit changes Ginkgo's default output. Spec details are now
+presented as a **timeline** that includes events that occur during the spec
+lifecycle interleaved with any GinkgoWriter content. This makes it much easier
+to understand the flow of a spec and where a given failure occurs.
+
+The --progress, --slow-spec-threshold, --always-emit-ginkgo-writer flags
+and the SuppressProgressReporting decorator have all been deprecated. Instead
+the existing -v and -vv flags better capture the level of verbosity to display. However,
+a new --show-node-events flag is added to include node `> Enter` and `< Exit` events
+in the spec timeline.
+
+In addition, JUnit reports now include the timeline (rendered with -vv) and custom JUnit
+reports can be configured and generated using
+`GenerateJUnitReportWithConfig(report types.Report, dst string, config JunitReportConfig)`
+
+Code should continue to work unchanged with this version of Ginkgo - however if you have tooling that
+was relying on the specific output format of Ginkgo you _may_ run into issues. Ginkgo's console output is not guaranteed to be stable for tooling and automation purposes. You should, instead, build tooling on top of Ginkgo's JSON format,
+as it has stronger guarantees of stability from version to version.
+
+### Features
+- Provide details about which timeout expired [0f2fa27]
+
+### Fixes
+- Add Support Policy to docs [c70867a]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.22.1 to 1.23.0 (#1070) [bb3b4e2]
+
+## 2.4.0
+
+### Features
+
+- DeferCleanup supports functions with multiple-return values [5e33c75]
+- Add GinkgoLogr (#1067) [bf78c28]
+- Introduction of 'MustPassRepeatedly' decorator (#1051) [047c02f]
+
+### Fixes
+- correcting some typos (#1064) [1403d3c]
+- fix flaky internal_integration interrupt specs [2105ba3]
+- Correct busted link in README [be6b5b9]
+
+### Maintenance
+- Bump actions/checkout from 2 to 3 (#1062) [8a2f483]
+- Bump golang.org/x/tools from 0.1.12 to 0.2.0 (#1065) [529c4e8]
+- Bump github/codeql-action from 1 to 2 (#1061) [da09146]
+- Bump actions/setup-go from 2 to 3 (#1060) [918040d]
+- Bump github.com/onsi/gomega from 1.22.0 to 1.22.1 (#1053) [2098e4d]
+- Bump nokogiri from 1.13.8 to 1.13.9 in /docs (#1066) [1d74122]
+- Add GHA to dependabot config [4442772]
+
+## 2.3.1
+
+### Fixes
+Several users were invoking `ginkgo` by installing the latest version of the cli via `go install github.com/onsi/ginkgo/v2/ginkgo@latest`. When 2.3.0 was released this resulted in an influx of issues as CI systems failed due to a change in the internal contract between the Ginkgo CLI and the Ginkgo library. Ginkgo only supports running the same version of the library as the cli (which is why both are packaged in the same repository).
+
+With this patch release, the ginkgo CLI can now identify a version mismatch and emit a helpful error message.
+
+- Ginkgo cli can identify version mismatches and emit a helpful error message [bc4ae2f]
+- further emphasize that a version match is required when running Ginkgo on CI and/or locally [2691dd8]
+
+### Maintenance
+- bump gomega to v1.22.0 [822a937]
+
+## 2.3.0
+
+### Interruptible Nodes and Timeouts
+
+Ginkgo now supports per-node and per-spec timeouts on interruptible nodes. Check out the [documentation for all the details](https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes) but the gist is you can now write specs like this:
+
+```go
+It("is interruptible", func(ctx SpecContext) { // or context.Context instead of SpecContext, both are valid.
+ // do things until `ctx.Done()` is closed, for example:
+ req, err := http.NewRequestWithContext(ctx, "POST", "/build-widgets", nil)
+ Expect(err).NotTo(HaveOccurred())
+ _, err = http.DefaultClient.Do(req)
+ Expect(err).NotTo(HaveOccurred())
+
+ Eventually(client.WidgetCount).WithContext(ctx).Should(Equal(17))
+}, NodeTimeout(time.Second*20), GracePeriod(5*time.Second))
+```
+
+and have Ginkgo ensure that the node completes before the timeout elapses. If it does elapse, or if an external interrupt is received (e.g. `^C`) then Ginkgo will cancel the context and wait for the Grace Period for the node to exit before proceeding with any cleanup nodes associated with the spec. The `ctx` provided by Ginkgo can also be passed down to Gomega's `Eventually` to have all assertions within the node governed by a single deadline.
+
+### Features
+
+- Ginkgo now records any additional failures that occur during the cleanup of a failed spec. In prior versions this information was quietly discarded, but the introduction of a more rigorous approach to timeouts and interruptions allows Ginkgo to better track subsequent failures.
+- `SpecContext` also provides a mechanism for third-party libraries to provide additional information when a Progress Report is generated. Gomega uses this to provide the current state of an `Eventually().WithContext()` assertion when a Progress Report is requested.
+- DescribeTable now exits with an error if it is not passed any Entries [a4c9865]
+
+### Fixes
+- fixes crashes on newer Ruby 3 installations by upgrading github-pages gem dependency [92c88d5]
+- Make the outline command able to use the DSL import [1be2427]
+
+### Maintenance
+- chore(docs): delete no meaning d [57c373c]
+- chore(docs): Fix hyperlinks [30526d5]
+- chore(docs): fix code blocks without language settings [cf611c4]
+- fix intra-doc link [b541bcb]
+
+## 2.2.0
+
+### Generate real-time Progress Reports [f91377c]
+
+Ginkgo can now generate Progress Reports to point users at the current running line of code (including a preview of the actual source code) and a best guess at the most relevant subroutines.
+
+These Progress Reports allow users to debug stuck or slow tests without exiting the Ginkgo process. A Progress Report can be generated at any time by sending Ginkgo a `SIGINFO` (`^T` on MacOS/BSD) or `SIGUSR1`.
+
+In addition, the user can specify `--poll-progress-after` and `--poll-progress-interval` to have Ginkgo start periodically emitting progress reports if a given node takes too long. These can be overridden/set on a per-node basis with the `PollProgressAfter` and `PollProgressInterval` decorators.
+
+Progress Reports are emitted to stdout, and also stored in the machine-readable report formats that Ginkgo supports.
+
+Ginkgo also uses this progress reporting infrastructure under the hood when handling timeouts and interrupts. This yields much more focused, useful, and informative stack traces than previously.
+
+### Features
+- `BeforeSuite`, `AfterSuite`, `SynchronizedBeforeSuite`, `SynchronizedAfterSuite`, and `ReportAfterSuite` now support (the relevant subset of) decorators. These can be passed in _after_ the callback functions that are usually passed into these nodes.
+
+ As a result the **signature of these methods has changed** and now includes a trailing `args ...interface{}`. For most users simply using the DSL, this change is transparent. However if you were assigning one of these functions to a custom variable (or passing it around) then your code may need to change to reflect the new signature.
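+
+ For example (a hedged sketch; the setup body is illustrative):
+
+ ```go
+ var _ = BeforeSuite(func() {
+ // expensive shared setup
+ }, PollProgressAfter(30*time.Second))
+ ```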
+
+### Maintenance
+- Modernize the invocation of Ginkgo in github actions [0ffde58]
+- Update recommended CI settings in docs [896bbb9]
+- Speed up unnecessarily slow integration test [6d3a90e]
+
+## 2.1.6
+
+### Fixes
+- Add `SuppressProgressReporting` decorator to turn off --progress announcements for a given node [dfef62a]
+- chore: remove duplicate word in comments [7373214]
+
+## 2.1.5
+
+### Fixes
+- drop -mod=mod instructions; fixes #1026 [6ad7138]
+- Ensure `CurrentSpecReport` and `AddReportEntry` are thread-safe [817c09b]
+- remove stale importmap gcflags flag test [3cd8b93]
+- Always emit spec summary [5cf23e2] - even when only one spec has failed
+- Fix ReportAfterSuite usage in docs [b1864ad]
+- fixed typo (#997) [219cc00]
+- TrimRight is not designed to trim Suffix [71ebb74]
+- refactor: replace strings.Replace with strings.ReplaceAll (#978) [143d208]
+- fix syntax in examples (#975) [b69554f]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.20.0 to 1.20.1 (#1027) [e5dfce4]
+- Bump tzinfo from 1.2.9 to 1.2.10 in /docs (#1006) [7ae91c4]
+- Bump github.com/onsi/gomega from 1.19.0 to 1.20.0 (#1005) [e87a85a]
+- test: add new Go 1.19 to test matrix (#1014) [bbefe12]
+- Bump golang.org/x/tools from 0.1.11 to 0.1.12 (#1012) [9327906]
+- Bump golang.org/x/tools from 0.1.10 to 0.1.11 (#993) [f44af96]
+- Bump nokogiri from 1.13.3 to 1.13.6 in /docs (#981) [ef336aa]
+
+## 2.1.4
+
+### Fixes
+- Numerous documentation typos
+- Prepend `when` when using `When` (this behavior was in 1.x but unintentionally lost during the 2.0 rewrite) [efce903]
+- improve error message when a parallel process fails to report back [a7bd1fe]
+- guard against concurrent map writes in DeprecationTracker [0976569]
+- Invoke reporting nodes during dry-run (fixes #956 and #935) [aae4480]
+- Fix ginkgo import circle [f779385]
+
+## 2.1.3
+
+See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2) for details on V2.
+
+### Fixes
+- Calling By in a container node now emits a useful error. [ff12cee]
+
+## 2.1.2
+
+### Fixes
+
+- Track location of focused specs correctly in `ginkgo unfocus` [a612ff1]
+- Profiling suites with focused specs no longer generates an erroneous failure message [8fbfa02]
+- Several documentation typos fixed. Big thanks to everyone who helped catch them and report/fix them!
+
+## 2.1.1
+
+See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2) for details on V2.
+
+### Fixes
+- Suites that only import the new dsl packages are now correctly identified as Ginkgo suites [ec17e17]
+
+## 2.1.0
+
+See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2) for details on V2.
+
+2.1.0 is a minor release with a few tweaks:
+
+- Introduce new DSL packages to enable users to pick-and-choose which portions of the DSL to dot-import. [90868e2] More details [here](https://onsi.github.io/ginkgo/#alternatives-to-dot-importing-ginkgo).
+- Add error check for invalid/nil parameters to DescribeTable [6f8577e]
+- Myriad docs typos fixed (thanks everyone!) [718542a, ecb7098, 146654c, a8f9913, 6bdffde, 03dcd7e]
+
+## 2.0.0
+
+See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2)
+
+## 1.16.5
+
+Ginkgo 2.0 now has a Release Candidate. 1.16.5 advertises the existence of the RC.
+1.16.5 deprecates GinkgoParallelNode in favor of GinkgoParallelProcess.
+
+You can silence the RC advertisement by setting an `ACK_GINKGO_RC=true` environment variable or creating a file in your home directory called `.ack-ginkgo-rc`.
+
+## 1.16.4
+
+### Fixes
+1.16.4 retracts 1.16.3. There are no code changes. The 1.16.3 tag was associated with the wrong commit and an attempt to change it after-the-fact has proven problematic. 1.16.4 retracts 1.16.3 in Ginkgo's go.mod and creates a new, correctly tagged, release.
+
+## 1.16.3
+
+### Features
+- Measure is now deprecated and emits a deprecation warning.
+
+## 1.16.2
+
+### Fixes
+- Deprecations can be suppressed by setting an `ACK_GINKGO_DEPRECATIONS=` environment variable.
+
+## 1.16.1
+
+### Fixes
+- Suppress --stream deprecation warning on windows (#793)
+
+## 1.16.0
+
+### Features
+- Advertise Ginkgo 2.0. Introduce deprecations. [9ef1913]
+ - Update README.md to advertise that Ginkgo 2.0 is coming.
+ - Backport the 2.0 DeprecationTracker and start alerting users
+ about upcoming deprecations.
+
+- Add slim-sprig template functions to bootstrap/generate (#775) [9162b86]
+
+- Fix accidental reference to 1488 (#784) [9fb7fe4]
+
+## 1.15.2
+
+### Fixes
+- ignore blank `-focus` and `-skip` flags (#780) [e90a4a0]
+
+## 1.15.1
+
+### Fixes
+- reporters/junit: Use `system-out` element instead of `passed` (#769) [9eda305]
+
+## 1.15.0
+
+### Features
+- Adds 'outline' command to print the outline of specs/containers in a file (#754) [071c369] [6803cc3] [935b538] [06744e8] [0c40583]
+- Add support for using template to generate tests (#752) [efb9e69]
+- Add a Chinese Doc #755 (#756) [5207632]
+- cli: allow multiple -focus and -skip flags (#736) [9a782fb]
+
+### Fixes
+- Add _internal to filename of tests created with internal flag (#751) [43c12da]
+
+## 1.14.2
+
+### Fixes
+- correct handling of windows backslash in import path (#721) [97f3d51]
+- Add additional methods to GinkgoT() to improve compatibility with the testing.TB interface [b5fe44d]
+
+## 1.14.1
+
+### Fixes
+- Discard exported method declaration when running ginkgo bootstrap (#558) [f4b0240]
+
+## 1.14.0
+
+### Features
+- Defer running top-level container nodes until RunSpecs is called [d44dedf]
+- [Document Ginkgo lifecycle](http://onsi.github.io/ginkgo/#understanding-ginkgos-lifecycle)
+- Add `extensions/globals` package (#692) [3295c8f] - this can be helpful in contexts where you are test-driving your test-generation code (see [#692](https://github.com/onsi/ginkgo/pull/692))
+- Print Skip reason in JUnit reporter if one was provided [820dfab]
+
+## 1.13.0
+
+### Features
+- Add a version of table.Entry that allows dumping the entry parameters. (#689) [21eaef2]
+
+### Fixes
+- Ensure integration tests pass in an environment sans GOPATH [606fba2]
+- Add books package (#568) [fc0e44e]
+- doc(readme): installation via "tools package" (#677) [83bb20e]
+- Solve the undefined: unix.Dup2 compile error on mips64le (#680) [0624f75]
+- Import package without dot (#687) [6321024]
+- Fix integration tests to stop requiring GOPATH (#686) [a912ec5]
+
+## 1.12.3
+
+### Fixes
+- Print correct code location of failing table test (#666) [c6d7afb]
+
+## 1.12.2
+
+### Fixes
+- Update dependencies [ea4a036]
+
+## 1.12.1
+
+### Fixes
+- Make unfocus ("blur") much faster (#674) [8b18061]
+- Fix typo (#673) [7fdcbe8]
+- Test against 1.14 and remove 1.12 [d5c2ad6]
+- Test if a coverprofile's content is empty before checking its last character (#670) [14d9fa2]
+- replace tail package with maintained one. this fixes go get errors (#667) [4ba33d4]
+- improve ginkgo performance - makes progress on #644 [a14f98e]
+- fix convert integration tests [1f8ba69]
+- fix typo succesful -> successful (#663) [1ea49cf]
+- Fix invalid link (#658) [b886136]
+- convert utility : Include comments from source (#657) [1077c6d]
+- Explain what BDD means [d79e7fb]
+- skip race detector test on unsupported platform (#642) [f8ab89d]
+- Use Dup2 from golang.org/x/sys/unix instead of syscallDup (#638) [5d53c55]
+- Fix missing newline in combined coverage file (#641) [6a07ea2]
+- check if a spec is run before returning SpecSummary (#645) [8850000]
+
+## 1.12.0
+
+### Features
+- Add module definition (#630) [78916ab]
+
+## 1.11.0
+
+### Features
+- Add syscall for riscv64 architecture [f66e896]
+- teamcity reporter: output location of test failure as well as test definition (#626) [9869142]
+- teamcity reporter: output newline after every service message (#625) [3cfa02d]
+- Add support for go module when running `generate` command (#578) [9c89e3f]
+
+## 1.10.3
+
+### Fixes
+- Set go_import_path in travis.yml to allow internal packages in forks (#607) [3b721db]
+- Add integration test [d90e0dc]
+- Fix coverage files combining [e5dde8c]
+- A new CLI option: -ginkgo.reportFile (#601) [034fd25]
+
+## 1.10.2
+
+### Fixes
+- speed up table entry generateIt() (#609) [5049dc5]
+- Fix. Write errors to stderr instead of stdout (#610) [7bb3091]
+
+## 1.10.1
+
+### Fixes
+- stack backtrace: fix skipping (#600) [2a4c0bd]
+
+## 1.10.0
+
+### Fixes
+- stack backtrace: fix alignment and skipping [66915d6]
+- fix typo in documentation [8f97b93]
+
+## 1.9.0
+
+### Features
+- Option to print output into report, when tests have passed [0545415]
+
+### Fixes
+- Fixed typos in comments [0ecbc58]
+- gofmt code [a7f8bfb]
+- Simplify code [7454d00]
+- Simplify concatenation, incrementation and function assignment [4825557]
+- Avoid unnecessary conversions [9d9403c]
+- JUnit: include more detailed information about panic [19cca4b]
+- Print help to stdout when the user asks for help [4cb7441]
+
+
+## 1.8.0
+
+### New Features
+- allow config of the vet flag for `go test` (#562) [3cd45fa]
+- Support projects using go modules [d56ee76]
+
+### Fixes and Minor Improvements
+- chore(godoc): fixes typos in Measurement funcs [dbaca8e]
+- Optimize focus to avoid allocations [f493786]
+- Ensure generated test file names are underscored [505cc35]
+
+## 1.7.0
+
+### New Features
+- Add JustAfterEach (#484) [0d4f080]
+
+### Fixes
+- Correctly round suite time in junit reporter [2445fc1]
+- Avoid using -i argument to go test for Golang 1.10+ [46bbc26]
+
+## 1.6.0
+
+### New Features
+- add --debug flag to emit node output to files (#499) [39febac]
+
+### Fixes
+- fix: for `go vet` to pass [69338ec]
+- docs: fix for contributing instructions [7004cb1]
+- consolidate and streamline contribution docs (#494) [d848015]
+- Make generated Junit file compatible with "Maven Surefire" (#488) [e51bee6]
+- all: gofmt [000d317]
+- Increase eventually timeout to 30s [c73579c]
+- Clarify asynchronous test behavior [294d8f4]
+- Travis badge should only show master [26d2143]
+
+## 1.5.0 5/10/2018
+
+### New Features
+- Supports go v1.10 (#443, #446, #451) [e873237, 468e89e, e37dbfe, a37f4c0, c0b857d, bca5260, 4177ca8]
+- Add a When() synonym for Context() (#386) [747514b, 7484dad, 7354a07, dd826c8]
+- Re-add noisySkippings flag [652e15c]
+- Allow coverage to be displayed for focused specs (#367) [11459a8]
+- Handle -outputdir flag (#364) [228e3a8]
+- Handle -coverprofile flag (#355) [43392d5]
+
+### Fixes
+- When using custom reporters register the custom reporters *before* the default reporter. This allows users to see the output of any print statements in their custom reporters. (#365) [8382b23]
+- When running a test and calculating the coverage using the `-coverprofile` and `-outputdir` flags, Ginkgo fails with an error if the directory does not exist. This is due to an [issue in go 1.10](https://github.com/golang/go/issues/24588) (#446) [b36a6e0]
+- `unfocus` command ignores vendor folder (#459) [e5e551c, c556e43, a3b6351, 9a820dd]
+- Ignore packages whose tests are all ignored by go (#456) [7430ca7, 6d8be98]
+- Increase the threshold when checking time measurements (#455) [2f714bf, 68f622c]
+- Fix race condition in coverage tests (#423) [a5a8ff7, ab9c08b]
+- Add an extra new line after reporting spec run completion for test2json [874520d]
+- added name field to the junit reporter's testsuite [ae61c63]
+- Do not set the run time of a spec when the dryRun flag is used (#438) [457e2d9, ba8e856]
+- Process FWhen and FSpecify when unfocusing (#434) [9008c7b, ee65bd, df87dfe]
+- Synchronize access to the state of specs to avoid race conditions (#430) [7d481bc, ae6829d]
+- Added Duration on GinkgoTestDescription (#383) [5f49dad, 528417e, 0747408, 329d7ed]
+- Fix Ginkgo stack trace on failure for Specify (#415) [b977ede, 65ca40e, 6c46eb8]
+- Update README with Go 1.6+, Golang -> Go (#409) [17f6b97, bc14b66, 20d1598]
+- Use fmt.Errorf instead of errors.New(fmt.Sprintf (#401) [a299f56, 44e2eaa]
+- Imports in generated code should follow conventions (#398) [0bec0b0, e8536d8]
+- Prevent data race error when Recording a benchmark value from multiple go routines (#390) [c0c4881, 7a241e9]
+- Replace GOPATH in Environment [4b883f0]
+
+
+## 1.4.0 7/16/2017
+
+- `ginkgo` now provides a hint if you accidentally forget to run `ginkgo bootstrap` to generate a `*_suite_test.go` file that actually invokes the Ginkgo test runner. [#345](https://github.com/onsi/ginkgo/pull/345)
+- thanks to improvements in `go test -c` `ginkgo` no longer needs to fix Go's compilation output to ensure compilation errors are expressed relative to the CWD. [#357]
+- `ginkgo watch -watchRegExp=...` allows you to specify a custom regular expression to watch. Only files matching the regular expression are watched for changes (the default is `\.go$`) [#356]
+- `ginkgo` now always emits compilation output. Previously, only failed compilation output was printed out. [#277]
+- `ginkgo -requireSuite` now fails the test run if there are `*_test.go` files but `go test` fails to detect any tests. Typically this means you forgot to run `ginkgo bootstrap` to generate a suite file. [#344]
+- `ginkgo -timeout=DURATION` allows you to adjust the timeout for the entire test suite (default is 24 hours) [#248]
+
+## 1.3.0 3/28/2017
+
+Improvements:
+
+- Significantly improved parallel test distribution. Now instead of pre-sharding test cases across workers (which can result in idle workers and poor test performance) Ginkgo uses a shared queue to keep all workers busy until all tests are complete. This improves test-time performance and consistency.
+- `Skip(message)` can be used to skip the current test.
+- Added `extensions/table` - a Ginkgo DSL for [Table Driven Tests](http://onsi.github.io/ginkgo/#table-driven-tests)
+- Add `GinkgoRandomSeed()` - shorthand for `config.GinkgoConfig.RandomSeed`
+- Support for retrying flaky tests with `--flakeAttempts`
+- `ginkgo ./...` now recurses as you'd expect
+- Added `Specify` a synonym for `It`
+- Support colorized output on Windows
+- Broader support for various go compilation flags in the `ginkgo` CLI
+
+Bug Fixes:
+
+- Ginkgo tests now fail when you `panic(nil)` (#167)
+
+## 1.2.0 5/31/2015
+
+Improvements
+
+- `ginkgo -coverpkg` calls down to `go test -coverpkg` (#160)
+- `ginkgo -afterSuiteHook COMMAND` invokes the passed-in `COMMAND` after a test suite completes (#152)
+- Relaxed requirement for Go 1.4+. `ginkgo` now works with Go v1.3+ (#166)
+
+## 1.2.0-beta
+
+Ginkgo now requires Go 1.4+
+
+Improvements:
+
+- Call reporters in reverse order when announcing spec completion -- allows custom reporters to emit output before the default reporter does.
+- Improved focus behavior. Now, this:
+
+ ```golang
+ FDescribe("Some describe", func() {
+ It("A", func() {})
+
+ FIt("B", func() {})
+ })
+ ```
+
+ will run `B` but *not* `A`. This tends to be a common usage pattern when in the thick of writing and debugging tests.
+- When `SIGINT` is received, Ginkgo will emit the contents of the `GinkgoWriter` before running the `AfterSuite`. Useful for debugging stuck tests.
+- When `--progress` is set, Ginkgo will write test progress (in particular, Ginkgo will say when it is about to run a BeforeEach, AfterEach, It, etc...) to the `GinkgoWriter`. This is useful for debugging stuck tests and tests that generate many logs.
+- Improved output when an error occurs in a setup or teardown block.
+- When `--dryRun` is set, Ginkgo will walk the spec tree and emit to its reporter *without* actually running anything. Best paired with `-v` to understand which specs will run in which order.
+- Add `By` to help document long `It`s. `By` simply writes to the `GinkgoWriter`.
+- Add support for precompiled tests:
+ - `ginkgo build` will now compile the package, producing a file named `package.test`
+ - The compiled `package.test` file can be run directly. This runs the tests in series.
+ - To run precompiled tests in parallel, you can run: `ginkgo -p package.test`
+- Support `bootstrap`ping and `generate`ing [Agouti](http://agouti.org) specs.
+- `ginkgo generate` and `ginkgo bootstrap` now honor the package name already defined in a given directory
+- The `ginkgo` CLI ignores `SIGQUIT`. Prevents its stack dump from interlacing with the underlying test suite's stack dump.
+- The `ginkgo` CLI now compiles tests into a temporary directory instead of the package directory. This necessitates upgrading to Go v1.4+.
+- `ginkgo -notify` now works on Linux
+
+Bug Fixes:
+
+- If --skipPackages is used and all packages are skipped, Ginkgo should exit 0.
+- Fix tempfile leak when running in parallel
+- Fix incorrect failure message when a panic occurs during a parallel test run
+- Fixed an issue where a pending test within a focused context (or a focused test within a pending context) would skip all other tests.
+- Be more consistent about handling SIGTERM as well as SIGINT
+- When interrupted while concurrently compiling test suites in the background, Ginkgo now cleans up the compiled artifacts.
+- Fixed a long standing bug where `ginkgo -p` would hang if a process spawned by one of the Ginkgo parallel nodes does not exit. (Hooray!)
+
+## 1.1.0 (8/2/2014)
+
+No changes, just dropping the beta.
+
+## 1.1.0-beta (7/22/2014)
+New Features:
+
+- `ginkgo watch` now monitors packages *and their dependencies* for changes. The depth of the dependency tree can be modified with the `-depth` flag.
+- Test suites with a programmatic focus (`FIt`, `FDescribe`, etc...) exit with non-zero status code, even when they pass. This allows CI systems to detect accidental commits of focused test suites.
+- `ginkgo -p` runs the testsuite in parallel with an auto-detected number of nodes.
+- `ginkgo -tags=TAG_LIST` passes a list of tags down to the `go build` command.
+- `ginkgo --failFast` aborts the test suite after the first failure.
+- `ginkgo generate file_1 file_2` can take multiple file arguments.
+- Ginkgo now summarizes any spec failures that occurred at the end of the test run.
+- `ginkgo --randomizeSuites` will run tests *suites* in random order using the generated/passed-in seed.
+
+Improvements:
+
+- `ginkgo -skipPackage` now takes a comma-separated list of strings. If the *relative path* to a package matches one of the entries in the comma-separated list, that package is skipped.
+- `ginkgo --untilItFails` no longer recompiles between attempts.
+- Ginkgo now panics when a runnable node (`It`, `BeforeEach`, `JustBeforeEach`, `AfterEach`, `Measure`) is nested within another runnable node. This is always a mistake. Any test suites that panic because of this change should be fixed.
+
+Bug Fixes:
+
+- `ginkgo bootstrap` and `ginkgo generate` no longer fail when dealing with `hyphen-separated-packages`.
+- parallel specs are now better distributed across nodes - fixed a crashing bug where (for example) distributing 11 tests across 7 nodes would panic
+
+## 1.0.0 (5/24/2014)
+New Features:
+
+- Add `GinkgoParallelNode()` - shorthand for `config.GinkgoConfig.ParallelNode`
+
+Improvements:
+
+- When compilation fails, the compilation output is rewritten to present a correct *relative* path. Allows ⌘-clicking in iTerm to open the file in your text editor.
+- `--untilItFails` and `ginkgo watch` now generate new random seeds between test runs, unless a particular random seed is specified.
+
+Bug Fixes:
+
+- `-cover` now generates a correctly combined coverprofile when running in parallel with multiple `-node`s.
+- Print out the contents of the `GinkgoWriter` when `BeforeSuite` or `AfterSuite` fail.
+- Fix all remaining race conditions in Ginkgo's test suite.
+
+## 1.0.0-beta (4/14/2014)
+Breaking changes:
+
+- `thirdparty/gomocktestreporter` is gone. Use `GinkgoT()` instead
+- Modified the Reporter interface
+- `watch` is now a subcommand, not a flag.
+
+DSL changes:
+
+- `BeforeSuite` and `AfterSuite` for setting up and tearing down test suites.
+- `AfterSuite` is triggered on interrupt (`^C`) as well as exit.
+- `SynchronizedBeforeSuite` and `SynchronizedAfterSuite` for setting up and tearing down singleton resources across parallel nodes.
+
+CLI changes:
+
+- `watch` is now a subcommand, not a flag
+- `--nodot` flag can be passed to `ginkgo generate` and `ginkgo bootstrap` to avoid dot imports. This explicitly imports all exported identifiers in Ginkgo and Gomega. Refreshing this list can be done by running `ginkgo nodot`
+- Additional arguments can be passed to specs. Pass them after the `--` separator
+- `--skipPackage` flag takes a regexp and ignores any packages whose names match said regexp.
+- `--trace` flag prints out full stack traces when errors occur, not just the line at which the error occurs.
+
+Misc:
+
+- Start using semantic versioning
+- Start maintaining changelog
+
+Major refactor:
+
+- Pull Ginkgo's internals out into `internal`
+- Rename `example` everywhere to `spec`
+- Much more!
diff --git a/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md b/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md
new file mode 100644
index 0000000..80de566
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md
@@ -0,0 +1,15 @@
+# Contributing to Ginkgo
+
+Your contributions to Ginkgo are essential for its long-term maintenance and improvement.
+
+- Please **open an issue first** - describe what problem you are trying to solve and give the community a forum for input and feedback ahead of investing time in writing code!
+- Ensure adequate test coverage:
+ - When adding to the Ginkgo library, add unit and/or integration tests (under the `integration` folder).
+ - When adding to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test.
+- Run `make` or:
+ - Install ginkgo locally via `go install ./...`
+ - Make sure all the tests succeed via `ginkgo -r -p`
+ - Vet your changes via `go vet ./...`
+- Update the documentation. Ginkgo uses `godoc` comments and documentation in `docs/index.md`. You can run `bundle && bundle exec jekyll serve` in the `docs` directory to preview your changes.
+
+Thanks for supporting Ginkgo!
diff --git a/vendor/github.com/onsi/ginkgo/v2/LICENSE b/vendor/github.com/onsi/ginkgo/v2/LICENSE
new file mode 100644
index 0000000..9415ee7
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2013-2014 Onsi Fakhouri
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/onsi/ginkgo/v2/Makefile b/vendor/github.com/onsi/ginkgo/v2/Makefile
new file mode 100644
index 0000000..cb099af
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/Makefile
@@ -0,0 +1,11 @@
+# default task since it's first
+.PHONY: all
+all: vet test
+
+.PHONY: test
+test:
+ go run github.com/onsi/ginkgo/v2/ginkgo -r -p
+
+.PHONY: vet
+vet:
+ go vet ./...
diff --git a/vendor/github.com/onsi/ginkgo/v2/OWNERS b/vendor/github.com/onsi/ginkgo/v2/OWNERS
new file mode 100644
index 0000000..bfdc4ad
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/OWNERS
@@ -0,0 +1,5 @@
+approvers:
+ - bertinatto
+ - bparees
+ - soltysh
+ - stbenjam
diff --git a/vendor/github.com/onsi/ginkgo/v2/README.md b/vendor/github.com/onsi/ginkgo/v2/README.md
new file mode 100644
index 0000000..cb23ffd
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/README.md
@@ -0,0 +1,115 @@
+
+
+[![test suite](https://github.com/onsi/ginkgo/workflows/test/badge.svg?branch=master)](https://github.com/onsi/ginkgo/actions?query=workflow%3Atest+branch%3Amaster) | [Ginkgo Docs](https://onsi.github.io/ginkgo/)
+
+---
+
+# Ginkgo
+
+Ginkgo is a mature testing framework for Go designed to help you write expressive specs. Ginkgo builds on top of Go's `testing` foundation and is complemented by the [Gomega](https://github.com/onsi/gomega) matcher library. Together, Ginkgo and Gomega let you express the intent behind your specs clearly:
+
+```go
+import (
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+ ...
+)
+
+var _ = Describe("Checking books out of the library", Label("library"), func() {
+ var library *libraries.Library
+ var book *books.Book
+ var valjean *users.User
+ BeforeEach(func() {
+ library = libraries.NewClient()
+ book = &books.Book{
+ Title: "Les Miserables",
+ Author: "Victor Hugo",
+ }
+ valjean = users.NewUser("Jean Valjean")
+ })
+
+ When("the library has the book in question", func() {
+ BeforeEach(func(ctx SpecContext) {
+ Expect(library.Store(ctx, book)).To(Succeed())
+ })
+
+ Context("and the book is available", func() {
+ It("lends it to the reader", func(ctx SpecContext) {
+ Expect(valjean.Checkout(ctx, library, "Les Miserables")).To(Succeed())
+ Expect(valjean.Books()).To(ContainElement(book))
+ Expect(library.UserWithBook(ctx, book)).To(Equal(valjean))
+ }, SpecTimeout(time.Second * 5))
+ })
+
+ Context("but the book has already been checked out", func() {
+ var javert *users.User
+ BeforeEach(func(ctx SpecContext) {
+ javert = users.NewUser("Javert")
+ Expect(javert.Checkout(ctx, library, "Les Miserables")).To(Succeed())
+ })
+
+ It("tells the user", func(ctx SpecContext) {
+ err := valjean.Checkout(ctx, library, "Les Miserables")
+ Expect(err).To(MatchError("Les Miserables is currently checked out"))
+ }, SpecTimeout(time.Second * 5))
+
+ It("lets the user place a hold and get notified later", func(ctx SpecContext) {
+ Expect(valjean.Hold(ctx, library, "Les Miserables")).To(Succeed())
+ Expect(valjean.Holds(ctx)).To(ContainElement(book))
+
+ By("when Javert returns the book")
+ Expect(javert.Return(ctx, library, book)).To(Succeed())
+
+ By("it eventually informs Valjean")
+ notification := "Les Miserables is ready for pick up"
+ Eventually(ctx, valjean.Notifications).Should(ContainElement(notification))
+
+ Expect(valjean.Checkout(ctx, library, "Les Miserables")).To(Succeed())
+ Expect(valjean.Books(ctx)).To(ContainElement(book))
+ Expect(valjean.Holds(ctx)).To(BeEmpty())
+ }, SpecTimeout(time.Second * 10))
+ })
+ })
+
+ When("the library does not have the book in question", func() {
+ It("tells the reader the book is unavailable", func(ctx SpecContext) {
+ err := valjean.Checkout(ctx, library, "Les Miserables")
+ Expect(err).To(MatchError("Les Miserables is not in the library catalog"))
+ }, SpecTimeout(time.Second * 5))
+ })
+})
+```
+
+Jump to the [docs](https://onsi.github.io/ginkgo/) to learn more. It's easy to [bootstrap](https://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first specs](https://onsi.github.io/ginkgo/#adding-specs-to-a-suite).
+
+If you have a question, comment, bug report, feature request, etc. please open a [GitHub issue](https://github.com/onsi/ginkgo/issues/new), or visit the [Ginkgo Slack channel](https://app.slack.com/client/T029RQSE6/CQQ50BBNW).
+
+## Capabilities
+
+Whether writing basic unit specs, complex integration specs, or even performance specs - Ginkgo gives you an expressive Domain-Specific Language (DSL) that will be familiar to users coming from frameworks such as [Quick](https://github.com/Quick/Quick), [RSpec](https://rspec.info), [Jasmine](https://jasmine.github.io), and [Busted](https://lunarmodules.github.io/busted/). This style of testing is sometimes referred to as "Behavior-Driven Development" (BDD) though Ginkgo's utility extends beyond acceptance-level testing.
+
+With Ginkgo's DSL you can use nestable [`Describe`, `Context` and `When` container nodes](https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes) to organize your specs, [`BeforeEach` and `AfterEach` setup nodes](https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and cleanup, [`It` and `Specify` subject nodes](https://onsi.github.io/ginkgo/#spec-subjects-it) to hold your assertions, [`BeforeSuite` and `AfterSuite` nodes](https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite) to prep for and clean up after a suite... and [much more!](https://onsi.github.io/ginkgo/#writing-specs)
+
+At runtime, Ginkgo can run your specs in reproducibly [random order](https://onsi.github.io/ginkgo/#spec-randomization) and has sophisticated support for [spec parallelization](https://onsi.github.io/ginkgo/#spec-parallelization). In fact, running specs in parallel is as easy as
+
+```bash
+ginkgo -p
+```
+
+By following [established patterns for writing parallel specs](https://onsi.github.io/ginkgo/#patterns-for-parallel-integration-specs) you can build even large, complex integration suites that parallelize cleanly and run performantly. And you don't have to worry about your spec suite hanging or leaving a mess behind - Ginkgo provides a per-node `context.Context` and the capability to interrupt the spec after a set period of time - and then clean up.
+
+As your suites grow Ginkgo helps you keep your specs organized with [labels](https://onsi.github.io/ginkgo/#spec-labels) and lets you easily run [subsets of specs](https://onsi.github.io/ginkgo/#filtering-specs), either [programmatically](https://onsi.github.io/ginkgo/#focused-specs) or on the [command line](https://onsi.github.io/ginkgo/#combining-filters). And Ginkgo's reporting infrastructure generates machine-readable output in a [variety of formats](https://onsi.github.io/ginkgo/#generating-machine-readable-reports) _and_ allows you to build your own [custom reporting infrastructure](https://onsi.github.io/ginkgo/#generating-reports-programmatically).
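+
+For example, individual specs can be tagged with the `Label` decorator and then selected at run time with `--label-filter` (a minimal sketch):
+
+```go
+It("responds quickly", Label("fast"), func() {
+ // runs only when the label filter matches, e.g. `ginkgo --label-filter=fast`
+})
+```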
+
+Ginkgo ships with `ginkgo`, a [command line tool](https://onsi.github.io/ginkgo/#ginkgo-cli-overview) with support for generating, running, filtering, and profiling Ginkgo suites. You can even have Ginkgo automatically run your specs when it detects a change with `ginkgo watch`, enabling rapid feedback loops during test-driven development.
+
+And that's just Ginkgo! [Gomega](https://onsi.github.io/gomega/) brings a rich, mature, family of [assertions and matchers](https://onsi.github.io/gomega/#provided-matchers) to your suites. With Gomega you can easily mix [synchronous and asynchronous assertions](https://onsi.github.io/ginkgo/#patterns-for-asynchronous-testing) in your specs. You can even build your own set of expressive domain-specific matchers quickly and easily by composing Gomega's [existing building blocks](https://onsi.github.io/ginkgo/#building-custom-matchers).
+
+Happy Testing!
+
+## License
+
+Ginkgo is MIT-Licensed
+
+## Contributing
+
+See [CONTRIBUTING.md](CONTRIBUTING.md)
diff --git a/vendor/github.com/onsi/ginkgo/v2/RELEASING.md b/vendor/github.com/onsi/ginkgo/v2/RELEASING.md
new file mode 100644
index 0000000..363815d
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/RELEASING.md
@@ -0,0 +1,23 @@
+A Ginkgo release is a tagged git sha and a GitHub release. To cut a release:
+
+1. Ensure CHANGELOG.md is up to date.
+ - Use
+ ```bash
+ LAST_VERSION=$(git tag --sort=version:refname | tail -n1)
+ CHANGES=$(git log --pretty=format:'- %s [%h]' HEAD...$LAST_VERSION)
+ echo -e "## NEXT\n\n$CHANGES\n\n### Features\n\n### Fixes\n\n### Maintenance\n\n$(cat CHANGELOG.md)" > CHANGELOG.md
+ ```
+ to update the changelog
+ - Categorize the changes into
+ - Breaking Changes (requires a major version)
+ - New Features (minor version)
+ - Fixes (fix version)
+ - Maintenance (which in general should not be mentioned in `CHANGELOG.md` as they have no user impact)
+1. Update `VERSION` in `types/version.go`
+1. Commit, push, and release:
+ ```bash
+ git commit -m "vM.m.p"
+ git push
+ gh release create "vM.m.p"
+ git fetch --tags origin master
+ ```
\ No newline at end of file
diff --git a/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go b/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go
new file mode 100644
index 0000000..a61021d
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go
@@ -0,0 +1,69 @@
+package config
+
+// GinkgoConfigType has been deprecated and its equivalent now lives in
+// the types package. You can no longer access Ginkgo configuration from the config
+// package. Instead use the DSL's GinkgoConfiguration() function to get copies of the
+// current configuration
+//
+// GinkgoConfigType is still here so custom V1 reporters do not result in a compilation error
+// It will be removed in a future minor release of Ginkgo
+type GinkgoConfigType = DeprecatedGinkgoConfigType
+type DeprecatedGinkgoConfigType struct {
+ RandomSeed int64
+ RandomizeAllSpecs bool
+ RegexScansFilePath bool
+ FocusStrings []string
+ SkipStrings []string
+ SkipMeasurements bool
+ FailOnPending bool
+ FailFast bool
+ FlakeAttempts int
+ EmitSpecProgress bool
+ DryRun bool
+ DebugParallel bool
+
+ ParallelNode int
+ ParallelTotal int
+ SyncHost string
+ StreamHost string
+}
+
+// DefaultReporterConfigType has been deprecated and its equivalent now lives in
+// the types package. You can no longer access Ginkgo configuration from the config
+// package. Instead use the DSL's GinkgoConfiguration() function to get copies of the
+// current configuration
+//
+// DefaultReporterConfigType is still here so custom V1 reporters do not result in a compilation error
+// It will be removed in a future minor release of Ginkgo
+type DefaultReporterConfigType = DeprecatedDefaultReporterConfigType
+type DeprecatedDefaultReporterConfigType struct {
+ NoColor bool
+ SlowSpecThreshold float64
+ NoisyPendings bool
+ NoisySkippings bool
+ Succinct bool
+ Verbose bool
+ FullTrace bool
+ ReportPassed bool
+ ReportFile string
+}
+
+// Sadly there is no way to gracefully deprecate access to these global config variables.
+// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method
+// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails
+type GinkgoConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead struct{}
+
+// Sadly there is no way to gracefully deprecate access to these global config variables.
+// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method
+// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails
+var GinkgoConfig = GinkgoConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead{}
+
+// Sadly there is no way to gracefully deprecate access to these global config variables.
+// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method
+// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails
+type DefaultReporterConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead struct{}
+
+// Sadly there is no way to gracefully deprecate access to these global config variables.
+// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method
+// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails
+var DefaultReporterConfig = DefaultReporterConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead{}
diff --git a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go
new file mode 100644
index 0000000..a3e8237
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go
@@ -0,0 +1,847 @@
+/*
+Ginkgo is a testing framework for Go designed to help you write expressive tests.
+https://github.com/onsi/ginkgo
+MIT-Licensed
+
+The godoc documentation outlines Ginkgo's API. Since Ginkgo is a Domain-Specific Language it is important to
+build a mental model for Ginkgo - the narrative documentation at https://onsi.github.io/ginkgo/ is designed to help you do that.
+You should start there - even a brief skim will be helpful. At minimum you should skim through the https://onsi.github.io/ginkgo/#getting-started chapter.
+
+Ginkgo is best paired with the Gomega matcher library: https://github.com/onsi/gomega
+
+You can run Ginkgo specs with go test - however we recommend using the ginkgo cli. It enables functionality
+that go test does not (especially running suites in parallel). You can learn more at https://onsi.github.io/ginkgo/#ginkgo-cli-overview
+or by running 'ginkgo help'.
+*/
+package ginkgo
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/go-logr/logr"
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/internal"
+ "github.com/onsi/ginkgo/v2/internal/global"
+ "github.com/onsi/ginkgo/v2/internal/interrupt_handler"
+ "github.com/onsi/ginkgo/v2/internal/parallel_support"
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+const GINKGO_VERSION = types.VERSION
+
+var flagSet types.GinkgoFlagSet
+var deprecationTracker = types.NewDeprecationTracker()
+var suiteConfig = types.NewDefaultSuiteConfig()
+var reporterConfig = types.NewDefaultReporterConfig()
+var suiteDidRun = false
+var outputInterceptor internal.OutputInterceptor
+var client parallel_support.Client
+
+func init() {
+ var err error
+ flagSet, err = types.BuildTestSuiteFlagSet(&suiteConfig, &reporterConfig)
+ exitIfErr(err)
+ writer := internal.NewWriter(os.Stdout)
+ GinkgoWriter = writer
+ GinkgoLogr = internal.GinkgoLogrFunc(writer)
+}
+
+func exitIfErr(err error) {
+ if err != nil {
+ if outputInterceptor != nil {
+ outputInterceptor.Shutdown()
+ }
+ if client != nil {
+ client.Close()
+ }
+ fmt.Fprintln(formatter.ColorableStdErr, err.Error())
+ os.Exit(1)
+ }
+}
+
+func exitIfErrors(errors []error) {
+ if len(errors) > 0 {
+ if outputInterceptor != nil {
+ outputInterceptor.Shutdown()
+ }
+ if client != nil {
+ client.Close()
+ }
+ for _, err := range errors {
+ fmt.Fprintln(formatter.ColorableStdErr, err.Error())
+ }
+ os.Exit(1)
+ }
+}
+
+// The interface implemented by GinkgoWriter
+type GinkgoWriterInterface interface {
+ io.Writer
+
+ Print(a ...interface{})
+ Printf(format string, a ...interface{})
+ Println(a ...interface{})
+
+ TeeTo(writer io.Writer)
+ ClearTeeWriters()
+}
+
+/*
+SpecContext is the context object passed into nodes that are subject to a timeout or need to be notified of an interrupt. It implements the standard context.Context interface but also contains additional helpers to provide an extensibility point for Ginkgo. (As an example, Gomega's Eventually can use the methods defined on SpecContext to provide deeper integration with Ginkgo).
+
+You can do anything with SpecContext that you do with a typical context.Context including wrapping it with any of the context.With* methods.
+
+Ginkgo will cancel the SpecContext when a node is interrupted (e.g. by the user sending an interrupt signal) or when a node has exceeded its allowed run-time. Note, however, that even in cases where a node has a deadline, SpecContext will not return a deadline via .Deadline(). This is because Ginkgo does not use a WithDeadline() context to model node deadlines as Ginkgo needs control over the precise timing of the context cancellation to ensure it can provide an accurate progress report at the moment of cancellation.
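+
+For example (a sketch; doWork is a hypothetical context-aware helper):
+
+	It("does work", func(ctx SpecContext) {
+		tctx, cancel := context.WithTimeout(ctx, time.Second)
+		defer cancel()
+		Expect(doWork(tctx)).To(Succeed())
+	}, NodeTimeout(time.Minute))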
+*/
+type SpecContext = internal.SpecContext
+
+/*
+GinkgoWriter implements a GinkgoWriterInterface and io.Writer
+
+When running in verbose mode (ginkgo -v) any writes to GinkgoWriter will be immediately printed
+to stdout. Otherwise, GinkgoWriter will buffer any writes produced during the current test and flush them to screen
+only if the current test fails.
+
+GinkgoWriter also provides convenience Print, Printf and Println methods and allows you to tee to a custom writer via GinkgoWriter.TeeTo(writer).
+Writes to GinkgoWriter are immediately sent to any registered TeeTo() writers. You can unregister all TeeTo() Writers with GinkgoWriter.ClearTeeWriters()
+
+You can learn more at https://onsi.github.io/ginkgo/#logging-output
+*/
+var GinkgoWriter GinkgoWriterInterface
+
+/*
+GinkgoLogr is a logr.Logger that writes to GinkgoWriter
+*/
+var GinkgoLogr logr.Logger
+
+// The interface by which Ginkgo receives *testing.T
+type GinkgoTestingT interface {
+ Fail()
+}
+
+/*
+GinkgoConfiguration returns the configuration of the current suite.
+
+The first return value is the SuiteConfig which controls aspects of how the suite runs,
+the second return value is the ReporterConfig which controls aspects of how Ginkgo's default
+reporter emits output.
+
+Mutating the returned configurations has no effect. To reconfigure Ginkgo programmatically you need
+to pass in your mutated copies into RunSpecs().
+
+You can learn more at https://onsi.github.io/ginkgo/#overriding-ginkgos-command-line-configuration-in-the-suite
+*/
+func GinkgoConfiguration() (types.SuiteConfig, types.ReporterConfig) {
+ return suiteConfig, reporterConfig
+}
+
+/*
+GinkgoRandomSeed returns the seed used to randomize spec execution order. It is
+useful for seeding your own pseudorandom number generators to ensure
+consistent executions from run to run, where your tests contain variability (for
+example, when selecting random spec data).
+
+You can learn more at https://onsi.github.io/ginkgo/#spec-randomization
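+
+For example:
+
+	rng := rand.New(rand.NewSource(GinkgoRandomSeed()))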
+*/
+func GinkgoRandomSeed() int64 {
+ return suiteConfig.RandomSeed
+}
+
+/*
+GinkgoParallelProcess returns the parallel process number for the current ginkgo process
+The process number is 1-indexed. You can use GinkgoParallelProcess() to shard access to shared
+resources across your suites. You can learn more about patterns for sharding at https://onsi.github.io/ginkgo/#patterns-for-parallel-integration-specs
+
+For more on how specs are parallelized in Ginkgo, see http://onsi.github.io/ginkgo/#spec-parallelization
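+
+For example (the database naming scheme is illustrative):
+
+	dbName := fmt.Sprintf("test_db_%d", GinkgoParallelProcess())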
+*/
+func GinkgoParallelProcess() int {
+ return suiteConfig.ParallelProcess
+}
+
+/*
+GinkgoHelper marks the function it's called in as a test helper. When a failure occurs inside a helper function, Ginkgo will skip the helper when analyzing the stack trace to identify where the failure occurred.
+
+This is a simpler alternative to passing in a skip offset when calling Fail or using Gomega.
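+
+For example (a sketch; expectValidWidget and Widget are illustrative):
+
+	func expectValidWidget(w Widget) {
+		GinkgoHelper()
+		Expect(w.Valid()).To(BeTrue()) // a failure here is reported at the caller's line
+	}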
+*/
+func GinkgoHelper() {
+ types.MarkAsHelper(1)
+}
+
+/*
+GinkgoLabelFilter() returns the label filter configured for this suite via `--label-filter`.
+
+You can use this to manually check if a set of labels would satisfy the filter via:
+
+ if (Label("cat", "dog").MatchesLabelFilter(GinkgoLabelFilter())) {
+ //...
+ }
+*/
+func GinkgoLabelFilter() string {
+ suiteConfig, _ := GinkgoConfiguration()
+ return suiteConfig.LabelFilter
+}
+
+/*
+PauseOutputInterception() pauses Ginkgo's output interception. This is only relevant
+when running in parallel and output to stdout/stderr is being intercepted. You generally
+don't need to call this function - however there are cases when Ginkgo's output interception
+mechanisms can interfere with external processes launched by the test process.
+
+In particular, if an external process is launched that has cmd.Stdout/cmd.Stderr set to os.Stdout/os.Stderr
+then Ginkgo's output interceptor will hang. To circumvent this, set cmd.Stdout/cmd.Stderr to GinkgoWriter.
+If, for some reason, you aren't able to do that, you can PauseOutputInterception() before starting the process
+then ResumeOutputInterception() after starting it.
+
+Note that PauseOutputInterception() does not cause stdout writes to print to the console -
+this simply stops intercepting and storing stdout writes to an internal buffer.
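+
+For example (a sketch; the external command is hypothetical):
+
+	PauseOutputInterception()
+	cmd := exec.Command("external-tool")
+	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
+	Expect(cmd.Start()).To(Succeed())
+	ResumeOutputInterception()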
+*/
+func PauseOutputInterception() {
+ if outputInterceptor == nil {
+ return
+ }
+ outputInterceptor.PauseIntercepting()
+}
+
+// ResumeOutputInterception() - see docs for PauseOutputInterception()
+func ResumeOutputInterception() {
+ if outputInterceptor == nil {
+ return
+ }
+ outputInterceptor.ResumeIntercepting()
+}
+
+/*
+RunSpecs is the entry point for the Ginkgo spec runner.
+
+You must call this within a Golang testing TestX(t *testing.T) function.
+If you bootstrapped your suite with "ginkgo bootstrap" this is already
+done for you.
+
+Ginkgo is typically configured via command-line flags. This configuration
+can be overridden, however, and passed into RunSpecs as optional arguments:
+
+ func TestMySuite(t *testing.T) {
+ RegisterFailHandler(gomega.Fail)
+ // fetch the current config
+ suiteConfig, reporterConfig := GinkgoConfiguration()
+ // adjust it
+ suiteConfig.SkipStrings = []string{"NEVER-RUN"}
+ reporterConfig.FullTrace = true
+ // pass it in to RunSpecs
+ RunSpecs(t, "My Suite", suiteConfig, reporterConfig)
+ }
+
+Note that some configuration changes can lead to undefined behavior. For example,
+you should not change ParallelProcess or ParallelTotal as the Ginkgo CLI is responsible
+for setting these and orchestrating parallel specs across the parallel processes. See http://onsi.github.io/ginkgo/#spec-parallelization
+for more on how specs are parallelized in Ginkgo.
+
+You can also pass suite-level Label() decorators to RunSpecs. The passed-in labels will apply to all specs in the suite.
+*/
+func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool {
+ if suiteDidRun {
+ exitIfErr(types.GinkgoErrors.RerunningSuite())
+ }
+ suiteDidRun = true
+ err := global.PushClone()
+ if err != nil {
+ exitIfErr(err)
+ }
+ defer global.PopClone()
+
+ suiteLabels := extractSuiteConfiguration(args)
+
+ var reporter reporters.Reporter
+ if suiteConfig.ParallelTotal == 1 {
+ reporter = reporters.NewDefaultReporter(reporterConfig, formatter.ColorableStdOut)
+ outputInterceptor = internal.NoopOutputInterceptor{}
+ client = nil
+ } else {
+ reporter = reporters.NoopReporter{}
+ switch strings.ToLower(suiteConfig.OutputInterceptorMode) {
+ case "swap":
+ outputInterceptor = internal.NewOSGlobalReassigningOutputInterceptor()
+ case "none":
+ outputInterceptor = internal.NoopOutputInterceptor{}
+ default:
+ outputInterceptor = internal.NewOutputInterceptor()
+ }
+ client = parallel_support.NewClient(suiteConfig.ParallelHost)
+ if !client.Connect() {
+ client = nil
+ exitIfErr(types.GinkgoErrors.UnreachableParallelHost(suiteConfig.ParallelHost))
+ }
+ defer client.Close()
+ }
+
+ writer := GinkgoWriter.(*internal.Writer)
+ if reporterConfig.Verbosity().GTE(types.VerbosityLevelVerbose) && suiteConfig.ParallelTotal == 1 {
+ writer.SetMode(internal.WriterModeStreamAndBuffer)
+ } else {
+ writer.SetMode(internal.WriterModeBufferOnly)
+ }
+
+ if reporterConfig.WillGenerateReport() {
+ registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig)
+ }
+
+ err = global.Suite.BuildTree()
+ exitIfErr(err)
+ suitePath, err := getwd()
+ exitIfErr(err)
+ suitePath, err = filepath.Abs(suitePath)
+ exitIfErr(err)
+
+ passed, hasFocusedTests := global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig)
+ outputInterceptor.Shutdown()
+
+ flagSet.ValidateDeprecations(deprecationTracker)
+ if deprecationTracker.DidTrackDeprecations() {
+ fmt.Fprintln(formatter.ColorableStdErr, deprecationTracker.DeprecationsReport())
+ }
+
+ if !passed {
+ t.Fail()
+ }
+
+ if passed && hasFocusedTests && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" {
+ fmt.Println("PASS | FOCUSED")
+ os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
+ }
+ return passed
+}
+
+func extractSuiteConfiguration(args []interface{}) Labels {
+ suiteLabels := Labels{}
+ configErrors := []error{}
+ for _, arg := range args {
+ switch arg := arg.(type) {
+ case types.SuiteConfig:
+ suiteConfig = arg
+ case types.ReporterConfig:
+ reporterConfig = arg
+ case Labels:
+ suiteLabels = append(suiteLabels, arg...)
+ default:
+ configErrors = append(configErrors, types.GinkgoErrors.UnknownTypePassedToRunSpecs(arg))
+ }
+ }
+ exitIfErrors(configErrors)
+
+ configErrors = types.VetConfig(flagSet, suiteConfig, reporterConfig)
+ if len(configErrors) > 0 {
+ fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{red}}Ginkgo detected configuration issues:{{/}}\n"))
+ for _, err := range configErrors {
+ fmt.Fprintf(formatter.ColorableStdErr, err.Error())
+ }
+ os.Exit(1)
+ }
+
+ return suiteLabels
+}
+
+func getwd() (string, error) {
+ if !strings.EqualFold(os.Getenv("GINKGO_PRESERVE_CACHE"), "true") {
+ // Getwd calls os.Getenv("PWD"), which breaks test caching if the cache
+ // is shared between two different directories with the same test code.
+ return os.Getwd()
+ }
+ return "", nil
+}
+
+/*
+PreviewSpecs walks the testing tree and produces a report without actually invoking the specs.
+See http://onsi.github.io/ginkgo/#previewing-specs for more information.
+*/
+func PreviewSpecs(description string, args ...any) Report {
+ err := global.PushClone()
+ if err != nil {
+ exitIfErr(err)
+ }
+ defer global.PopClone()
+
+ suiteLabels := extractSuiteConfiguration(args)
+ priorDryRun, priorParallelTotal, priorParallelProcess := suiteConfig.DryRun, suiteConfig.ParallelTotal, suiteConfig.ParallelProcess
+ suiteConfig.DryRun, suiteConfig.ParallelTotal, suiteConfig.ParallelProcess = true, 1, 1
+ defer func() {
+ suiteConfig.DryRun, suiteConfig.ParallelTotal, suiteConfig.ParallelProcess = priorDryRun, priorParallelTotal, priorParallelProcess
+ }()
+ reporter := reporters.NoopReporter{}
+ outputInterceptor = internal.NoopOutputInterceptor{}
+ client = nil
+ writer := GinkgoWriter.(*internal.Writer)
+
+ err = global.Suite.BuildTree()
+ exitIfErr(err)
+ suitePath, err := getwd()
+ exitIfErr(err)
+ suitePath, err = filepath.Abs(suitePath)
+ exitIfErr(err)
+
+ global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig)
+
+ return global.Suite.GetPreviewReport()
+}
+
+/*
+Skip instructs Ginkgo to skip the current spec
+
+You can call Skip in any Setup or Subject node closure.
+
+For more on how to filter specs in Ginkgo see https://onsi.github.io/ginkgo/#filtering-specs
+*/
+func Skip(message string, callerSkip ...int) {
+ skip := 0
+ if len(callerSkip) > 0 {
+ skip = callerSkip[0]
+ }
+ cl := types.NewCodeLocationWithStackTrace(skip + 1)
+ global.Failer.Skip(message, cl)
+ panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl))
+}
+
+/*
+Fail notifies Ginkgo that the current spec has failed. (Gomega will call Fail for you automatically when an assertion fails.)
+
+Under the hood, Fail panics to end execution of the current spec. Ginkgo will catch this panic and proceed with
+the subsequent spec. If you call Fail, or make an assertion, within a goroutine launched by your spec you must
+add defer GinkgoRecover() to the goroutine to catch the panic emitted by Fail.
+
+You can call Fail in any Setup or Subject node closure.
+
+You can learn more about how Ginkgo manages failures here: https://onsi.github.io/ginkgo/#mental-model-how-ginkgo-handles-failure
+*/
+func Fail(message string, callerSkip ...int) {
+ skip := 0
+ if len(callerSkip) > 0 {
+ skip = callerSkip[0]
+ }
+
+ cl := types.NewCodeLocationWithStackTrace(skip + 1)
+ global.Failer.Fail(message, cl)
+ panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl))
+}
+
+/*
+AbortSuite instructs Ginkgo to fail the current spec and skip all subsequent specs, thereby aborting the suite.
+
+You can call AbortSuite in any Setup or Subject node closure.
+
+You can learn more about how Ginkgo handles suite interruptions here: https://onsi.github.io/ginkgo/#interrupting-aborting-and-timing-out-suites
+*/
+func AbortSuite(message string, callerSkip ...int) {
+ skip := 0
+ if len(callerSkip) > 0 {
+ skip = callerSkip[0]
+ }
+
+ cl := types.NewCodeLocationWithStackTrace(skip + 1)
+ global.Failer.AbortSuite(message, cl)
+ panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl))
+}
+
+/*
+ignorablePanic is used by Gomega to signal to GinkgoRecover that Gomega is handling
+the error associated with this panic. It is used when Eventually/Consistently are passed a func(g Gomega) and the resulting function launches a goroutine that makes a failed assertion. That failed assertion is registered by Gomega and then panics. Ordinarily the panic is captured by Gomega. In the case of a goroutine Gomega can't capture the panic - so we piggyback on GinkgoRecover so users have a single defer GinkgoRecover() pattern to follow. To do that we need to tell Ginkgo to ignore this panic and not register it as a panic on the global Failer.
+*/
+type ignorablePanic interface{ GinkgoRecoverShouldIgnoreThisPanic() }
+
+/*
+GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail`.
+Since Gomega assertions call Fail, you should add a `defer GinkgoRecover()` at the top of any goroutine that
+calls out to Gomega.
+
+Here's why: Ginkgo's `Fail` method records the failure and then panics to prevent
+further assertions from running. This panic must be recovered. Normally, Ginkgo recovers the panic for you,
+however if a panic originates on a goroutine *launched* from one of your specs there's no
+way for Ginkgo to rescue the panic. To do this, you must remember to `defer GinkgoRecover()` at the top of such a goroutine.
+
+You can learn more about how Ginkgo manages failures here: https://onsi.github.io/ginkgo/#mental-model-how-ginkgo-handles-failure
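+
+For example (doSomethingAsync is illustrative):
+
+	go func() {
+		defer GinkgoRecover()
+		Expect(doSomethingAsync()).To(Succeed())
+	}()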
+*/
+func GinkgoRecover() {
+ e := recover()
+ if e != nil {
+ if _, ok := e.(ignorablePanic); ok {
+ return
+ }
+ global.Failer.Panic(types.NewCodeLocationWithStackTrace(1), e)
+ }
+}
+
+// pushNode is used by the various test construction DSL methods to push nodes onto the suite
+// it handles returned errors, emits a detailed error message to help the user learn what they may have done wrong, then exits
+func pushNode(node internal.Node, errors []error) bool {
+ exitIfErrors(errors)
+ exitIfErr(global.Suite.PushNode(node))
+ return true
+}
+
+/*
+Describe nodes are Container nodes that allow you to organize your specs. A Describe node's closure can contain any number of
+Setup nodes (e.g. BeforeEach, AfterEach, JustBeforeEach), and Subject nodes (i.e. It).
+
+Context and When nodes are aliases for Describe - use whichever gives your suite a better narrative flow. It is idiomatic
+to Describe the behavior of an object or function and, within that Describe, outline a number of Contexts and Whens.
+
+You can learn more at https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes
+In addition, container nodes can be decorated with a variety of decorators. You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference
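+
+For example:
+
+	var _ = Describe("the widget store", func() {
+		When("the store is empty", func() {
+			It("returns no widgets", func() {
+				// assertions go here
+			})
+		})
+	})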
+*/
+func Describe(text string, args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...))
+}
+
+/*
+FDescribe focuses specs within the Describe block.
+*/
+func FDescribe(text string, args ...interface{}) bool {
+ args = append(args, internal.Focus)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...))
+}
+
+/*
+PDescribe marks specs within the Describe block as pending.
+*/
+func PDescribe(text string, args ...interface{}) bool {
+ args = append(args, internal.Pending)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...))
+}
+
+/*
+XDescribe marks specs within the Describe block as pending.
+
+XDescribe is an alias for PDescribe
+*/
+var XDescribe = PDescribe
+
+/* Context is an alias for Describe - it generates the exact same kind of Container node */
+var Context, FContext, PContext, XContext = Describe, FDescribe, PDescribe, XDescribe
+
+/* When is an alias for Describe - it generates the exact same kind of Container node */
+func When(text string, args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...))
+}
+
+/* FWhen focuses specs within the When block - it is the focused analog of When */
+func FWhen(text string, args ...interface{}) bool {
+ args = append(args, internal.Focus)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...))
+}
+
+/* PWhen marks specs within the When block as pending - it is the pending analog of When */
+func PWhen(text string, args ...interface{}) bool {
+ args = append(args, internal.Pending)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...))
+}
+
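+/* XWhen is an alias for PWhen - it marks specs within the When block as pending */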
+var XWhen = PWhen
+
+/*
+It nodes are Subject nodes that contain your spec code and assertions.
+
+Each It node corresponds to an individual Ginkgo spec. You cannot nest any other Ginkgo nodes within an It node's closure.
+
+You can pass It nodes bare functions (func() {}) or functions that receive a SpecContext or context.Context: func(ctx SpecContext) {} and func(ctx context.Context) {}. If the function takes a context then the It is deemed interruptible and Ginkgo will cancel the context in the event of a timeout (configured via the SpecTimeout() or NodeTimeout() decorators) or an interrupt signal.
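+
+For example (a sketch; the URL is illustrative):
+
+ It("fetches the status page", func(ctx SpecContext) {
+  req, err := http.NewRequestWithContext(ctx, "GET", "http://example.com/status", nil)
+  Expect(err).NotTo(HaveOccurred())
+  _, err = http.DefaultClient.Do(req)
+  Expect(err).NotTo(HaveOccurred())
+ }, SpecTimeout(time.Second))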
+
+You can learn more at https://onsi.github.io/ginkgo/#spec-subjects-it
+In addition, subject nodes can be decorated with a variety of decorators. You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+func It(text string, args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...))
+}
+
+/*
+FIt allows you to focus an individual It.
+*/
+func FIt(text string, args ...interface{}) bool {
+ args = append(args, internal.Focus)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...))
+}
+
+/*
+PIt allows you to mark an individual It as pending.
+*/
+func PIt(text string, args ...interface{}) bool {
+ args = append(args, internal.Pending)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...))
+}
+
+/*
+XIt allows you to mark an individual It as pending.
+
+XIt is an alias for PIt
+*/
+var XIt = PIt
+
+/*
+Specify is an alias for It - it can allow for more natural wording in some contexts.
+*/
+var Specify, FSpecify, PSpecify, XSpecify = It, FIt, PIt, XIt
+
+/*
+By allows you to better document complex Specs.
+
+Generally you should try to keep your Its short and to the point. This is not always possible, however,
+especially in the context of integration tests that capture complex or lengthy workflows.
+
+By allows you to document such flows. By may be called within a Setup or Subject node (It, BeforeEach, etc...)
+and will simply log the passed in text to the GinkgoWriter. If By is handed a function it will immediately run the function.
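+
+For example (a sketch; NewOrder and Submit are illustrative):
+
+ It("processes an order", func() {
+  By("creating the order")
+  order := NewOrder()
+
+  By("submitting it", func() {
+   Expect(Submit(order)).To(Succeed())
+  })
+ })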
+
+By will also generate and attach a ReportEntry to the spec. This will ensure that By annotations appear in Ginkgo's machine-readable reports.
+
+Note that By does not generate a new Ginkgo node - rather it is simply syntactic sugar around GinkgoWriter and AddReportEntry.
+You can learn more about By here: https://onsi.github.io/ginkgo/#documenting-complex-specs-by
+*/
+func By(text string, callback ...func()) {
+ exitIfErr(global.Suite.By(text, callback...))
+}
+
+/*
+BeforeSuite nodes are suite-level Setup nodes that run just once before any specs are run.
+When running in parallel, each parallel process will call BeforeSuite.
+
+You may only register *one* BeforeSuite handler per test suite. You typically do so in your bootstrap file at the top level.
+
+BeforeSuite can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body.
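+
+For example (a sketch; Client and NewClient are illustrative):
+
+ var client *Client
+
+ var _ = BeforeSuite(func() {
+  client = NewClient()
+ })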
+
+You cannot nest any other Ginkgo nodes within a BeforeSuite node's closure.
+You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite
+*/
+func BeforeSuite(body interface{}, args ...interface{}) bool {
+ combinedArgs := []interface{}{body}
+ combinedArgs = append(combinedArgs, args...)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeSuite, "", combinedArgs...))
+}
+
+/*
+AfterSuite nodes are suite-level Setup nodes run after all specs have finished - regardless of whether specs have passed or failed.
+AfterSuite node closures always run, even if Ginkgo receives an interrupt signal (^C), in order to ensure cleanup occurs.
+
+When running in parallel, each parallel process will call AfterSuite.
+
+You may only register *one* AfterSuite handler per test suite. You typically do so in your bootstrap file at the top level.
+
+AfterSuite can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body.
+
+You cannot nest any other Ginkgo nodes within an AfterSuite node's closure.
+You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite
+*/
+func AfterSuite(body interface{}, args ...interface{}) bool {
+ combinedArgs := []interface{}{body}
+ combinedArgs = append(combinedArgs, args...)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterSuite, "", combinedArgs...))
+}
+
+/*
+SynchronizedBeforeSuite nodes allow you to perform some of the suite setup just once - on parallel process #1 - and then pass information
+from that setup to the rest of the suite setup on all processes. This is useful for performing expensive or singleton setup once, then passing
+information from that setup to all parallel processes.
+
+SynchronizedBeforeSuite accomplishes this by taking *two* function arguments and passing data between them.
+The first function is only run on parallel process #1. The second is run on all processes, but *only* after the first function completes successfully.
+
+The first function (which only runs on process #1) can have any of the following signatures:
+
+ func()
+ func(ctx context.Context)
+ func(ctx SpecContext)
+ func() []byte
+ func(ctx context.Context) []byte
+ func(ctx SpecContext) []byte
+
+The byte array returned by the first function (if present) is then passed to the second function, which can have any of the following signatures:
+
+ func()
+ func(ctx context.Context)
+ func(ctx SpecContext)
+ func(data []byte)
+ func(ctx context.Context, data []byte)
+ func(ctx SpecContext, data []byte)
+
+If either function receives a context.Context/SpecContext it is considered interruptible.
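+
+For example (a sketch; startDatabase is an illustrative helper that returns an address):
+
+ var dbAddress string
+
+ var _ = SynchronizedBeforeSuite(func() []byte {
+  return []byte(startDatabase()) // runs once, on process #1
+ }, func(data []byte) {
+  dbAddress = string(data) // runs on every process
+ })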
+
+You cannot nest any other Ginkgo nodes within a SynchronizedBeforeSuite node's closure.
+You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite
+*/
+func SynchronizedBeforeSuite(process1Body interface{}, allProcessBody interface{}, args ...interface{}) bool {
+ combinedArgs := []interface{}{process1Body, allProcessBody}
+ combinedArgs = append(combinedArgs, args...)
+
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeSynchronizedBeforeSuite, "", combinedArgs...))
+}
+
+/*
+SynchronizedAfterSuite nodes complement the SynchronizedBeforeSuite nodes in solving the problem of splitting cleanup into a piece that runs on all processes
+and a piece that must only run once - on process #1.
+
+SynchronizedAfterSuite accomplishes this by taking *two* function arguments. The first runs on all processes. The second runs only on parallel process #1
+and *only* after all other processes have finished and exited. This ensures that process #1, and any resources it is managing, remain alive until
+all other processes are finished. These two functions can be bare functions (func()) or interruptible (func(context.Context)/func(SpecContext)).
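+
+For example (a sketch; closeClient and stopDatabase are illustrative helpers):
+
+ var _ = SynchronizedAfterSuite(func() {
+  closeClient() // runs on every process
+ }, func() {
+  stopDatabase() // runs once, on process #1, after all other processes have exited
+ })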
+
+Note that you can also use DeferCleanup() in SynchronizedBeforeSuite to accomplish similar results.
+
+You cannot nest any other Ginkgo nodes within a SynchronizedAfterSuite node's closure.
+You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite
+*/
+func SynchronizedAfterSuite(allProcessBody interface{}, process1Body interface{}, args ...interface{}) bool {
+ combinedArgs := []interface{}{allProcessBody, process1Body}
+ combinedArgs = append(combinedArgs, args...)
+
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeSynchronizedAfterSuite, "", combinedArgs...))
+}
+
+/*
+BeforeEach nodes are Setup nodes whose closures run before It node closures. When multiple BeforeEach nodes
+are defined in nested Container nodes the outermost BeforeEach node closures are run first.
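+
+For example, a sketch showing the outermost-first ordering:
+
+ var order []string
+
+ Describe("outer", func() {
+  BeforeEach(func() { order = append(order, "outer") })
+
+  Describe("inner", func() {
+   BeforeEach(func() { order = append(order, "inner") })
+
+   It("runs the outer closure first", func() {
+    Expect(order).To(Equal([]string{"outer", "inner"}))
+   })
+  })
+ })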
+
+BeforeEach can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body.
+
+You cannot nest any other Ginkgo nodes within a BeforeEach node's closure.
+You can learn more here: https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach
+*/
+func BeforeEach(args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeEach, "", args...))
+}
+
+/*
+JustBeforeEach nodes are similar to BeforeEach nodes, however they are guaranteed to run *after* all BeforeEach node closures - just before the It node closure.
+This can allow you to separate configuration from creation of resources for a spec.
+
+JustBeforeEach can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body.
+
+You cannot nest any other Ginkgo nodes within a JustBeforeEach node's closure.
+You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach
+*/
+func JustBeforeEach(args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustBeforeEach, "", args...))
+}
+
+/*
+AfterEach nodes are Setup nodes whose closures run after It node closures. When multiple AfterEach nodes
+are defined in nested Container nodes the innermost AfterEach node closures are run first.
+
+Note that you can also use DeferCleanup() in other Setup or Subject nodes to accomplish similar results.
+
+AfterEach can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body.
+
+You cannot nest any other Ginkgo nodes within an AfterEach node's closure.
+You can learn more here: https://onsi.github.io/ginkgo/#spec-cleanup-aftereach-and-defercleanup
+*/
+func AfterEach(args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterEach, "", args...))
+}
+
+/*
+JustAfterEach nodes are similar to AfterEach nodes, however they are guaranteed to run *before* all AfterEach node closures - just after the It node closure. This can allow you to separate diagnostics collection from teardown for a spec.
+
+JustAfterEach can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body.
+
+You cannot nest any other Ginkgo nodes within a JustAfterEach node's closure.
+You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-diagnostics-collection-and-teardown-justaftereach
+*/
+func JustAfterEach(args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustAfterEach, "", args...))
+}
+
+/*
+BeforeAll nodes are Setup nodes that can occur inside Ordered containers. They run just once before any specs in the Ordered container run.
+
+Multiple BeforeAll nodes can be defined in a given Ordered container however they cannot be nested inside any other container.
+
+BeforeAll can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body.
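+
+For example (a sketch; Server and StartServer are illustrative):
+
+ Describe("a server workflow", Ordered, func() {
+  var server *Server
+
+  BeforeAll(func() {
+   server = StartServer()
+  })
+
+  It("accepts connections", func() {
+   Expect(server.Ping()).To(Succeed())
+  })
+ })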
+
+You cannot nest any other Ginkgo nodes within a BeforeAll node's closure.
+You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers
+And you can learn more about BeforeAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall
+*/
+func BeforeAll(args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeAll, "", args...))
+}
+
+/*
+AfterAll nodes are Setup nodes that can occur inside Ordered containers. They run just once after all specs in the Ordered container have run.
+
+Multiple AfterAll nodes can be defined in a given Ordered container however they cannot be nested inside any other container.
+
+Note that you can also use DeferCleanup() in a BeforeAll node to accomplish similar behavior.
+
+AfterAll can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body.
+
+You cannot nest any other Ginkgo nodes within an AfterAll node's closure.
+You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers
+And you can learn more about AfterAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall
+*/
+func AfterAll(args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterAll, "", args...))
+}
+
+/*
+DeferCleanup can be called within any Setup or Subject node to register a cleanup callback that Ginkgo will call at the appropriate time to cleanup after the spec.
+
+DeferCleanup can be passed:
+1. A function that takes no arguments and returns no values.
+2. A function that returns multiple values. `DeferCleanup` will ignore all these return values except for the last one. If this last return value is a non-nil error `DeferCleanup` will fail the spec.
+3. A function that takes a context.Context or SpecContext (and optionally returns multiple values). The resulting cleanup node is deemed interruptible and the passed-in context will be cancelled in the event of a timeout or interrupt.
+4. A function that takes arguments (and optionally returns multiple values) followed by a list of arguments to pass to the function.
+5. A function that takes SpecContext and a list of arguments (and optionally returns multiple values) followed by a list of arguments to pass to the function.
+
+For example:
+
+ BeforeEach(func() {
+ DeferCleanup(os.Setenv, "FOO", os.Getenv("FOO"))
+ os.Setenv("FOO", "BAR")
+ })
+
+will register a cleanup handler that restores the environment variable "FOO" to its original value (captured via os.Getenv("FOO")) after the spec runs; the spec itself then sets "FOO" to "BAR".
+
+Similarly:
+
+ BeforeEach(func() {
+ DeferCleanup(func(ctx SpecContext, path string) {
+  req, err := http.NewRequestWithContext(ctx, "POST", path, nil)
+  Expect(err).NotTo(HaveOccurred())
+  _, err = http.DefaultClient.Do(req)
+  Expect(err).NotTo(HaveOccurred())
+ }, "example.com/cleanup", NodeTimeout(time.Second*3))
+ })
+
+will register a cleanup handler that will have three seconds to successfully complete a request to the specified path. Note that we do not specify a context in the list of arguments passed to DeferCleanup - only in the signature of the function we pass in. Ginkgo will detect the requested context and supply a SpecContext when it invokes the cleanup node. If you want to pass in your own context in addition to the Ginkgo-provided SpecContext you must specify the SpecContext as the first argument (e.g. func(ctx SpecContext, otherCtx context.Context)).
+
+When DeferCleanup is called in BeforeEach, JustBeforeEach, It, AfterEach, or JustAfterEach the registered callback will be invoked when the spec completes (i.e. it will behave like an AfterEach node)
+When DeferCleanup is called in BeforeAll or AfterAll the registered callback will be invoked when the ordered container completes (i.e. it will behave like an AfterAll node)
+When DeferCleanup is called in BeforeSuite, SynchronizedBeforeSuite, AfterSuite, or SynchronizedAfterSuite the registered callback will be invoked when the suite completes (i.e. it will behave like an AfterSuite node)
+
+Note that DeferCleanup does not represent a node but rather dynamically generates the appropriate type of cleanup node based on the context in which it is called. As such you must call DeferCleanup within a Setup or Subject node, and not within a Container node.
+You can learn more about DeferCleanup here: https://onsi.github.io/ginkgo/#cleaning-up-our-cleanup-code-defercleanup
+*/
+func DeferCleanup(args ...interface{}) {
+ fail := func(message string, cl types.CodeLocation) {
+ global.Failer.Fail(message, cl)
+ }
+ pushNode(internal.NewCleanupNode(deprecationTracker, fail, args...))
+}
+
+/*
+AttachProgressReporter allows you to register a function that will be called whenever Ginkgo generates a Progress Report. The contents returned by the function will be included in the report.
+
+**This is an experimental feature and the public-facing interface may change in a future minor version of Ginkgo**
+
+Progress Reports are generated:
+- whenever the user explicitly requests one (via `SIGINFO` or `SIGUSR1`)
+- on nodes decorated with PollProgressAfter
+- on suites run with --poll-progress-after
+- whenever a test times out
+
+Ginkgo uses Progress Reports to convey the current state of the test suite, including any running goroutines. By attaching a progress reporter you are able to supplement these reports with additional information.
+
+AttachProgressReporter returns a function that can be called to detach the progress reporter.
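+
+For example (a sketch; queue is an illustrative variable):
+
+ detach := AttachProgressReporter(func() string {
+  return fmt.Sprintf("queue depth: %d", queue.Len())
+ })
+ defer detach()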
+
+You can learn more about AttachProgressReporter here: https://onsi.github.io/ginkgo/#attaching-additional-information-to-progress-reports
+*/
+func AttachProgressReporter(reporter func() string) func() {
+ return global.Suite.AttachProgressReporter(reporter)
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/core_dsl_patch.go b/vendor/github.com/onsi/ginkgo/v2/core_dsl_patch.go
new file mode 100644
index 0000000..0260e4a
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/core_dsl_patch.go
@@ -0,0 +1,27 @@
+package ginkgo
+
+import (
+ "github.com/onsi/ginkgo/v2/internal"
+ "github.com/onsi/ginkgo/v2/internal/global"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
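+// AppendSpecText appends additional text to the given spec's description.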
+func AppendSpecText(test *internal.Spec, text string) {
+ test.AppendText(text)
+}
+
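+// GetSuite returns Ginkgo's global suite object.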
+func GetSuite() *internal.Suite {
+ return global.Suite
+}
+
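+// GetFailer returns Ginkgo's global failer.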
+func GetFailer() *internal.Failer {
+ return global.Failer
+}
+
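+// GetWriter returns GinkgoWriter as its concrete *internal.Writer type.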
+func GetWriter() *internal.Writer {
+ return GinkgoWriter.(*internal.Writer)
+}
+
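+// SetReporterConfig overrides the package-level reporter configuration.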
+func SetReporterConfig(r types.ReporterConfig) {
+ reporterConfig = r
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go b/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go
new file mode 100644
index 0000000..c65af4c
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go
@@ -0,0 +1,143 @@
+package ginkgo
+
+import (
+ "github.com/onsi/ginkgo/v2/internal"
+)
+
+/*
+Offset(uint) is a decorator that allows you to change the stack-frame offset used when computing the line number of the node in question.
+
+You can learn more here: https://onsi.github.io/ginkgo/#the-offset-decorator
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+type Offset = internal.Offset
+
+/*
+FlakeAttempts(uint N) is a decorator that allows you to mark individual specs or spec containers as flaky. Ginkgo will run them up to `N` times until they pass.
+
+You can learn more here: https://onsi.github.io/ginkgo/#the-flakeattempts-decorator
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+type FlakeAttempts = internal.FlakeAttempts
+
+/*
+MustPassRepeatedly(uint N) is a decorator that allows you to repeat the execution of individual specs or spec containers. Ginkgo will run them up to `N` times until they fail.
+
+You can learn more here: https://onsi.github.io/ginkgo/#the-mustpassrepeatedly-decorator
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+type MustPassRepeatedly = internal.MustPassRepeatedly
+
+/*
+Focus is a decorator that allows you to mark a spec or container as focused. Identical to FIt and FDescribe.
+
+You can learn more here: https://onsi.github.io/ginkgo/#filtering-specs
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+const Focus = internal.Focus
+
+/*
+Pending is a decorator that allows you to mark a spec or container as pending. Identical to PIt and PDescribe.
+
+You can learn more here: https://onsi.github.io/ginkgo/#filtering-specs
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+const Pending = internal.Pending
+
+/*
+Serial is a decorator that allows you to mark a spec or container as serial. These specs will never run in parallel with other specs.
+Specs in ordered containers cannot be marked as serial - mark the ordered container instead.
+
+You can learn more here: https://onsi.github.io/ginkgo/#serial-specs
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+const Serial = internal.Serial
+
+/*
+Ordered is a decorator that allows you to mark a container as ordered. Specs in the container will always run in the order they appear.
+They will never be randomized and they will never run in parallel with one another, though they may run in parallel with other specs.
+
+You can learn more here: https://onsi.github.io/ginkgo/#ordered-containers
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+const Ordered = internal.Ordered
+
+/*
+ContinueOnFailure is a decorator that allows you to mark an Ordered container to continue running specs even if failures occur. Ordinarily an ordered container will stop running specs after the first failure occurs. Note that if a BeforeAll or a BeforeEach/JustBeforeEach annotated with OncePerOrdered fails then no specs will run, as the precondition for the Ordered container is considered to have failed.
+
+ContinueOnFailure only applies to the outermost Ordered container. Attempting to place ContinueOnFailure in a nested container will result in an error.
+
+You can learn more here: https://onsi.github.io/ginkgo/#ordered-containers
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+const ContinueOnFailure = internal.ContinueOnFailure
+
+/*
+OncePerOrdered is a decorator that allows you to mark outer BeforeEach, AfterEach, JustBeforeEach, and JustAfterEach setup nodes to run once
+per ordered context. Normally these setup nodes run around each individual spec, with OncePerOrdered they will run once around the set of specs in an ordered container.
+The behavior for non-Ordered containers/specs is unchanged.
+
+You can learn more here: https://onsi.github.io/ginkgo/#setup-around-ordered-containers-the-onceperordered-decorator
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+const OncePerOrdered = internal.OncePerOrdered
+
+/*
+Label decorates specs with Labels. Multiple labels can be passed to Label and these can be arbitrary strings but must not include the following characters: "&|!,()/".
+Labels can be applied to container and subject nodes, but not setup nodes. You can provide multiple Labels to a given node and a spec's labels are the union of all labels in its node hierarchy.
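+
+For example (a sketch; the labels are illustrative):
+
+ Describe("payments", Label("integration"), func() {
+  It("charges the card", Label("slow"), func() {
+   // this spec's labels are the union: "integration" and "slow"
+  })
+ })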
+
+You can learn more here: https://onsi.github.io/ginkgo/#spec-labels
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+func Label(labels ...string) Labels {
+ return Labels(labels)
+}
+
+/*
+Labels are the type for spec Label decorators. Use Label(...) to construct Labels.
+You can learn more here: https://onsi.github.io/ginkgo/#spec-labels
+*/
+type Labels = internal.Labels
+
+/*
+PollProgressAfter allows you to override the configured value for --poll-progress-after for a particular node.
+
+Ginkgo will start emitting node progress if the node is still running after a duration of PollProgressAfter. This allows you to get quicker feedback about the state of a long-running spec.
+*/
+type PollProgressAfter = internal.PollProgressAfter
+
+/*
+PollProgressInterval allows you to override the configured value for --poll-progress-interval for a particular node.
+
+Once a node has been running for longer than PollProgressAfter Ginkgo will emit node progress periodically at an interval of PollProgressInterval.
+*/
+type PollProgressInterval = internal.PollProgressInterval
+
+/*
+NodeTimeout allows you to specify a timeout for an individual node. The node cannot be a container and must be interruptible (i.e. it must be passed a function that accepts a SpecContext or context.Context).
+
+If the node does not exit within the specified NodeTimeout its context will be cancelled. The node will then have a period of time controlled by the GracePeriod decorator (or global --grace-period command-line argument) to exit. If the node does not exit within GracePeriod Ginkgo will leak the node and proceed to any clean-up nodes associated with the current spec.
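+
+For example (a sketch; the durations and pollServer helper are illustrative):
+
+ BeforeEach(func(ctx SpecContext) {
+  Eventually(pollServer).WithContext(ctx).Should(Succeed())
+ }, NodeTimeout(10*time.Second), GracePeriod(2*time.Second))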
+*/
+type NodeTimeout = internal.NodeTimeout
+
+/*
+SpecTimeout allows you to specify a timeout for an individual spec. SpecTimeout can only decorate interruptible It nodes.
+
+All nodes associated with the It node will need to complete before the SpecTimeout has elapsed. Individual nodes (e.g. BeforeEach) may be decorated with different NodeTimeouts - but these can only serve to provide a more stringent deadline for the node in question; they cannot extend the deadline past the SpecTimeout.
+
+If the spec does not complete within the specified SpecTimeout the currently running node will have its context cancelled. The node will then have a period of time controlled by that node's GracePeriod decorator (or global --grace-period command-line argument) to exit. If the node does not exit within GracePeriod Ginkgo will leak the node and proceed to any clean-up nodes associated with the current spec.
+*/
+type SpecTimeout = internal.SpecTimeout
+
+/*
+GracePeriod denotes the period of time Ginkgo will wait for an interruptible node to exit once an interruption (whether due to a timeout or a user-invoked signal) has occurred. If both the global --grace-period cli flag and a GracePeriod decorator are specified the value in the decorator will take precedence.
+
+Nodes that do not finish within a GracePeriod will be leaked and Ginkgo will proceed to run subsequent nodes. In the event of a timeout, such leaks will be reported to the user.
+*/
+type GracePeriod = internal.GracePeriod
+
+/*
+SuppressProgressReporting is a decorator that allows you to disable progress reporting for a particular node. This is useful if `ginkgo -v -progress` is generating too much noise, particularly
+if you have a `ReportAfterEach` node that runs for every skipped spec and generates lots of progress reports.
+*/
+const SuppressProgressReporting = internal.SuppressProgressReporting
diff --git a/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go b/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go
new file mode 100644
index 0000000..f912bbe
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go
@@ -0,0 +1,135 @@
+package ginkgo
+
+import (
+ "time"
+
+ "github.com/onsi/ginkgo/v2/internal"
+ "github.com/onsi/ginkgo/v2/internal/global"
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+/*
+Deprecated: Done Channel for asynchronous testing
+
+The Done channel pattern is no longer supported in Ginkgo 2.0.
+See here for better patterns for asynchronous testing: https://onsi.github.io/ginkgo/#patterns-for-asynchronous-testing
+
+For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-async-testing
+*/
+type Done = internal.Done
+
+/*
+Deprecated: Custom Ginkgo test reporters are deprecated in Ginkgo 2.0.
+
+Use Ginkgo's reporting nodes and 2.0 reporting infrastructure instead. You can learn more here: https://onsi.github.io/ginkgo/#reporting-infrastructure
+For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters
+*/
+type Reporter = reporters.DeprecatedReporter
+
+/*
+Deprecated: Custom Reporters have been removed in Ginkgo 2.0. RunSpecsWithDefaultAndCustomReporters will simply call RunSpecs()
+
+Use Ginkgo's reporting nodes and 2.0 reporting infrastructure instead. You can learn more here: https://onsi.github.io/ginkgo/#reporting-infrastructure
+For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters
+*/
+func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, _ []Reporter) bool {
+ deprecationTracker.TrackDeprecation(types.Deprecations.CustomReporter())
+ return RunSpecs(t, description)
+}
+
+/*
+Deprecated: Custom Reporters have been removed in Ginkgo 2.0. RunSpecsWithCustomReporters will simply call RunSpecs()
+
+Use Ginkgo's reporting nodes and 2.0 reporting infrastructure instead. You can learn more here: https://onsi.github.io/ginkgo/#reporting-infrastructure
+For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters
+*/
+func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, _ []Reporter) bool {
+ deprecationTracker.TrackDeprecation(types.Deprecations.CustomReporter())
+ return RunSpecs(t, description)
+}
+
+/*
+Deprecated: GinkgoTestDescription has been replaced with SpecReport.
+
+Use CurrentSpecReport() instead.
+You can learn more here: https://onsi.github.io/ginkgo/#getting-a-report-for-the-current-spec
+The SpecReport type is documented here: https://pkg.go.dev/github.com/onsi/ginkgo/v2/types#SpecReport
+*/
+type DeprecatedGinkgoTestDescription struct {
+ FullTestText string
+ ComponentTexts []string
+ TestText string
+
+ FileName string
+ LineNumber int
+
+ Failed bool
+ Duration time.Duration
+}
+type GinkgoTestDescription = DeprecatedGinkgoTestDescription
+
+/*
+Deprecated: CurrentGinkgoTestDescription has been replaced with CurrentSpecReport.
+
+Use CurrentSpecReport() instead.
+You can learn more here: https://onsi.github.io/ginkgo/#getting-a-report-for-the-current-spec
+The SpecReport type is documented here: https://pkg.go.dev/github.com/onsi/ginkgo/v2/types#SpecReport
+*/
+func CurrentGinkgoTestDescription() DeprecatedGinkgoTestDescription {
+ deprecationTracker.TrackDeprecation(
+ types.Deprecations.CurrentGinkgoTestDescription(),
+ types.NewCodeLocation(1),
+ )
+ report := global.Suite.CurrentSpecReport()
+ if report.State == types.SpecStateInvalid {
+ return GinkgoTestDescription{}
+ }
+ componentTexts := []string{}
+ componentTexts = append(componentTexts, report.ContainerHierarchyTexts...)
+ componentTexts = append(componentTexts, report.LeafNodeText)
+
+ return DeprecatedGinkgoTestDescription{
+ ComponentTexts: componentTexts,
+ FullTestText: report.FullText(),
+ TestText: report.LeafNodeText,
+ FileName: report.LeafNodeLocation.FileName,
+ LineNumber: report.LeafNodeLocation.LineNumber,
+ Failed: report.State.Is(types.SpecStateFailureStates),
+ Duration: report.RunTime,
+ }
+}
+
+/*
+Deprecated: GinkgoParallelNode() has been renamed to GinkgoParallelProcess()
+*/
+func GinkgoParallelNode() int {
+ deprecationTracker.TrackDeprecation(
+ types.Deprecations.ParallelNode(),
+ types.NewCodeLocation(1),
+ )
+ return GinkgoParallelProcess()
+}
+
+/*
+Deprecated: Benchmarker has been removed from Ginkgo 2.0
+
+Use Gomega's gmeasure package instead.
+You can learn more here: https://onsi.github.io/ginkgo/#benchmarking-code
+*/
+type Benchmarker interface {
+ Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration)
+ RecordValue(name string, value float64, info ...interface{})
+ RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{})
+}
+
+/*
+Deprecated: Measure() has been removed from Ginkgo 2.0
+
+Use Gomega's gmeasure package instead.
+You can learn more here: https://onsi.github.io/ginkgo/#benchmarking-code
+*/
+func Measure(_ ...interface{}) bool {
+ deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), types.NewCodeLocation(1))
+ return true
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go
new file mode 100644
index 0000000..778bfd7
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go
@@ -0,0 +1,41 @@
+// +build !windows
+
+/*
+These packages are used to colorize output on Windows and were contributed by mattn.jp@gmail.com
+
+ * go-colorable:
+ * go-isatty:
+
+The MIT License (MIT)
+
+Copyright (c) 2016 Yasuhiro Matsumoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
+package formatter
+
+import (
+ "io"
+ "os"
+)
+
+func newColorable(file *os.File) io.Writer {
+ return file
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go
new file mode 100644
index 0000000..dd1d143
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go
@@ -0,0 +1,809 @@
+/*
+These packages are used to colorize output on Windows and were contributed by mattn.jp@gmail.com
+
+ * go-colorable:
+ * go-isatty:
+
+The MIT License (MIT)
+
+Copyright (c) 2016 Yasuhiro Matsumoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
+package formatter
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "strconv"
+ "strings"
+ "syscall"
+ "unsafe"
+)
+
+var (
+ kernel32 = syscall.NewLazyDLL("kernel32.dll")
+ procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
+ procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute")
+ procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
+ procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
+ procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute")
+ procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+)
+
+func isTerminal(fd uintptr) bool {
+ var st uint32
+ r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
+ return r != 0 && e == 0
+}
+
+const (
+ foregroundBlue = 0x1
+ foregroundGreen = 0x2
+ foregroundRed = 0x4
+ foregroundIntensity = 0x8
+ foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity)
+ backgroundBlue = 0x10
+ backgroundGreen = 0x20
+ backgroundRed = 0x40
+ backgroundIntensity = 0x80
+ backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity)
+)
+
+type wchar uint16
+type short int16
+type dword uint32
+type word uint16
+
+type coord struct {
+ x short
+ y short
+}
+
+type smallRect struct {
+ left short
+ top short
+ right short
+ bottom short
+}
+
+type consoleScreenBufferInfo struct {
+ size coord
+ cursorPosition coord
+ attributes word
+ window smallRect
+ maximumWindowSize coord
+}
+
+type writer struct {
+ out io.Writer
+ handle syscall.Handle
+ lastbuf bytes.Buffer
+ oldattr word
+}
+
+func newColorable(file *os.File) io.Writer {
+ if file == nil {
+ panic("nil passed instead of *os.File to NewColorable()")
+ }
+
+ if isTerminal(file.Fd()) {
+ var csbi consoleScreenBufferInfo
+ handle := syscall.Handle(file.Fd())
+ procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
+ return &writer{out: file, handle: handle, oldattr: csbi.attributes}
+ } else {
+ return file
+ }
+}
+
+var color256 = map[int]int{
+ 0: 0x000000,
+ 1: 0x800000,
+ 2: 0x008000,
+ 3: 0x808000,
+ 4: 0x000080,
+ 5: 0x800080,
+ 6: 0x008080,
+ 7: 0xc0c0c0,
+ 8: 0x808080,
+ 9: 0xff0000,
+ 10: 0x00ff00,
+ 11: 0xffff00,
+ 12: 0x0000ff,
+ 13: 0xff00ff,
+ 14: 0x00ffff,
+ 15: 0xffffff,
+ 16: 0x000000,
+ 17: 0x00005f,
+ 18: 0x000087,
+ 19: 0x0000af,
+ 20: 0x0000d7,
+ 21: 0x0000ff,
+ 22: 0x005f00,
+ 23: 0x005f5f,
+ 24: 0x005f87,
+ 25: 0x005faf,
+ 26: 0x005fd7,
+ 27: 0x005fff,
+ 28: 0x008700,
+ 29: 0x00875f,
+ 30: 0x008787,
+ 31: 0x0087af,
+ 32: 0x0087d7,
+ 33: 0x0087ff,
+ 34: 0x00af00,
+ 35: 0x00af5f,
+ 36: 0x00af87,
+ 37: 0x00afaf,
+ 38: 0x00afd7,
+ 39: 0x00afff,
+ 40: 0x00d700,
+ 41: 0x00d75f,
+ 42: 0x00d787,
+ 43: 0x00d7af,
+ 44: 0x00d7d7,
+ 45: 0x00d7ff,
+ 46: 0x00ff00,
+ 47: 0x00ff5f,
+ 48: 0x00ff87,
+ 49: 0x00ffaf,
+ 50: 0x00ffd7,
+ 51: 0x00ffff,
+ 52: 0x5f0000,
+ 53: 0x5f005f,
+ 54: 0x5f0087,
+ 55: 0x5f00af,
+ 56: 0x5f00d7,
+ 57: 0x5f00ff,
+ 58: 0x5f5f00,
+ 59: 0x5f5f5f,
+ 60: 0x5f5f87,
+ 61: 0x5f5faf,
+ 62: 0x5f5fd7,
+ 63: 0x5f5fff,
+ 64: 0x5f8700,
+ 65: 0x5f875f,
+ 66: 0x5f8787,
+ 67: 0x5f87af,
+ 68: 0x5f87d7,
+ 69: 0x5f87ff,
+ 70: 0x5faf00,
+ 71: 0x5faf5f,
+ 72: 0x5faf87,
+ 73: 0x5fafaf,
+ 74: 0x5fafd7,
+ 75: 0x5fafff,
+ 76: 0x5fd700,
+ 77: 0x5fd75f,
+ 78: 0x5fd787,
+ 79: 0x5fd7af,
+ 80: 0x5fd7d7,
+ 81: 0x5fd7ff,
+ 82: 0x5fff00,
+ 83: 0x5fff5f,
+ 84: 0x5fff87,
+ 85: 0x5fffaf,
+ 86: 0x5fffd7,
+ 87: 0x5fffff,
+ 88: 0x870000,
+ 89: 0x87005f,
+ 90: 0x870087,
+ 91: 0x8700af,
+ 92: 0x8700d7,
+ 93: 0x8700ff,
+ 94: 0x875f00,
+ 95: 0x875f5f,
+ 96: 0x875f87,
+ 97: 0x875faf,
+ 98: 0x875fd7,
+ 99: 0x875fff,
+ 100: 0x878700,
+ 101: 0x87875f,
+ 102: 0x878787,
+ 103: 0x8787af,
+ 104: 0x8787d7,
+ 105: 0x8787ff,
+ 106: 0x87af00,
+ 107: 0x87af5f,
+ 108: 0x87af87,
+ 109: 0x87afaf,
+ 110: 0x87afd7,
+ 111: 0x87afff,
+ 112: 0x87d700,
+ 113: 0x87d75f,
+ 114: 0x87d787,
+ 115: 0x87d7af,
+ 116: 0x87d7d7,
+ 117: 0x87d7ff,
+ 118: 0x87ff00,
+ 119: 0x87ff5f,
+ 120: 0x87ff87,
+ 121: 0x87ffaf,
+ 122: 0x87ffd7,
+ 123: 0x87ffff,
+ 124: 0xaf0000,
+ 125: 0xaf005f,
+ 126: 0xaf0087,
+ 127: 0xaf00af,
+ 128: 0xaf00d7,
+ 129: 0xaf00ff,
+ 130: 0xaf5f00,
+ 131: 0xaf5f5f,
+ 132: 0xaf5f87,
+ 133: 0xaf5faf,
+ 134: 0xaf5fd7,
+ 135: 0xaf5fff,
+ 136: 0xaf8700,
+ 137: 0xaf875f,
+ 138: 0xaf8787,
+ 139: 0xaf87af,
+ 140: 0xaf87d7,
+ 141: 0xaf87ff,
+ 142: 0xafaf00,
+ 143: 0xafaf5f,
+ 144: 0xafaf87,
+ 145: 0xafafaf,
+ 146: 0xafafd7,
+ 147: 0xafafff,
+ 148: 0xafd700,
+ 149: 0xafd75f,
+ 150: 0xafd787,
+ 151: 0xafd7af,
+ 152: 0xafd7d7,
+ 153: 0xafd7ff,
+ 154: 0xafff00,
+ 155: 0xafff5f,
+ 156: 0xafff87,
+ 157: 0xafffaf,
+ 158: 0xafffd7,
+ 159: 0xafffff,
+ 160: 0xd70000,
+ 161: 0xd7005f,
+ 162: 0xd70087,
+ 163: 0xd700af,
+ 164: 0xd700d7,
+ 165: 0xd700ff,
+ 166: 0xd75f00,
+ 167: 0xd75f5f,
+ 168: 0xd75f87,
+ 169: 0xd75faf,
+ 170: 0xd75fd7,
+ 171: 0xd75fff,
+ 172: 0xd78700,
+ 173: 0xd7875f,
+ 174: 0xd78787,
+ 175: 0xd787af,
+ 176: 0xd787d7,
+ 177: 0xd787ff,
+ 178: 0xd7af00,
+ 179: 0xd7af5f,
+ 180: 0xd7af87,
+ 181: 0xd7afaf,
+ 182: 0xd7afd7,
+ 183: 0xd7afff,
+ 184: 0xd7d700,
+ 185: 0xd7d75f,
+ 186: 0xd7d787,
+ 187: 0xd7d7af,
+ 188: 0xd7d7d7,
+ 189: 0xd7d7ff,
+ 190: 0xd7ff00,
+ 191: 0xd7ff5f,
+ 192: 0xd7ff87,
+ 193: 0xd7ffaf,
+ 194: 0xd7ffd7,
+ 195: 0xd7ffff,
+ 196: 0xff0000,
+ 197: 0xff005f,
+ 198: 0xff0087,
+ 199: 0xff00af,
+ 200: 0xff00d7,
+ 201: 0xff00ff,
+ 202: 0xff5f00,
+ 203: 0xff5f5f,
+ 204: 0xff5f87,
+ 205: 0xff5faf,
+ 206: 0xff5fd7,
+ 207: 0xff5fff,
+ 208: 0xff8700,
+ 209: 0xff875f,
+ 210: 0xff8787,
+ 211: 0xff87af,
+ 212: 0xff87d7,
+ 213: 0xff87ff,
+ 214: 0xffaf00,
+ 215: 0xffaf5f,
+ 216: 0xffaf87,
+ 217: 0xffafaf,
+ 218: 0xffafd7,
+ 219: 0xffafff,
+ 220: 0xffd700,
+ 221: 0xffd75f,
+ 222: 0xffd787,
+ 223: 0xffd7af,
+ 224: 0xffd7d7,
+ 225: 0xffd7ff,
+ 226: 0xffff00,
+ 227: 0xffff5f,
+ 228: 0xffff87,
+ 229: 0xffffaf,
+ 230: 0xffffd7,
+ 231: 0xffffff,
+ 232: 0x080808,
+ 233: 0x121212,
+ 234: 0x1c1c1c,
+ 235: 0x262626,
+ 236: 0x303030,
+ 237: 0x3a3a3a,
+ 238: 0x444444,
+ 239: 0x4e4e4e,
+ 240: 0x585858,
+ 241: 0x626262,
+ 242: 0x6c6c6c,
+ 243: 0x767676,
+ 244: 0x808080,
+ 245: 0x8a8a8a,
+ 246: 0x949494,
+ 247: 0x9e9e9e,
+ 248: 0xa8a8a8,
+ 249: 0xb2b2b2,
+ 250: 0xbcbcbc,
+ 251: 0xc6c6c6,
+ 252: 0xd0d0d0,
+ 253: 0xdadada,
+ 254: 0xe4e4e4,
+ 255: 0xeeeeee,
+}
+
+func (w *writer) Write(data []byte) (n int, err error) {
+ var csbi consoleScreenBufferInfo
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+
+ er := bytes.NewBuffer(data)
+loop:
+ for {
+ r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ if r1 == 0 {
+ break loop
+ }
+
+ c1, _, err := er.ReadRune()
+ if err != nil {
+ break loop
+ }
+ if c1 != 0x1b {
+ fmt.Fprint(w.out, string(c1))
+ continue
+ }
+ c2, _, err := er.ReadRune()
+ if err != nil {
+ w.lastbuf.WriteRune(c1)
+ break loop
+ }
+ if c2 != 0x5b {
+ w.lastbuf.WriteRune(c1)
+ w.lastbuf.WriteRune(c2)
+ continue
+ }
+
+ var buf bytes.Buffer
+ var m rune
+ for {
+ c, _, err := er.ReadRune()
+ if err != nil {
+ w.lastbuf.WriteRune(c1)
+ w.lastbuf.WriteRune(c2)
+ w.lastbuf.Write(buf.Bytes())
+ break loop
+ }
+ if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
+ m = c
+ break
+ }
+ buf.Write([]byte(string(c)))
+ }
+
+ var csbi consoleScreenBufferInfo
+ switch m {
+ case 'A':
+ n, err = strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ csbi.cursorPosition.y -= short(n)
+ procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+ case 'B':
+ n, err = strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ csbi.cursorPosition.y += short(n)
+ procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+ case 'C':
+ n, err = strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ csbi.cursorPosition.x += short(n)
+ procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+ case 'D':
+ n, err = strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ csbi.cursorPosition.x -= short(n)
+ procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+ case 'E':
+ n, err = strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ csbi.cursorPosition.x = 0
+ csbi.cursorPosition.y += short(n)
+ procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+ case 'F':
+ n, err = strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ csbi.cursorPosition.x = 0
+ csbi.cursorPosition.y -= short(n)
+ procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+ case 'G':
+ n, err = strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ csbi.cursorPosition.x = short(n)
+ procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+ case 'H':
+ token := strings.Split(buf.String(), ";")
+ if len(token) != 2 {
+ continue
+ }
+ n1, err := strconv.Atoi(token[0])
+ if err != nil {
+ continue
+ }
+ n2, err := strconv.Atoi(token[1])
+ if err != nil {
+ continue
+ }
+ csbi.cursorPosition.x = short(n2)
+ csbi.cursorPosition.y = short(n1)
+ procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+ case 'J':
+ n, err := strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ var cursor coord
+ switch n {
+ case 0:
+ cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
+ case 1:
+ cursor = coord{x: csbi.window.left, y: csbi.window.top}
+ case 2:
+ cursor = coord{x: csbi.window.left, y: csbi.window.top}
+ }
+ var count, written dword
+ count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x)
+ procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+ procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+ case 'K':
+ n, err := strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ var cursor coord
+ switch n {
+ case 0:
+ cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
+ case 1:
+ cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
+ case 2:
+ cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
+ }
+ var count, written dword
+ count = dword(csbi.size.x - csbi.cursorPosition.x)
+ procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+ procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+ case 'm':
+ attr := csbi.attributes
+ cs := buf.String()
+ if cs == "" {
+ procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr))
+ continue
+ }
+ token := strings.Split(cs, ";")
+ for i := 0; i < len(token); i += 1 {
+ ns := token[i]
+ if n, err = strconv.Atoi(ns); err == nil {
+ switch {
+ case n == 0 || n == 100:
+ attr = w.oldattr
+ case 1 <= n && n <= 5:
+ attr |= foregroundIntensity
+ case n == 7:
+ attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
+ case n == 22 || n == 25:
+ attr |= foregroundIntensity
+ case n == 27:
+ attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
+ case 30 <= n && n <= 37:
+ attr = (attr & backgroundMask)
+ if (n-30)&1 != 0 {
+ attr |= foregroundRed
+ }
+ if (n-30)&2 != 0 {
+ attr |= foregroundGreen
+ }
+ if (n-30)&4 != 0 {
+ attr |= foregroundBlue
+ }
+ case n == 38: // set foreground color.
+ if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") {
+ if n256, err := strconv.Atoi(token[i+2]); err == nil {
+ if n256foreAttr == nil {
+ n256setup()
+ }
+ attr &= backgroundMask
+ attr |= n256foreAttr[n256]
+ i += 2
+ }
+ } else {
+ attr = attr & (w.oldattr & backgroundMask)
+ }
+ case n == 39: // reset foreground color.
+ attr &= backgroundMask
+ attr |= w.oldattr & foregroundMask
+ case 40 <= n && n <= 47:
+ attr = (attr & foregroundMask)
+ if (n-40)&1 != 0 {
+ attr |= backgroundRed
+ }
+ if (n-40)&2 != 0 {
+ attr |= backgroundGreen
+ }
+ if (n-40)&4 != 0 {
+ attr |= backgroundBlue
+ }
+ case n == 48: // set background color.
+ if i < len(token)-2 && token[i+1] == "5" {
+ if n256, err := strconv.Atoi(token[i+2]); err == nil {
+ if n256backAttr == nil {
+ n256setup()
+ }
+ attr &= foregroundMask
+ attr |= n256backAttr[n256]
+ i += 2
+ }
+ } else {
+ attr = attr & (w.oldattr & foregroundMask)
+ }
+ case n == 49: // reset background color.
+ attr &= foregroundMask
+ attr |= w.oldattr & backgroundMask
+ case 90 <= n && n <= 97:
+ attr = (attr & backgroundMask)
+ attr |= foregroundIntensity
+ if (n-90)&1 != 0 {
+ attr |= foregroundRed
+ }
+ if (n-90)&2 != 0 {
+ attr |= foregroundGreen
+ }
+ if (n-90)&4 != 0 {
+ attr |= foregroundBlue
+ }
+ case 100 <= n && n <= 107:
+ attr = (attr & foregroundMask)
+ attr |= backgroundIntensity
+ if (n-100)&1 != 0 {
+ attr |= backgroundRed
+ }
+ if (n-100)&2 != 0 {
+ attr |= backgroundGreen
+ }
+ if (n-100)&4 != 0 {
+ attr |= backgroundBlue
+ }
+ }
+ procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr))
+ }
+ }
+ }
+ }
+ return len(data) - w.lastbuf.Len(), nil
+}
+
+type consoleColor struct {
+ rgb int
+ red bool
+ green bool
+ blue bool
+ intensity bool
+}
+
+func (c consoleColor) foregroundAttr() (attr word) {
+ if c.red {
+ attr |= foregroundRed
+ }
+ if c.green {
+ attr |= foregroundGreen
+ }
+ if c.blue {
+ attr |= foregroundBlue
+ }
+ if c.intensity {
+ attr |= foregroundIntensity
+ }
+ return
+}
+
+func (c consoleColor) backgroundAttr() (attr word) {
+ if c.red {
+ attr |= backgroundRed
+ }
+ if c.green {
+ attr |= backgroundGreen
+ }
+ if c.blue {
+ attr |= backgroundBlue
+ }
+ if c.intensity {
+ attr |= backgroundIntensity
+ }
+ return
+}
+
+var color16 = []consoleColor{
+ consoleColor{0x000000, false, false, false, false},
+ consoleColor{0x000080, false, false, true, false},
+ consoleColor{0x008000, false, true, false, false},
+ consoleColor{0x008080, false, true, true, false},
+ consoleColor{0x800000, true, false, false, false},
+ consoleColor{0x800080, true, false, true, false},
+ consoleColor{0x808000, true, true, false, false},
+ consoleColor{0xc0c0c0, true, true, true, false},
+ consoleColor{0x808080, false, false, false, true},
+ consoleColor{0x0000ff, false, false, true, true},
+ consoleColor{0x00ff00, false, true, false, true},
+ consoleColor{0x00ffff, false, true, true, true},
+ consoleColor{0xff0000, true, false, false, true},
+ consoleColor{0xff00ff, true, false, true, true},
+ consoleColor{0xffff00, true, true, false, true},
+ consoleColor{0xffffff, true, true, true, true},
+}
+
+type hsv struct {
+ h, s, v float32
+}
+
+func (a hsv) dist(b hsv) float32 {
+ dh := a.h - b.h
+ switch {
+ case dh > 0.5:
+ dh = 1 - dh
+ case dh < -0.5:
+ dh = -1 - dh
+ }
+ ds := a.s - b.s
+ dv := a.v - b.v
+ return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv)))
+}
+
+func toHSV(rgb int) hsv {
+ r, g, b := float32((rgb&0xFF0000)>>16)/256.0,
+ float32((rgb&0x00FF00)>>8)/256.0,
+ float32(rgb&0x0000FF)/256.0
+ min, max := minmax3f(r, g, b)
+ h := max - min
+ if h > 0 {
+ if max == r {
+ h = (g - b) / h
+ if h < 0 {
+ h += 6
+ }
+ } else if max == g {
+ h = 2 + (b-r)/h
+ } else {
+ h = 4 + (r-g)/h
+ }
+ }
+ h /= 6.0
+ s := max - min
+ if max != 0 {
+ s /= max
+ }
+ v := max
+ return hsv{h: h, s: s, v: v}
+}
+
+type hsvTable []hsv
+
+func toHSVTable(rgbTable []consoleColor) hsvTable {
+ t := make(hsvTable, len(rgbTable))
+ for i, c := range rgbTable {
+ t[i] = toHSV(c.rgb)
+ }
+ return t
+}
+
+func (t hsvTable) find(rgb int) consoleColor {
+ hsv := toHSV(rgb)
+ n := 7
+ l := float32(5.0)
+ for i, p := range t {
+ d := hsv.dist(p)
+ if d < l {
+ l, n = d, i
+ }
+ }
+ return color16[n]
+}
+
+func minmax3f(a, b, c float32) (min, max float32) {
+ if a < b {
+ if b < c {
+ return a, c
+ } else if a < c {
+ return a, b
+ } else {
+ return c, b
+ }
+ } else {
+ if a < c {
+ return b, c
+ } else if b < c {
+ return b, a
+ } else {
+ return c, a
+ }
+ }
+}
+
+var n256foreAttr []word
+var n256backAttr []word
+
+func n256setup() {
+ n256foreAttr = make([]word, 256)
+ n256backAttr = make([]word, 256)
+ t := toHSVTable(color16)
+ for i, rgb := range color256 {
+ c := t.find(rgb)
+ n256foreAttr[i] = c.foregroundAttr()
+ n256backAttr[i] = c.backgroundAttr()
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go
new file mode 100644
index 0000000..743555d
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go
@@ -0,0 +1,230 @@
+package formatter
+
+import (
+ "fmt"
+ "os"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// ColorableStdOut and ColorableStdErr enable color output support on Windows
+var ColorableStdOut = newColorable(os.Stdout)
+var ColorableStdErr = newColorable(os.Stderr)
+
+const COLS = 80
+
+type ColorMode uint8
+
+const (
+ ColorModeNone ColorMode = iota
+ ColorModeTerminal
+ ColorModePassthrough
+)
+
+var SingletonFormatter = New(ColorModeTerminal)
+
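+// F formats the string using the package-level SingletonFormatter. Style tags such as
+// {{green}}, {{bold}}, and the closing {{/}} are translated into ANSI escape codes
+// (or stripped when the color mode is ColorModeNone). An illustrative call:
+//
+//  F("{{green}}%d specs passed{{/}}", 12)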
+func F(format string, args ...interface{}) string {
+ return SingletonFormatter.F(format, args...)
+}
+
+func Fi(indentation uint, format string, args ...interface{}) string {
+ return SingletonFormatter.Fi(indentation, format, args...)
+}
+
+func Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string {
+ return SingletonFormatter.Fiw(indentation, maxWidth, format, args...)
+}
+
+type Formatter struct {
+ ColorMode ColorMode
+ colors map[string]string
+ styleRe *regexp.Regexp
+ preserveColorStylingTags bool
+}
+
+func NewWithNoColorBool(noColor bool) Formatter {
+ if noColor {
+ return New(ColorModeNone)
+ }
+ return New(ColorModeTerminal)
+}
+
+func New(colorMode ColorMode) Formatter {
+ colorAliases := map[string]int{
+ "black": 0,
+ "red": 1,
+ "green": 2,
+ "yellow": 3,
+ "blue": 4,
+ "magenta": 5,
+ "cyan": 6,
+ "white": 7,
+ }
+ for colorAlias, n := range colorAliases {
+ colorAliases[fmt.Sprintf("bright-%s", colorAlias)] = n + 8
+ }
+
+ getColor := func(color, defaultEscapeCode string) string {
+ color = strings.ToUpper(strings.ReplaceAll(color, "-", "_"))
+ envVar := fmt.Sprintf("GINKGO_CLI_COLOR_%s", color)
+ envVarColor := os.Getenv(envVar)
+ if envVarColor == "" {
+ return defaultEscapeCode
+ }
+ if colorCode, ok := colorAliases[envVarColor]; ok {
+ return fmt.Sprintf("\x1b[38;5;%dm", colorCode)
+ }
+ colorCode, err := strconv.Atoi(envVarColor)
+ if err != nil || colorCode < 0 || colorCode > 255 {
+ return defaultEscapeCode
+ }
+ return fmt.Sprintf("\x1b[38;5;%dm", colorCode)
+ }
+
+ f := Formatter{
+ ColorMode: colorMode,
+ colors: map[string]string{
+ "/": "\x1b[0m",
+ "bold": "\x1b[1m",
+ "underline": "\x1b[4m",
+
+ "red": getColor("red", "\x1b[38;5;9m"),
+ "orange": getColor("orange", "\x1b[38;5;214m"),
+ "coral": getColor("coral", "\x1b[38;5;204m"),
+ "magenta": getColor("magenta", "\x1b[38;5;13m"),
+ "green": getColor("green", "\x1b[38;5;10m"),
+ "dark-green": getColor("dark-green", "\x1b[38;5;28m"),
+ "yellow": getColor("yellow", "\x1b[38;5;11m"),
+ "light-yellow": getColor("light-yellow", "\x1b[38;5;228m"),
+ "cyan": getColor("cyan", "\x1b[38;5;14m"),
+ "gray": getColor("gray", "\x1b[38;5;243m"),
+ "light-gray": getColor("light-gray", "\x1b[38;5;246m"),
+ "blue": getColor("blue", "\x1b[38;5;12m"),
+ },
+ }
+ colors := []string{}
+ for color := range f.colors {
+ colors = append(colors, color)
+ }
+ f.styleRe = regexp.MustCompile("{{(" + strings.Join(colors, "|") + ")}}")
+ return f
+}
+
+func (f Formatter) F(format string, args ...interface{}) string {
+ return f.Fi(0, format, args...)
+}
+
+func (f Formatter) Fi(indentation uint, format string, args ...interface{}) string {
+ return f.Fiw(indentation, 0, format, args...)
+}
+
+func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string {
+ out := f.style(format)
+ if len(args) > 0 {
+ out = fmt.Sprintf(out, args...)
+ }
+
+ if indentation == 0 && maxWidth == 0 {
+ return out
+ }
+
+ lines := strings.Split(out, "\n")
+
+ if maxWidth != 0 {
+ outLines := []string{}
+
+ maxWidth = maxWidth - indentation*2
+ for _, line := range lines {
+ if f.length(line) <= maxWidth {
+ outLines = append(outLines, line)
+ continue
+ }
+ words := strings.Split(line, " ")
+ outWords := []string{words[0]}
+ length := uint(f.length(words[0]))
+ for _, word := range words[1:] {
+ wordLength := f.length(word)
+ if length+wordLength+1 <= maxWidth {
+ length += wordLength + 1
+ outWords = append(outWords, word)
+ continue
+ }
+ outLines = append(outLines, strings.Join(outWords, " "))
+ outWords = []string{word}
+ length = wordLength
+ }
+ if len(outWords) > 0 {
+ outLines = append(outLines, strings.Join(outWords, " "))
+ }
+ }
+
+ lines = outLines
+ }
+
+ if indentation == 0 {
+ return strings.Join(lines, "\n")
+ }
+
+ padding := strings.Repeat(" ", int(indentation))
+ for i := range lines {
+ if lines[i] != "" {
+ lines[i] = padding + lines[i]
+ }
+ }
+
+ return strings.Join(lines, "\n")
+}
+
+func (f Formatter) length(styled string) uint {
+ n := uint(0)
+ inStyle := false
+ for _, b := range styled {
+ if inStyle {
+ if b == 'm' {
+ inStyle = false
+ }
+ continue
+ }
+ if b == '\x1b' {
+ inStyle = true
+ continue
+ }
+ n += 1
+ }
+ return n
+}
+
+func (f Formatter) CycleJoin(elements []string, joiner string, cycle []string) string {
+ if len(elements) == 0 {
+ return ""
+ }
+ n := len(cycle)
+ out := ""
+ for i, text := range elements {
+ out += cycle[i%n] + text
+ if i < len(elements)-1 {
+ out += joiner
+ }
+ }
+ out += "{{/}}"
+ return f.style(out)
+}
+
+func (f Formatter) style(s string) string {
+ switch f.ColorMode {
+ case ColorModeNone:
+ return f.styleRe.ReplaceAllString(s, "")
+ case ColorModePassthrough:
+ return s
+ case ColorModeTerminal:
+ return f.styleRe.ReplaceAllStringFunc(s, func(match string) string {
+ if out, ok := f.colors[strings.Trim(match, "{}")]; ok {
+ return out
+ }
+ return match
+ })
+ }
+
+ return ""
+}
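
The formatter above is the rendering layer for every CLI message in this vendored tree; here is a minimal sketch of how its tag syntax and wrapping helpers are consumed (import path as vendored, behavior as implemented above):

package main

import (
    "fmt"

    "github.com/onsi/ginkgo/v2/formatter"
)

func main() {
    // Style tags are swapped for the ANSI escape codes registered in New;
    // {{/}} emits the reset code.
    f := formatter.New(formatter.ColorModeTerminal)
    fmt.Println(f.F("{{green}}{{bold}}PASS{{/}} %d specs", 12))

    // ColorModeNone strips the tags instead, which is what
    // NewWithNoColorBool(true) returns.
    plain := formatter.New(formatter.ColorModeNone)
    fmt.Println(plain.F("{{green}}{{bold}}PASS{{/}} %d specs", 12)) // PASS 12 specs

    // Fiw indents one level and word-wraps at the 80-column COLS constant.
    fmt.Println(plain.Fiw(1, formatter.COLS, "a long diagnostic message that will be word-wrapped and indented"))
}
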
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go
new file mode 100644
index 0000000..5db5d1a
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go
@@ -0,0 +1,63 @@
+package build
+
+import (
+ "fmt"
+
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/ginkgo/internal"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+func BuildBuildCommand() command.Command {
+ var cliConfig = types.NewDefaultCLIConfig()
+ var goFlagsConfig = types.NewDefaultGoFlagsConfig()
+
+ flags, err := types.BuildBuildCommandFlagSet(&cliConfig, &goFlagsConfig)
+ if err != nil {
+ panic(err)
+ }
+
+ return command.Command{
+ Name: "build",
+ Flags: flags,
+ Usage: "ginkgo build ",
+ ShortDoc: "Build the passed in (or the package in the current directory if left blank).",
+ DocLink: "precompiling-suites",
+ Command: func(args []string, _ []string) {
+ var errors []error
+ cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig)
+ command.AbortIfErrors("Ginkgo detected configuration issues:", errors)
+
+ buildSpecs(args, cliConfig, goFlagsConfig)
+ },
+ }
+}
+
+func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig) {
+ suites := internal.FindSuites(args, cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter)
+ if len(suites) == 0 {
+ command.AbortWith("Found no test suites")
+ }
+
+ internal.VerifyCLIAndFrameworkVersion(suites)
+
+ opc := internal.NewOrderedParallelCompiler(cliConfig.ComputedNumCompilers())
+ opc.StartCompiling(suites, goFlagsConfig)
+
+ for {
+ suiteIdx, suite := opc.Next()
+ if suiteIdx >= len(suites) {
+ break
+ }
+ suites[suiteIdx] = suite
+ if suite.State.Is(internal.TestSuiteStateFailedToCompile) {
+ fmt.Println(suite.CompilationError.Error())
+ } else {
+ fmt.Printf("Compiled %s.test\n", suite.PackageName)
+ }
+ }
+
+ if suites.CountWithState(internal.TestSuiteStateFailedToCompile) > 0 {
+ command.AbortWith("Failed to compile all tests")
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go
new file mode 100644
index 0000000..2efd286
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go
@@ -0,0 +1,61 @@
+package command
+
+import "fmt"
+
+type AbortDetails struct {
+ ExitCode int
+ Error error
+ EmitUsage bool
+}
+
+func Abort(details AbortDetails) {
+ panic(details)
+}
+
+func AbortGracefullyWith(format string, args ...interface{}) {
+ Abort(AbortDetails{
+ ExitCode: 0,
+ Error: fmt.Errorf(format, args...),
+ EmitUsage: false,
+ })
+}
+
+func AbortWith(format string, args ...interface{}) {
+ Abort(AbortDetails{
+ ExitCode: 1,
+ Error: fmt.Errorf(format, args...),
+ EmitUsage: false,
+ })
+}
+
+func AbortWithUsage(format string, args ...interface{}) {
+ Abort(AbortDetails{
+ ExitCode: 1,
+ Error: fmt.Errorf(format, args...),
+ EmitUsage: true,
+ })
+}
+
+func AbortIfError(preamble string, err error) {
+ if err != nil {
+ Abort(AbortDetails{
+ ExitCode: 1,
+ Error: fmt.Errorf("%s\n%s", preamble, err.Error()),
+ EmitUsage: false,
+ })
+ }
+}
+
+func AbortIfErrors(preamble string, errors []error) {
+ if len(errors) > 0 {
+ out := ""
+ for _, err := range errors {
+ out += err.Error()
+ }
+ Abort(AbortDetails{
+ ExitCode: 1,
+ Error: fmt.Errorf("%s\n%s", preamble, out),
+ EmitUsage: false,
+ })
+ }
+}
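
These helpers never return: Abort panics with an AbortDetails value, and the top-level runner (program.go, next in this diff) recovers it and turns it into an exit code. A self-contained sketch of that panic/recover contract, using only the standard library:

package main

import (
    "fmt"
    "os"
)

// abortDetails stands in for command.AbortDetails.
type abortDetails struct {
    ExitCode int
    Error    error
}

func main() {
    defer func() {
        if r := recover(); r != nil {
            d, ok := r.(abortDetails)
            if !ok {
                panic(r) // not an abort: let the real panic escape
            }
            if d.Error != nil {
                fmt.Fprintln(os.Stderr, d.Error)
            }
            os.Exit(d.ExitCode) // exits 1 here
        }
    }()

    // Deep inside any command, a helper can bail out without plumbing
    // errors up the call stack:
    panic(abortDetails{ExitCode: 1, Error: fmt.Errorf("found no test suites")})
}
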
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go
new file mode 100644
index 0000000..12e0e56
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go
@@ -0,0 +1,50 @@
+package command
+
+import (
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type Command struct {
+ Name string
+ Flags types.GinkgoFlagSet
+ Usage string
+ ShortDoc string
+ Documentation string
+ DocLink string
+ Command func(args []string, additionalArgs []string)
+}
+
+func (c Command) Run(args []string, additionalArgs []string) {
+ args, err := c.Flags.Parse(args)
+ if err != nil {
+ AbortWithUsage(err.Error())
+ }
+
+ c.Command(args, additionalArgs)
+}
+
+func (c Command) EmitUsage(writer io.Writer) {
+ fmt.Fprintln(writer, formatter.F("{{bold}}"+c.Usage+"{{/}}"))
+ fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(c.Usage))))
+ if c.ShortDoc != "" {
+ fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.ShortDoc))
+ fmt.Fprintln(writer, "")
+ }
+ if c.Documentation != "" {
+ fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.Documentation))
+ fmt.Fprintln(writer, "")
+ }
+ if c.DocLink != "" {
+ fmt.Fprintln(writer, formatter.Fi(0, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}http://onsi.github.io/ginkgo/#%s{{/}}", c.DocLink))
+ fmt.Fprintln(writer, "")
+ }
+ flagUsage := c.Flags.Usage()
+ if flagUsage != "" {
+ fmt.Fprint(writer, formatter.F(flagUsage))
+ }
+}
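
A hedged sketch of wiring a command with this type (the names are hypothetical, and it assumes, as ginkgo's own flag-less commands rely on, that the zero-value GinkgoFlagSet parses as a no-op):

package main

import (
    "fmt"

    "github.com/onsi/ginkgo/v2/ginkgo/command"
)

func main() {
    version := command.Command{
        Name:     "version",
        Usage:    "mytool version",
        ShortDoc: "Print the tool version",
        Command: func(args []string, additionalArgs []string) {
            fmt.Println("mytool v0.0.1")
        },
    }
    // Run parses flags (a no-op here) and then invokes the closure.
    version.Run([]string{}, nil)
}
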
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go
new file mode 100644
index 0000000..88dd8d6
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go
@@ -0,0 +1,182 @@
+package command
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type Program struct {
+ Name string
+ Heading string
+ Commands []Command
+ DefaultCommand Command
+ DeprecatedCommands []DeprecatedCommand
+
+ //For testing - leave as nil in production
+ OutWriter io.Writer
+ ErrWriter io.Writer
+ Exiter func(code int)
+}
+
+type DeprecatedCommand struct {
+ Name string
+ Deprecation types.Deprecation
+}
+
+func (p Program) RunAndExit(osArgs []string) {
+ var command Command
+ deprecationTracker := types.NewDeprecationTracker()
+ if p.Exiter == nil {
+ p.Exiter = os.Exit
+ }
+ if p.OutWriter == nil {
+ p.OutWriter = formatter.ColorableStdOut
+ }
+ if p.ErrWriter == nil {
+ p.ErrWriter = formatter.ColorableStdErr
+ }
+
+ defer func() {
+ exitCode := 0
+
+ if r := recover(); r != nil {
+ details, ok := r.(AbortDetails)
+ if !ok {
+ panic(r)
+ }
+
+ if details.Error != nil {
+ fmt.Fprintln(p.ErrWriter, formatter.F("{{red}}{{bold}}%s %s{{/}} {{red}}failed{{/}}", p.Name, command.Name))
+ fmt.Fprintln(p.ErrWriter, formatter.Fi(1, details.Error.Error()))
+ }
+ if details.EmitUsage {
+ if details.Error != nil {
+ fmt.Fprintln(p.ErrWriter, "")
+ }
+ command.EmitUsage(p.ErrWriter)
+ }
+ exitCode = details.ExitCode
+ }
+
+ command.Flags.ValidateDeprecations(deprecationTracker)
+ if deprecationTracker.DidTrackDeprecations() {
+ fmt.Fprintln(p.ErrWriter, deprecationTracker.DeprecationsReport())
+ }
+ p.Exiter(exitCode)
+ return
+ }()
+
+ args, additionalArgs := []string{}, []string{}
+
+ foundDelimiter := false
+ for _, arg := range osArgs[1:] {
+ if !foundDelimiter {
+ if arg == "--" {
+ foundDelimiter = true
+ continue
+ }
+ }
+
+ if foundDelimiter {
+ additionalArgs = append(additionalArgs, arg)
+ } else {
+ args = append(args, arg)
+ }
+ }
+
+ command = p.DefaultCommand
+ if len(args) > 0 {
+ p.handleHelpRequestsAndExit(p.OutWriter, args)
+ if command.Name == args[0] {
+ args = args[1:]
+ } else {
+ for _, deprecatedCommand := range p.DeprecatedCommands {
+ if deprecatedCommand.Name == args[0] {
+ deprecationTracker.TrackDeprecation(deprecatedCommand.Deprecation)
+ return
+ }
+ }
+ for _, tryCommand := range p.Commands {
+ if tryCommand.Name == args[0] {
+ command, args = tryCommand, args[1:]
+ break
+ }
+ }
+ }
+ }
+
+ command.Run(args, additionalArgs)
+}
+
+func (p Program) handleHelpRequestsAndExit(writer io.Writer, args []string) {
+ if len(args) == 0 {
+ return
+ }
+
+ matchesHelpFlag := func(args ...string) bool {
+ for _, arg := range args {
+ if arg == "--help" || arg == "-help" || arg == "-h" || arg == "--h" {
+ return true
+ }
+ }
+ return false
+ }
+ if len(args) == 1 {
+ if args[0] == "help" || matchesHelpFlag(args[0]) {
+ p.EmitUsage(writer)
+ Abort(AbortDetails{})
+ }
+ } else {
+ var name string
+ if args[0] == "help" || matchesHelpFlag(args[0]) {
+ name = args[1]
+ } else if matchesHelpFlag(args[1:]...) {
+ name = args[0]
+ } else {
+ return
+ }
+
+ if p.DefaultCommand.Name == name || p.Name == name {
+ p.DefaultCommand.EmitUsage(writer)
+ Abort(AbortDetails{})
+ }
+ for _, command := range p.Commands {
+ if command.Name == name {
+ command.EmitUsage(writer)
+ Abort(AbortDetails{})
+ }
+ }
+
+ fmt.Fprintln(writer, formatter.F("{{red}}Unknown Command: {{bold}}%s{{/}}", name))
+ fmt.Fprintln(writer, "")
+ p.EmitUsage(writer)
+ Abort(AbortDetails{ExitCode: 1})
+ }
+ return
+}
+
+func (p Program) EmitUsage(writer io.Writer) {
+ fmt.Fprintln(writer, formatter.F(p.Heading))
+ fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(p.Heading))))
+ fmt.Fprintln(writer, formatter.F("For usage information for a command, run {{bold}}%s help COMMAND{{/}}.", p.Name))
+ fmt.Fprintln(writer, formatter.F("For usage information for the default command, run {{bold}}%s help %s{{/}} or {{bold}}%s help %s{{/}}.", p.Name, p.Name, p.Name, p.DefaultCommand.Name))
+ fmt.Fprintln(writer, "")
+ fmt.Fprintln(writer, formatter.F("The following commands are available:"))
+
+ fmt.Fprintln(writer, formatter.Fi(1, "{{bold}}%s{{/}} or %s {{bold}}%s{{/}} - {{gray}}%s{{/}}", p.Name, p.Name, p.DefaultCommand.Name, p.DefaultCommand.Usage))
+ if p.DefaultCommand.ShortDoc != "" {
+ fmt.Fprintln(writer, formatter.Fi(2, p.DefaultCommand.ShortDoc))
+ }
+
+ for _, command := range p.Commands {
+ fmt.Fprintln(writer, formatter.Fi(1, "{{bold}}%s{{/}} - {{gray}}%s{{/}}", command.Name, command.Usage))
+ if command.ShortDoc != "" {
+ fmt.Fprintln(writer, formatter.Fi(2, command.ShortDoc))
+ }
+ }
+}
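
One detail of RunAndExit worth calling out is the `--` delimiter: everything after it bypasses flag parsing and is handed to the test binary verbatim. A standalone sketch of the same split:

package main

import "fmt"

// splitArgs mirrors the delimiter handling in Program.RunAndExit: everything
// before "--" is parsed by the CLI, everything after is forwarded untouched.
func splitArgs(osArgs []string) (args, additionalArgs []string) {
    found := false
    for _, arg := range osArgs[1:] {
        if !found && arg == "--" {
            found = true
            continue
        }
        if found {
            additionalArgs = append(additionalArgs, arg)
        } else {
            args = append(args, arg)
        }
    }
    return args, additionalArgs
}

func main() {
    args, extra := splitArgs([]string{"ginkgo", "-p", "./...", "--", "-custom.flag=1"})
    fmt.Println(args)  // [-p ./...]
    fmt.Println(extra) // [-custom.flag=1]
}
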
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go
new file mode 100644
index 0000000..a367a1f
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go
@@ -0,0 +1,48 @@
+package generators
+
+var bootstrapText = `package {{.Package}}
+
+import (
+ "testing"
+
+ {{.GinkgoImport}}
+ {{.GomegaImport}}
+)
+
+func Test{{.FormattedName}}(t *testing.T) {
+ {{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail)
+ {{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite")
+}
+`
+
+var agoutiBootstrapText = `package {{.Package}}
+
+import (
+ "testing"
+
+ {{.GinkgoImport}}
+ {{.GomegaImport}}
+ "github.com/sclevine/agouti"
+)
+
+func Test{{.FormattedName}}(t *testing.T) {
+ {{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail)
+ {{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite")
+}
+
+var agoutiDriver *agouti.WebDriver
+
+var _ = {{.GinkgoPackage}}BeforeSuite(func() {
+ // Choose a WebDriver:
+
+ agoutiDriver = agouti.PhantomJS()
+ // agoutiDriver = agouti.Selenium()
+ // agoutiDriver = agouti.ChromeDriver()
+
+ {{.GomegaPackage}}Expect(agoutiDriver.Start()).To({{.GomegaPackage}}Succeed())
+})
+
+var _ = {{.GinkgoPackage}}AfterSuite(func() {
+ {{.GomegaPackage}}Expect(agoutiDriver.Stop()).To({{.GomegaPackage}}Succeed())
+})
+`
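
For reference, rendering bootstrapText for a hypothetical package named books with the default configuration (dot-imports on, so GinkgoPackage and GomegaPackage are empty) produces roughly:

package books_test

import (
    "testing"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
)

func TestBooks(t *testing.T) {
    RegisterFailHandler(Fail)
    RunSpecs(t, "Books Suite")
}
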
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go
new file mode 100644
index 0000000..b2dc59b
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go
@@ -0,0 +1,133 @@
+package generators
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "os"
+ "text/template"
+
+ sprig "github.com/go-task/slim-sprig/v3"
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/ginkgo/internal"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+func BuildBootstrapCommand() command.Command {
+ conf := GeneratorsConfig{}
+ flags, err := types.NewGinkgoFlagSet(
+ types.GinkgoFlags{
+ {Name: "agouti", KeyPath: "Agouti",
+ Usage: "If set, bootstrap will generate a bootstrap file for writing Agouti tests"},
+ {Name: "nodot", KeyPath: "NoDot",
+ Usage: "If set, bootstrap will generate a bootstrap test file that does not dot-import ginkgo and gomega"},
+ {Name: "internal", KeyPath: "Internal",
+ Usage: "If set, bootstrap will generate a bootstrap test file that uses the regular package name (i.e. `package X`, not `package X_test`)"},
+ {Name: "template", KeyPath: "CustomTemplate",
+ UsageArgument: "template-file",
+ Usage: "If specified, generate will use the contents of the file passed as the bootstrap template"},
+ {Name: "template-data", KeyPath: "CustomTemplateData",
+ UsageArgument: "template-data-file",
+ Usage: "If specified, generate will use the contents of the file passed as data to be rendered in the bootstrap template"},
+ },
+ &conf,
+ types.GinkgoFlagSections{},
+ )
+
+ if err != nil {
+ panic(err)
+ }
+
+ return command.Command{
+ Name: "bootstrap",
+ Usage: "ginkgo bootstrap",
+ ShortDoc: "Bootstrap a test suite for the current package",
+ Documentation: `Tests written in Ginkgo and Gomega require a small amount of boilerplate to hook into Go's testing infrastructure.
+
+{{bold}}ginkgo bootstrap{{/}} generates this boilerplate for you in a file named X_suite_test.go where X is the name of the package under test.`,
+ DocLink: "generators",
+ Flags: flags,
+ Command: func(_ []string, _ []string) {
+ generateBootstrap(conf)
+ },
+ }
+}
+
+type bootstrapData struct {
+ Package string
+ FormattedName string
+
+ GinkgoImport string
+ GomegaImport string
+ GinkgoPackage string
+ GomegaPackage string
+ CustomData map[string]any
+}
+
+func generateBootstrap(conf GeneratorsConfig) {
+ packageName, bootstrapFilePrefix, formattedName := getPackageAndFormattedName()
+
+ data := bootstrapData{
+ Package: determinePackageName(packageName, conf.Internal),
+ FormattedName: formattedName,
+
+ GinkgoImport: `. "github.com/onsi/ginkgo/v2"`,
+ GomegaImport: `. "github.com/onsi/gomega"`,
+ GinkgoPackage: "",
+ GomegaPackage: "",
+ }
+
+ if conf.NoDot {
+ data.GinkgoImport = `"github.com/onsi/ginkgo/v2"`
+ data.GomegaImport = `"github.com/onsi/gomega"`
+ data.GinkgoPackage = `ginkgo.`
+ data.GomegaPackage = `gomega.`
+ }
+
+ targetFile := fmt.Sprintf("%s_suite_test.go", bootstrapFilePrefix)
+ if internal.FileExists(targetFile) {
+ command.AbortWith("{{bold}}%s{{/}} already exists", targetFile)
+ } else {
+ fmt.Printf("Generating ginkgo test suite bootstrap for %s in:\n\t%s\n", packageName, targetFile)
+ }
+
+ f, err := os.Create(targetFile)
+ command.AbortIfError("Failed to create file:", err)
+ defer f.Close()
+
+ var templateText string
+ if conf.CustomTemplate != "" {
+ tpl, err := os.ReadFile(conf.CustomTemplate)
+ command.AbortIfError("Failed to read custom bootstrap file:", err)
+ templateText = string(tpl)
+ if conf.CustomTemplateData != "" {
+ var tplCustomDataMap map[string]any
+ tplCustomData, err := os.ReadFile(conf.CustomTemplateData)
+ command.AbortIfError("Failed to read custom boostrap data file:", err)
+ if !json.Valid([]byte(tplCustomData)) {
+ command.AbortWith("Invalid JSON object in custom data file.")
+ }
+ //create map from the custom template data
+ json.Unmarshal(tplCustomData, &tplCustomDataMap)
+ data.CustomData = tplCustomDataMap
+ }
+ } else if conf.Agouti {
+ templateText = agoutiBootstrapText
+ } else {
+ templateText = bootstrapText
+ }
+
+ // missingkey=error makes rendering fail explicitly when the template accesses an absent key
+ bootstrapTemplate, err := template.New("bootstrap").Funcs(sprig.TxtFuncMap()).Option("missingkey=error").Parse(templateText)
+ command.AbortIfError("Failed to parse bootstrap template:", err)
+
+ buf := &bytes.Buffer{}
+ // Render into a buffer first so bad references to custom data fail here
+ // with a clear error rather than later, during the go fmt step
+ err = bootstrapTemplate.Execute(buf, data)
+ command.AbortIfError("Failed to render bootstrap template:", err)
+
+ buf.WriteTo(f)
+
+ internal.GoFmt(targetFile)
+}
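
A hedged illustration of the --template/--template-data pair handled above (both file names are hypothetical): the data file must contain a JSON object, its keys surface in the template under .CustomData, and because of missingkey=error a reference to an absent key fails at render time rather than silently emitting nothing.

// custom_data.json (hypothetical):
//     {"Author": "Jane Doe"}
//
// custom_bootstrap.tmpl (hypothetical) can then use sprig functions on it:
//     // Maintained by {{.CustomData.Author | upper}}
//     package {{.Package}}
//
// invoked as:
//     ginkgo bootstrap --template custom_bootstrap.tmpl --template-data custom_data.json
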
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go
new file mode 100644
index 0000000..cf3b7cb
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go
@@ -0,0 +1,265 @@
+package generators
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "text/template"
+
+ sprig "github.com/go-task/slim-sprig/v3"
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/ginkgo/internal"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+func BuildGenerateCommand() command.Command {
+ conf := GeneratorsConfig{}
+ flags, err := types.NewGinkgoFlagSet(
+ types.GinkgoFlags{
+ {Name: "agouti", KeyPath: "Agouti",
+ Usage: "If set, generate will create a test file for writing Agouti tests"},
+ {Name: "nodot", KeyPath: "NoDot",
+ Usage: "If set, generate will create a test file that does not dot-import ginkgo and gomega"},
+ {Name: "internal", KeyPath: "Internal",
+ Usage: "If set, generate will create a test file that uses the regular package name (i.e. `package X`, not `package X_test`)"},
+ {Name: "template", KeyPath: "CustomTemplate",
+ UsageArgument: "template-file",
+ Usage: "If specified, generate will use the contents of the file passed as the test file template"},
+ {Name: "template-data", KeyPath: "CustomTemplateData",
+ UsageArgument: "template-data-file",
+ Usage: "If specified, generate will use the contents of the file passed as data to be rendered in the test file template"},
+ {Name: "tags", KeyPath: "Tags",
+ UsageArgument: "build-tags",
+ Usage: "If specified, generate will create a test file that uses the given build tags (i.e. `--tags e2e,!unit` will add `//go:build e2e,!unit`)"},
+ },
+ &conf,
+ types.GinkgoFlagSections{},
+ )
+
+ if err != nil {
+ panic(err)
+ }
+
+ return command.Command{
+ Name: "generate",
+ Usage: "ginkgo generate ",
+ ShortDoc: "Generate a test file named _test.go",
+ Documentation: `If the optional argument is omitted, a file named after the package in the current directory will be created.
+
+You can pass multiple to generate multiple files simultaneously. The resulting files are named _test.go.
+
+You can also pass a of the form "file.go" and generate will emit "file_test.go".`,
+ DocLink: "generators",
+ Flags: flags,
+ Command: func(args []string, _ []string) {
+ generateTestFiles(conf, args)
+ },
+ }
+}
+
+type specData struct {
+ BuildTags string
+ Package string
+ Subject string
+ PackageImportPath string
+ ImportPackage bool
+
+ GinkgoImport string
+ GomegaImport string
+ GinkgoPackage string
+ GomegaPackage string
+ CustomData map[string]any
+}
+
+func generateTestFiles(conf GeneratorsConfig, args []string) {
+ subjects := args
+ if len(subjects) == 0 {
+ subjects = []string{""}
+ }
+ for _, subject := range subjects {
+ generateTestFileForSubject(subject, conf)
+ }
+}
+
+func generateTestFileForSubject(subject string, conf GeneratorsConfig) {
+ packageName, specFilePrefix, formattedName := getPackageAndFormattedName()
+ if subject != "" {
+ specFilePrefix = formatSubject(subject)
+ formattedName = prettifyName(specFilePrefix)
+ }
+
+ if conf.Internal {
+ specFilePrefix = specFilePrefix + "_internal"
+ }
+
+ data := specData{
+ BuildTags: getBuildTags(conf.Tags),
+ Package: determinePackageName(packageName, conf.Internal),
+ Subject: formattedName,
+ PackageImportPath: getPackageImportPath(),
+ ImportPackage: !conf.Internal,
+
+ GinkgoImport: `. "github.com/onsi/ginkgo/v2"`,
+ GomegaImport: `. "github.com/onsi/gomega"`,
+ GinkgoPackage: "",
+ GomegaPackage: "",
+ }
+
+ if conf.NoDot {
+ data.GinkgoImport = `"github.com/onsi/ginkgo/v2"`
+ data.GomegaImport = `"github.com/onsi/gomega"`
+ data.GinkgoPackage = `ginkgo.`
+ data.GomegaPackage = `gomega.`
+ }
+
+ targetFile := fmt.Sprintf("%s_test.go", specFilePrefix)
+ if internal.FileExists(targetFile) {
+ command.AbortWith("{{bold}}%s{{/}} already exists", targetFile)
+ } else {
+ fmt.Printf("Generating ginkgo test for %s in:\n %s\n", data.Subject, targetFile)
+ }
+
+ f, err := os.Create(targetFile)
+ command.AbortIfError("Failed to create test file:", err)
+ defer f.Close()
+
+ var templateText string
+ if conf.CustomTemplate != "" {
+ tpl, err := os.ReadFile(conf.CustomTemplate)
+ command.AbortIfError("Failed to read custom template file:", err)
+ templateText = string(tpl)
+ if conf.CustomTemplateData != "" {
+ var tplCustomDataMap map[string]any
+ tplCustomData, err := os.ReadFile(conf.CustomTemplateData)
+ command.AbortIfError("Failed to read custom template data file:", err)
+ if !json.Valid(tplCustomData) {
+ command.AbortWith("Invalid JSON object in custom data file.")
+ }
+ // Build the map the custom template can reference via .CustomData
+ err = json.Unmarshal(tplCustomData, &tplCustomDataMap)
+ command.AbortIfError("Failed to parse custom data file:", err)
+ data.CustomData = tplCustomDataMap
+ }
+ } else if conf.Agouti {
+ templateText = agoutiSpecText
+ } else {
+ templateText = specText
+ }
+
+ // missingkey=error makes rendering fail explicitly when the template accesses an absent key
+ specTemplate, err := template.New("spec").Funcs(sprig.TxtFuncMap()).Option("missingkey=error").Parse(templateText)
+ command.AbortIfError("Failed to read parse test template:", err)
+
+ // Fail during template rendering, when bad custom data references are
+ // still easy to report, rather than later during the go fmt step
+ err = specTemplate.Execute(f, data)
+ command.AbortIfError("Failed to render bootstrap template:", err)
+ internal.GoFmt(targetFile)
+}
+
+func formatSubject(name string) string {
+ name = strings.ReplaceAll(name, "-", "_")
+ name = strings.ReplaceAll(name, " ", "_")
+ name = strings.Split(name, ".go")[0]
+ name = strings.Split(name, "_test")[0]
+ return name
+}
+
+// moduleName returns the module name from the go.mod file in the given module root directory
+func moduleName(modRoot string) string {
+ modFile, err := os.Open(filepath.Join(modRoot, "go.mod"))
+ if err != nil {
+ return ""
+ }
+ defer modFile.Close()
+
+ mod := make([]byte, 128)
+ _, err = modFile.Read(mod)
+ if err != nil {
+ return ""
+ }
+
+ slashSlash := []byte("//")
+ moduleStr := []byte("module")
+
+ for len(mod) > 0 {
+ line := mod
+ mod = nil
+ if i := bytes.IndexByte(line, '\n'); i >= 0 {
+ line, mod = line[:i], line[i+1:]
+ }
+ if i := bytes.Index(line, slashSlash); i >= 0 {
+ line = line[:i]
+ }
+ line = bytes.TrimSpace(line)
+ if !bytes.HasPrefix(line, moduleStr) {
+ continue
+ }
+ line = line[len(moduleStr):]
+ n := len(line)
+ line = bytes.TrimSpace(line)
+ if len(line) == n || len(line) == 0 {
+ continue
+ }
+
+ if line[0] == '"' || line[0] == '`' {
+ p, err := strconv.Unquote(string(line))
+ if err != nil {
+ return "" // malformed quoted string or multiline module path
+ }
+ return p
+ }
+
+ return string(line)
+ }
+
+ return "" // missing module path
+}
+
+func findModuleRoot(dir string) (root string) {
+ dir = filepath.Clean(dir)
+
+ // Look for enclosing go.mod.
+ for {
+ if fi, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil && !fi.IsDir() {
+ return dir
+ }
+ d := filepath.Dir(dir)
+ if d == dir {
+ break
+ }
+ dir = d
+ }
+ return ""
+}
+
+func getPackageImportPath() string {
+ workingDir, err := os.Getwd()
+ if err != nil {
+ panic(err.Error())
+ }
+
+ sep := string(filepath.Separator)
+
+ // Try go.mod file first
+ modRoot := findModuleRoot(workingDir)
+ if modRoot != "" {
+ modName := moduleName(modRoot)
+ if modName != "" {
+ cd := strings.ReplaceAll(workingDir, modRoot, "")
+ cd = strings.ReplaceAll(cd, sep, "/")
+ return modName + cd
+ }
+ }
+
+ // Fallback to GOPATH structure
+ paths := strings.Split(workingDir, sep+"src"+sep)
+ if len(paths) == 1 {
+ fmt.Printf("\nCouldn't identify package import path.\n\n\tginkgo generate\n\nMust be run within a package directory under $GOPATH/src/...\nYou're going to have to change UNKNOWN_PACKAGE_PATH in the generated file...\n\n")
+ return "UNKNOWN_PACKAGE_PATH"
+ }
+ return filepath.ToSlash(paths[len(paths)-1])
+}
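
A worked example of the import-path derivation above (all paths hypothetical):

// working directory : /home/dev/bookstore/internal/catalog
// enclosing go.mod  : /home/dev/bookstore/go.mod, containing "module example.com/bookstore"
//
// findModuleRoot walks parent directories until it finds go.mod,
// moduleName extracts "example.com/bookstore" from the module line, and
// getPackageImportPath appends the directory relative to the module root:
//
//     example.com/bookstore/internal/catalog
//
// With no go.mod anywhere above, the GOPATH fallback instead splits the
// working directory on "/src/" and uses everything after it.
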
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go
new file mode 100644
index 0000000..4dab07d
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go
@@ -0,0 +1,43 @@
+package generators
+
+var specText = `{{.BuildTags}}
+package {{.Package}}
+
+import (
+ {{.GinkgoImport}}
+ {{.GomegaImport}}
+
+ {{if .ImportPackage}}"{{.PackageImportPath}}"{{end}}
+)
+
+var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() {
+
+})
+`
+
+var agoutiSpecText = `{{.BuildTags}}
+package {{.Package}}
+
+import (
+ {{.GinkgoImport}}
+ {{.GomegaImport}}
+ "github.com/sclevine/agouti"
+ . "github.com/sclevine/agouti/matchers"
+
+ {{if .ImportPackage}}"{{.PackageImportPath}}"{{end}}
+)
+
+var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() {
+ var page *agouti.Page
+
+ {{.GinkgoPackage}}BeforeEach(func() {
+ var err error
+ page, err = agoutiDriver.NewPage()
+ {{.GomegaPackage}}Expect(err).NotTo({{.GomegaPackage}}HaveOccurred())
+ })
+
+ {{.GinkgoPackage}}AfterEach(func() {
+ {{.GomegaPackage}}Expect(page.Destroy()).To({{.GomegaPackage}}Succeed())
+ })
+})
+`
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go
new file mode 100644
index 0000000..28c7aa6
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go
@@ -0,0 +1,76 @@
+package generators
+
+import (
+ "fmt"
+ "go/build"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+)
+
+type GeneratorsConfig struct {
+ Agouti, NoDot, Internal bool
+ CustomTemplate string
+ CustomTemplateData string
+ Tags string
+}
+
+func getPackageAndFormattedName() (string, string, string) {
+ path, err := os.Getwd()
+ command.AbortIfError("Could not get current working directory:", err)
+
+ dirName := strings.ReplaceAll(filepath.Base(path), "-", "_")
+ dirName = strings.ReplaceAll(dirName, " ", "_")
+
+ pkg, err := build.ImportDir(path, 0)
+ packageName := pkg.Name
+ if err != nil {
+ packageName = ensureLegalPackageName(dirName)
+ }
+
+ formattedName := prettifyName(filepath.Base(path))
+ return packageName, dirName, formattedName
+}
+
+func ensureLegalPackageName(name string) string {
+ if name == "_" {
+ return "underscore"
+ }
+ if len(name) == 0 {
+ return "empty"
+ }
+ n, isDigitErr := strconv.Atoi(string(name[0]))
+ if isDigitErr == nil {
+ return []string{"zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"}[n] + name[1:]
+ }
+ return name
+}
+
+func prettifyName(name string) string {
+ name = strings.ReplaceAll(name, "-", " ")
+ name = strings.ReplaceAll(name, "_", " ")
+ name = strings.Title(name)
+ name = strings.ReplaceAll(name, " ", "")
+ return name
+}
+
+func determinePackageName(name string, internal bool) string {
+ if internal {
+ return name
+ }
+
+ return name + "_test"
+}
+
+// getBuildTags returns the resultant string to be added.
+// If the input string is not empty, then returns a `//go:build {}` string,
+// otherwise returns an empty string.
+func getBuildTags(tags string) string {
+ if tags != "" {
+ return fmt.Sprintf("//go:build %s\n", tags)
+ }
+ return ""
+}
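
To make the name mangling above concrete, a small standalone sketch of the same transformations (inputs are hypothetical):

package main

import (
    "fmt"
    "strings"
)

// prettify mirrors prettifyName above: kebab- and snake-case directory
// names become a single CamelCase word (strings.Title is deprecated but is
// what the vendored code uses).
func prettify(name string) string {
    name = strings.ReplaceAll(name, "-", " ")
    name = strings.ReplaceAll(name, "_", " ")
    name = strings.Title(name)
    return strings.ReplaceAll(name, " ", "")
}

func main() {
    fmt.Println(prettify("book-store"))  // BookStore
    fmt.Println(prettify("http_client")) // HttpClient
    // ensureLegalPackageName("9lives") would yield "ninelives", and
    // getBuildTags("e2e,!unit") emits:
    fmt.Print("//go:build e2e,!unit\n")
}
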
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go
new file mode 100644
index 0000000..86da734
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go
@@ -0,0 +1,161 @@
+package internal
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite {
+ if suite.PathToCompiledTest != "" {
+ return suite
+ }
+
+ suite.CompilationError = nil
+
+ path, err := filepath.Abs(filepath.Join(suite.Path, suite.PackageName+".test"))
+ if err != nil {
+ suite.State = TestSuiteStateFailedToCompile
+ suite.CompilationError = fmt.Errorf("Failed to compute compilation target path:\n%s", err.Error())
+ return suite
+ }
+
+ ginkgoInvocationPath, _ := os.Getwd()
+ ginkgoInvocationPath, _ = filepath.Abs(ginkgoInvocationPath)
+ packagePath := suite.AbsPath()
+ pathToInvocationPath, err := filepath.Rel(packagePath, ginkgoInvocationPath)
+ if err != nil {
+ suite.State = TestSuiteStateFailedToCompile
+ suite.CompilationError = fmt.Errorf("Failed to get relative path from package to the current working directory:\n%s", err.Error())
+ return suite
+ }
+ args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, path, "./", pathToInvocationPath)
+ if err != nil {
+ suite.State = TestSuiteStateFailedToCompile
+ suite.CompilationError = fmt.Errorf("Failed to generate go test compile flags:\n%s", err.Error())
+ return suite
+ }
+
+ cmd := exec.Command("go", args...)
+ cmd.Dir = suite.Path
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ if len(output) > 0 {
+ suite.State = TestSuiteStateFailedToCompile
+ suite.CompilationError = fmt.Errorf("Failed to compile %s:\n\n%s", suite.PackageName, output)
+ } else {
+ suite.State = TestSuiteStateFailedToCompile
+ suite.CompilationError = fmt.Errorf("Failed to compile %s\n%s", suite.PackageName, err.Error())
+ }
+ return suite
+ }
+
+ if strings.Contains(string(output), "[no test files]") {
+ suite.State = TestSuiteStateSkippedDueToEmptyCompilation
+ return suite
+ }
+
+ if len(output) > 0 {
+ fmt.Println(string(output))
+ }
+
+ if !FileExists(path) {
+ suite.State = TestSuiteStateFailedToCompile
+ suite.CompilationError = fmt.Errorf("Failed to compile %s:\nOutput file %s could not be found", suite.PackageName, path)
+ return suite
+ }
+
+ suite.State = TestSuiteStateCompiled
+ suite.PathToCompiledTest = path
+ return suite
+}
+
+func Cleanup(goFlagsConfig types.GoFlagsConfig, suites ...TestSuite) {
+ if goFlagsConfig.BinaryMustBePreserved() {
+ return
+ }
+ for _, suite := range suites {
+ if !suite.Precompiled {
+ os.Remove(suite.PathToCompiledTest)
+ }
+ }
+}
+
+type parallelSuiteBundle struct {
+ suite TestSuite
+ compiled chan TestSuite
+}
+
+type OrderedParallelCompiler struct {
+ mutex *sync.Mutex
+ stopped bool
+ numCompilers int
+
+ idx int
+ numSuites int
+ completionChannels []chan TestSuite
+}
+
+func NewOrderedParallelCompiler(numCompilers int) *OrderedParallelCompiler {
+ return &OrderedParallelCompiler{
+ mutex: &sync.Mutex{},
+ numCompilers: numCompilers,
+ }
+}
+
+func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsConfig types.GoFlagsConfig) {
+ opc.stopped = false
+ opc.idx = 0
+ opc.numSuites = len(suites)
+ opc.completionChannels = make([]chan TestSuite, opc.numSuites)
+
+ toCompile := make(chan parallelSuiteBundle, opc.numCompilers)
+ for compiler := 0; compiler < opc.numCompilers; compiler++ {
+ go func() {
+ for bundle := range toCompile {
+ c, suite := bundle.compiled, bundle.suite
+ opc.mutex.Lock()
+ stopped := opc.stopped
+ opc.mutex.Unlock()
+ if !stopped {
+ suite = CompileSuite(suite, goFlagsConfig)
+ }
+ c <- suite
+ }
+ }()
+ }
+
+ for idx, suite := range suites {
+ opc.completionChannels[idx] = make(chan TestSuite, 1)
+ toCompile <- parallelSuiteBundle{suite, opc.completionChannels[idx]}
+ if idx == 0 { //compile first suite serially
+ suite = <-opc.completionChannels[0]
+ opc.completionChannels[0] <- suite
+ }
+ }
+
+ close(toCompile)
+}
+
+func (opc *OrderedParallelCompiler) Next() (int, TestSuite) {
+ if opc.idx >= opc.numSuites {
+ return opc.numSuites, TestSuite{}
+ }
+
+ idx := opc.idx
+ suite := <-opc.completionChannels[idx]
+ opc.idx = opc.idx + 1
+
+ return idx, suite
+}
+
+func (opc *OrderedParallelCompiler) StopAndDrain() {
+ opc.mutex.Lock()
+ opc.stopped = true
+ opc.mutex.Unlock()
+}
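
A standalone sketch of the ordering trick OrderedParallelCompiler implements: every job gets its own buffered result channel, workers finish in any order, and the consumer drains the channels in index order so results come back sequenced (job names are hypothetical):

package main

import "fmt"

func main() {
    jobs := []string{"suite-a", "suite-b", "suite-c", "suite-d"}
    results := make([]chan string, len(jobs))
    work := make(chan int, 2)

    for w := 0; w < 2; w++ { // two concurrent "compilers"
        go func() {
            for idx := range work {
                results[idx] <- "compiled " + jobs[idx]
            }
        }()
    }

    for idx := range jobs {
        results[idx] = make(chan string, 1) // buffered: workers never block
        work <- idx
    }
    close(work)

    for idx := range jobs {
        fmt.Println(<-results[idx]) // always printed in a, b, c, d order
    }
}
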
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go
new file mode 100644
index 0000000..3c5079f
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go
@@ -0,0 +1,129 @@
+// Copyright (c) 2015, Wade Simmons
+// All rights reserved.
+
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package gocovmerge takes the results from multiple `go test -coverprofile`
+// runs and merges them into one profile
+
+// this file was originally taken from the gocovmerge project
+// see also: https://go.shabbyrobe.org/gocovmerge
+package internal
+
+import (
+ "fmt"
+ "io"
+ "sort"
+
+ "golang.org/x/tools/cover"
+)
+
+func AddCoverProfile(profiles []*cover.Profile, p *cover.Profile) []*cover.Profile {
+ i := sort.Search(len(profiles), func(i int) bool { return profiles[i].FileName >= p.FileName })
+ if i < len(profiles) && profiles[i].FileName == p.FileName {
+ MergeCoverProfiles(profiles[i], p)
+ } else {
+ profiles = append(profiles, nil)
+ copy(profiles[i+1:], profiles[i:])
+ profiles[i] = p
+ }
+ return profiles
+}
+
+func DumpCoverProfiles(profiles []*cover.Profile, out io.Writer) error {
+ if len(profiles) == 0 {
+ return nil
+ }
+ if _, err := fmt.Fprintf(out, "mode: %s\n", profiles[0].Mode); err != nil {
+ return err
+ }
+ for _, p := range profiles {
+ for _, b := range p.Blocks {
+ if _, err := fmt.Fprintf(out, "%s:%d.%d,%d.%d %d %d\n", p.FileName, b.StartLine, b.StartCol, b.EndLine, b.EndCol, b.NumStmt, b.Count); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func MergeCoverProfiles(into *cover.Profile, merge *cover.Profile) error {
+ if into.Mode != merge.Mode {
+ return fmt.Errorf("cannot merge profiles with different modes")
+ }
+ // Since the blocks are sorted, we can keep track of where the last block
+ // was inserted and only look at the blocks after that as targets for merge
+ startIndex := 0
+ for _, b := range merge.Blocks {
+ var err error
+ startIndex, err = mergeProfileBlock(into, b, startIndex)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func mergeProfileBlock(p *cover.Profile, pb cover.ProfileBlock, startIndex int) (int, error) {
+ sortFunc := func(i int) bool {
+ pi := p.Blocks[i+startIndex]
+ return pi.StartLine >= pb.StartLine && (pi.StartLine != pb.StartLine || pi.StartCol >= pb.StartCol)
+ }
+
+ i := 0
+ if !sortFunc(i) {
+ i = sort.Search(len(p.Blocks)-startIndex, sortFunc)
+ }
+
+ i += startIndex
+ if i < len(p.Blocks) && p.Blocks[i].StartLine == pb.StartLine && p.Blocks[i].StartCol == pb.StartCol {
+ if p.Blocks[i].EndLine != pb.EndLine || p.Blocks[i].EndCol != pb.EndCol {
+ return i, fmt.Errorf("gocovmerge: overlapping merge %v %v %v", p.FileName, p.Blocks[i], pb)
+ }
+ switch p.Mode {
+ case "set":
+ p.Blocks[i].Count |= pb.Count
+ case "count", "atomic":
+ p.Blocks[i].Count += pb.Count
+ default:
+ return i, fmt.Errorf("gocovmerge: unsupported covermode '%s'", p.Mode)
+ }
+
+ } else {
+ if i > 0 {
+ pa := p.Blocks[i-1]
+ if pa.EndLine >= pb.EndLine && (pa.EndLine != pb.EndLine || pa.EndCol > pb.EndCol) {
+ return i, fmt.Errorf("gocovmerge: overlap before %v %v %v", p.FileName, pa, pb)
+ }
+ }
+ if i < len(p.Blocks)-1 {
+ pa := p.Blocks[i+1]
+ if pa.StartLine <= pb.StartLine && (pa.StartLine != pb.StartLine || pa.StartCol < pb.StartCol) {
+ return i, fmt.Errorf("gocovmerge: overlap after %v %v %v", p.FileName, pa, pb)
+ }
+ }
+ p.Blocks = append(p.Blocks, cover.ProfileBlock{})
+ copy(p.Blocks[i+1:], p.Blocks[i:])
+ p.Blocks[i] = pb
+ }
+
+ return i + 1, nil
+}
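
The per-mode arithmetic in mergeProfileBlock is easy to state concretely: for the same source block seen in two profiles, "set" mode ORs the counts (was the block reached in either run), while "count" and "atomic" add them (total hits). A tiny standalone check:

package main

import "fmt"

func main() {
    // Two runs covering the same block, per mergeProfileBlock:
    count1, count2 := 1, 0
    fmt.Println(count1 | count2) // "set" mode keeps reached-or-not: 1

    count1, count2 = 3, 5
    fmt.Println(count1 + count2) // "count"/"atomic" accumulate hits: 8
}
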
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go
new file mode 100644
index 0000000..8e16d2b
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go
@@ -0,0 +1,227 @@
+package internal
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "strconv"
+
+ "github.com/google/pprof/profile"
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+ "golang.org/x/tools/cover"
+)
+
+func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process int) string {
+ suffix := ""
+ if process != 0 {
+ suffix = fmt.Sprintf(".%d", process)
+ }
+ if cliConfig.OutputDir == "" {
+ return filepath.Join(suite.AbsPath(), assetName+suffix)
+ }
+ outputDir, _ := filepath.Abs(cliConfig.OutputDir)
+ return filepath.Join(outputDir, suite.NamespacedName()+"_"+assetName+suffix)
+}
+
+func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIConfig, suiteConfig types.SuiteConfig, reporterConfig types.ReporterConfig, goFlagsConfig types.GoFlagsConfig) ([]string, error) {
+ messages := []string{}
+ suitesWithProfiles := suites.WithState(TestSuiteStatePassed, TestSuiteStateFailed) //anything else won't have actually run and generated a profile
+
+ // merge cover profiles if need be
+ if goFlagsConfig.Cover && !cliConfig.KeepSeparateCoverprofiles {
+ coverProfiles := []string{}
+ for _, suite := range suitesWithProfiles {
+ if !suite.HasProgrammaticFocus {
+ coverProfiles = append(coverProfiles, AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0))
+ }
+ }
+
+ if len(coverProfiles) > 0 {
+ dst := goFlagsConfig.CoverProfile
+ if cliConfig.OutputDir != "" {
+ dst = filepath.Join(cliConfig.OutputDir, goFlagsConfig.CoverProfile)
+ }
+ err := MergeAndCleanupCoverProfiles(coverProfiles, dst)
+ if err != nil {
+ return messages, err
+ }
+ coverage, err := GetCoverageFromCoverProfile(dst)
+ if err != nil {
+ return messages, err
+ }
+ if coverage == 0 {
+ messages = append(messages, "composite coverage: [no statements]")
+ } else if suitesWithProfiles.AnyHaveProgrammaticFocus() {
+ messages = append(messages, fmt.Sprintf("composite coverage: %.1f%% of statements however some suites did not contribute because they included programatically focused specs", coverage))
+ } else {
+ messages = append(messages, fmt.Sprintf("composite coverage: %.1f%% of statements", coverage))
+ }
+ } else {
+ messages = append(messages, "no composite coverage computed: all suites included programatically focused specs")
+ }
+ }
+
+ // copy binaries if need be
+ for _, suite := range suitesWithProfiles {
+ if goFlagsConfig.BinaryMustBePreserved() && cliConfig.OutputDir != "" {
+ src := suite.PathToCompiledTest
+ dst := filepath.Join(cliConfig.OutputDir, suite.NamespacedName()+".test")
+ if suite.Precompiled {
+ if err := CopyFile(src, dst); err != nil {
+ return messages, err
+ }
+ } else {
+ if err := os.Rename(src, dst); err != nil {
+ return messages, err
+ }
+ }
+ }
+ }
+
+ type reportFormat struct {
+ ReportName string
+ GenerateFunc func(types.Report, string) error
+ MergeFunc func([]string, string) ([]string, error)
+ }
+ reportFormats := []reportFormat{}
+ if reporterConfig.JSONReport != "" {
+ reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JSONReport, GenerateFunc: reporters.GenerateJSONReport, MergeFunc: reporters.MergeAndCleanupJSONReports})
+ }
+ if reporterConfig.JUnitReport != "" {
+ reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JUnitReport, GenerateFunc: reporters.GenerateJUnitReport, MergeFunc: reporters.MergeAndCleanupJUnitReports})
+ }
+ if reporterConfig.TeamcityReport != "" {
+ reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.TeamcityReport, GenerateFunc: reporters.GenerateTeamcityReport, MergeFunc: reporters.MergeAndCleanupTeamcityReports})
+ }
+
+ // Generate reports for suites that failed to run
+ reportableSuites := suites.ThatAreGinkgoSuites()
+ for _, suite := range reportableSuites.WithState(TestSuiteStateFailedToCompile, TestSuiteStateFailedDueToTimeout, TestSuiteStateSkippedDueToPriorFailures, TestSuiteStateSkippedDueToEmptyCompilation) {
+ report := types.Report{
+ SuitePath: suite.AbsPath(),
+ SuiteConfig: suiteConfig,
+ SuiteSucceeded: false,
+ }
+ switch suite.State {
+ case TestSuiteStateFailedToCompile:
+ report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, suite.CompilationError.Error())
+ case TestSuiteStateFailedDueToTimeout:
+ report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, TIMEOUT_ELAPSED_FAILURE_REASON)
+ case TestSuiteStateSkippedDueToPriorFailures:
+ report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, PRIOR_FAILURES_FAILURE_REASON)
+ case TestSuiteStateSkippedDueToEmptyCompilation:
+ report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, EMPTY_SKIP_FAILURE_REASON)
+ report.SuiteSucceeded = true
+ }
+
+ for _, format := range reportFormats {
+ format.GenerateFunc(report, AbsPathForGeneratedAsset(format.ReportName, suite, cliConfig, 0))
+ }
+ }
+
+ // Merge reports unless we've been asked to keep them separate
+ if !cliConfig.KeepSeparateReports {
+ for _, format := range reportFormats {
+ reports := []string{}
+ for _, suite := range reportableSuites {
+ reports = append(reports, AbsPathForGeneratedAsset(format.ReportName, suite, cliConfig, 0))
+ }
+ dst := format.ReportName
+ if cliConfig.OutputDir != "" {
+ dst = filepath.Join(cliConfig.OutputDir, format.ReportName)
+ }
+ mergeMessages, err := format.MergeFunc(reports, dst)
+ messages = append(messages, mergeMessages...)
+ if err != nil {
+ return messages, err
+ }
+ }
+ }
+
+ return messages, nil
+}
+
+// MergeAndCleanupCoverProfiles loads each profile, merges them, deletes the originals, and writes the merged result to destination
+func MergeAndCleanupCoverProfiles(profiles []string, destination string) error {
+ var merged []*cover.Profile
+ for _, file := range profiles {
+ parsedProfiles, err := cover.ParseProfiles(file)
+ if err != nil {
+ return err
+ }
+ os.Remove(file)
+ for _, p := range parsedProfiles {
+ merged = AddCoverProfile(merged, p)
+ }
+ }
+ dst, err := os.OpenFile(destination, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+ if err != nil {
+ return err
+ }
+ defer dst.Close()
+ err = DumpCoverProfiles(merged, dst)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func GetCoverageFromCoverProfile(profile string) (float64, error) {
+ cmd := exec.Command("go", "tool", "cover", "-func", profile)
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ return 0, fmt.Errorf("Could not process Coverprofile %s: %s - %s", profile, err.Error(), string(output))
+ }
+ re := regexp.MustCompile(`total:\s*\(statements\)\s*(\d*\.\d*)\%`)
+ matches := re.FindStringSubmatch(string(output))
+ if matches == nil {
+ return 0, fmt.Errorf("Could not parse Coverprofile to compute coverage percentage")
+ }
+ coverageString := matches[1]
+ coverage, err := strconv.ParseFloat(coverageString, 64)
+ if err != nil {
+ return 0, fmt.Errorf("Could not parse Coverprofile to compute coverage percentage: %s", err.Error())
+ }
+
+ return coverage, nil
+}
+
+func MergeProfiles(profilePaths []string, destination string) error {
+ profiles := []*profile.Profile{}
+ for _, profilePath := range profilePaths {
+ proFile, err := os.Open(profilePath)
+ if err != nil {
+ return fmt.Errorf("Could not open profile: %s\n%s", profilePath, err.Error())
+ }
+ prof, err := profile.Parse(proFile)
+ _ = proFile.Close()
+ if err != nil {
+ return fmt.Errorf("Could not parse profile: %s\n%s", profilePath, err.Error())
+ }
+ profiles = append(profiles, prof)
+ os.Remove(profilePath)
+ }
+
+ mergedProfile, err := profile.Merge(profiles)
+ if err != nil {
+ return fmt.Errorf("Could not merge profiles:\n%s", err.Error())
+ }
+
+ outFile, err := os.Create(destination)
+ if err != nil {
+ return fmt.Errorf("Could not create merged profile %s:\n%s", destination, err.Error())
+ }
+ err = mergedProfile.Write(outFile)
+ if err != nil {
+ return fmt.Errorf("Could not write merged profile %s:\n%s", destination, err.Error())
+ }
+ err = outFile.Close()
+ if err != nil {
+ return fmt.Errorf("Could not close merged profile %s:\n%s", destination, err.Error())
+ }
+
+ return nil
+}
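
GetCoverageFromCoverProfile shells out to `go tool cover -func` and scrapes the summary line; here is a standalone sketch of the same regex against an example of that line (the output text is illustrative):

package main

import (
    "fmt"
    "regexp"
    "strconv"
)

func main() {
    // Example final line of `go tool cover -func <profile>` output:
    out := "total:\t(statements)\t81.5%"

    // Same pattern as GetCoverageFromCoverProfile above.
    re := regexp.MustCompile(`total:\s*\(statements\)\s*(\d*\.\d*)\%`)
    matches := re.FindStringSubmatch(out)
    coverage, err := strconv.ParseFloat(matches[1], 64)
    fmt.Println(coverage, err) // 81.5 <nil>
}
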
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go
new file mode 100644
index 0000000..41052ea
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go
@@ -0,0 +1,355 @@
+package internal
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/internal/parallel_support"
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+func RunCompiledSuite(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite {
+ suite.State = TestSuiteStateFailed
+ suite.HasProgrammaticFocus = false
+
+ if suite.PathToCompiledTest == "" {
+ return suite
+ }
+
+ if suite.IsGinkgo && cliConfig.ComputedProcs() > 1 {
+ suite = runParallel(suite, ginkgoConfig, reporterConfig, cliConfig, goFlagsConfig, additionalArgs)
+ } else if suite.IsGinkgo {
+ suite = runSerial(suite, ginkgoConfig, reporterConfig, cliConfig, goFlagsConfig, additionalArgs)
+ } else {
+ suite = runGoTest(suite, cliConfig, goFlagsConfig)
+ }
+ runAfterRunHook(cliConfig.AfterRunHook, reporterConfig.NoColor, suite)
+ return suite
+}
+
+func buildAndStartCommand(suite TestSuite, args []string, pipeToStdout bool) (*exec.Cmd, *bytes.Buffer) {
+ buf := &bytes.Buffer{}
+ cmd := exec.Command(suite.PathToCompiledTest, args...)
+ cmd.Dir = suite.Path
+ if pipeToStdout {
+ cmd.Stderr = io.MultiWriter(os.Stdout, buf)
+ cmd.Stdout = os.Stdout
+ } else {
+ cmd.Stderr = buf
+ cmd.Stdout = buf
+ }
+ err := cmd.Start()
+ command.AbortIfError("Failed to start test suite", err)
+
+ return cmd, buf
+}
+
+func checkForNoTestsWarning(buf *bytes.Buffer) bool {
+ if strings.Contains(buf.String(), "warning: no tests to run") {
+ fmt.Fprintf(os.Stderr, `Found no test suites. Did you forget to run "ginkgo bootstrap"?`)
+ return true
+ }
+ return false
+}
+
+func runGoTest(suite TestSuite, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig) TestSuite {
+ // As we run the go test from the suite directory, make sure the cover profile is absolute
+ // and placed into the expected output directory when one is configured.
+ if goFlagsConfig.Cover && !filepath.IsAbs(goFlagsConfig.CoverProfile) {
+ goFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0)
+ }
+
+ args, err := types.GenerateGoTestRunArgs(goFlagsConfig)
+ command.AbortIfError("Failed to generate test run arguments", err)
+ cmd, buf := buildAndStartCommand(suite, args, true)
+
+ cmd.Wait()
+
+ exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
+ passed := (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
+ passed = !(checkForNoTestsWarning(buf) && cliConfig.RequireSuite) && passed
+ if passed {
+ suite.State = TestSuiteStatePassed
+ } else {
+ suite.State = TestSuiteStateFailed
+ }
+
+ return suite
+}
+
+func runSerial(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite {
+ if goFlagsConfig.Cover {
+ goFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0)
+ }
+ if goFlagsConfig.BlockProfile != "" {
+ goFlagsConfig.BlockProfile = AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, 0)
+ }
+ if goFlagsConfig.CPUProfile != "" {
+ goFlagsConfig.CPUProfile = AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, 0)
+ }
+ if goFlagsConfig.MemProfile != "" {
+ goFlagsConfig.MemProfile = AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, 0)
+ }
+ if goFlagsConfig.MutexProfile != "" {
+ goFlagsConfig.MutexProfile = AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, 0)
+ }
+ if reporterConfig.JSONReport != "" {
+ reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0)
+ }
+ if reporterConfig.JUnitReport != "" {
+ reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0)
+ }
+ if reporterConfig.TeamcityReport != "" {
+ reporterConfig.TeamcityReport = AbsPathForGeneratedAsset(reporterConfig.TeamcityReport, suite, cliConfig, 0)
+ }
+
+ args, err := types.GenerateGinkgoTestRunArgs(ginkgoConfig, reporterConfig, goFlagsConfig)
+ command.AbortIfError("Failed to generate test run arguments", err)
+ args = append([]string{"--test.timeout=0"}, args...)
+ args = append(args, additionalArgs...)
+
+ cmd, buf := buildAndStartCommand(suite, args, true)
+
+ cmd.Wait()
+
+ exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
+ suite.HasProgrammaticFocus = (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
+ passed := (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
+ passed = !(checkForNoTestsWarning(buf) && cliConfig.RequireSuite) && passed
+ if passed {
+ suite.State = TestSuiteStatePassed
+ } else {
+ suite.State = TestSuiteStateFailed
+ }
+
+ if suite.HasProgrammaticFocus {
+ if goFlagsConfig.Cover {
+ fmt.Fprintln(os.Stdout, "coverage: no coverfile was generated because specs are programmatically focused")
+ }
+ if goFlagsConfig.BlockProfile != "" {
+ fmt.Fprintln(os.Stdout, "no block profile was generated because specs are programmatically focused")
+ }
+ if goFlagsConfig.CPUProfile != "" {
+ fmt.Fprintln(os.Stdout, "no cpu profile was generated because specs are programmatically focused")
+ }
+ if goFlagsConfig.MemProfile != "" {
+ fmt.Fprintln(os.Stdout, "no mem profile was generated because specs are programmatically focused")
+ }
+ if goFlagsConfig.MutexProfile != "" {
+ fmt.Fprintln(os.Stdout, "no mutex profile was generated because specs are programmatically focused")
+ }
+ }
+
+ return suite
+}
+
+func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite {
+ type procResult struct {
+ passed bool
+ hasProgrammaticFocus bool
+ }
+
+ numProcs := cliConfig.ComputedProcs()
+ procOutput := make([]*bytes.Buffer, numProcs)
+ coverProfiles := []string{}
+
+ blockProfiles := []string{}
+ cpuProfiles := []string{}
+ memProfiles := []string{}
+ mutexProfiles := []string{}
+
+ procResults := make(chan procResult)
+
+ server, err := parallel_support.NewServer(numProcs, reporters.NewDefaultReporter(reporterConfig, formatter.ColorableStdOut))
+ command.AbortIfError("Failed to start parallel spec server", err)
+ server.Start()
+ defer server.Close()
+
+ if reporterConfig.JSONReport != "" {
+ reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0)
+ }
+ if reporterConfig.JUnitReport != "" {
+ reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0)
+ }
+ if reporterConfig.TeamcityReport != "" {
+ reporterConfig.TeamcityReport = AbsPathForGeneratedAsset(reporterConfig.TeamcityReport, suite, cliConfig, 0)
+ }
+
+ for proc := 1; proc <= numProcs; proc++ {
+ procGinkgoConfig := ginkgoConfig
+ procGinkgoConfig.ParallelProcess, procGinkgoConfig.ParallelTotal, procGinkgoConfig.ParallelHost = proc, numProcs, server.Address()
+
+ procGoFlagsConfig := goFlagsConfig
+ if goFlagsConfig.Cover {
+ procGoFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, proc)
+ coverProfiles = append(coverProfiles, procGoFlagsConfig.CoverProfile)
+ }
+ if goFlagsConfig.BlockProfile != "" {
+ procGoFlagsConfig.BlockProfile = AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, proc)
+ blockProfiles = append(blockProfiles, procGoFlagsConfig.BlockProfile)
+ }
+ if goFlagsConfig.CPUProfile != "" {
+ procGoFlagsConfig.CPUProfile = AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, proc)
+ cpuProfiles = append(cpuProfiles, procGoFlagsConfig.CPUProfile)
+ }
+ if goFlagsConfig.MemProfile != "" {
+ procGoFlagsConfig.MemProfile = AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, proc)
+ memProfiles = append(memProfiles, procGoFlagsConfig.MemProfile)
+ }
+ if goFlagsConfig.MutexProfile != "" {
+ procGoFlagsConfig.MutexProfile = AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, proc)
+ mutexProfiles = append(mutexProfiles, procGoFlagsConfig.MutexProfile)
+ }
+
+ args, err := types.GenerateGinkgoTestRunArgs(procGinkgoConfig, reporterConfig, procGoFlagsConfig)
+ command.AbortIfError("Failed to generate test run arguments", err)
+ args = append([]string{"--test.timeout=0"}, args...)
+ args = append(args, additionalArgs...)
+
+ cmd, buf := buildAndStartCommand(suite, args, false)
+ procOutput[proc-1] = buf
+ server.RegisterAlive(proc, func() bool { return cmd.ProcessState == nil || !cmd.ProcessState.Exited() })
+
+ go func() {
+ cmd.Wait()
+ exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
+ procResults <- procResult{
+ passed: (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE),
+ hasProgrammaticFocus: exitStatus == types.GINKGO_FOCUS_EXIT_CODE,
+ }
+ }()
+ }
+
+ passed := true
+ for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ {
+ result := <-procResults
+ passed = passed && result.passed
+ suite.HasProgrammaticFocus = suite.HasProgrammaticFocus || result.hasProgrammaticFocus
+ }
+ if passed {
+ suite.State = TestSuiteStatePassed
+ } else {
+ suite.State = TestSuiteStateFailed
+ }
+
+ select {
+ case <-server.GetSuiteDone():
+ fmt.Println("")
+ case <-time.After(time.Second):
+ //one of the nodes never finished reporting to the server. Something must have gone wrong.
+ fmt.Fprint(formatter.ColorableStdErr, formatter.F("\n{{bold}}{{red}}Ginkgo timed out waiting for all parallel procs to report back{{/}}\n"))
+ fmt.Fprint(formatter.ColorableStdErr, formatter.F("{{gray}}Test suite:{{/}} %s (%s)\n\n", suite.PackageName, suite.Path))
+ fmt.Fprint(formatter.ColorableStdErr, formatter.Fiw(0, formatter.COLS, "This occurs if a parallel process exits before it reports its results to the Ginkgo CLI. The CLI will now print out all the stdout/stderr output it's collected from the running processes. However you may not see anything useful in these logs because the individual test processes usually intercept output to stdout/stderr in order to capture it in the spec reports.\n\nYou may want to try rerunning your test suite with {{light-gray}}--output-interceptor-mode=none{{/}} to see additional output here and debug your suite.\n"))
+ fmt.Fprintln(formatter.ColorableStdErr, " ")
+ for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ {
+ fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{bold}}Output from proc %d:{{/}}\n", proc))
+ fmt.Fprintln(os.Stderr, formatter.Fi(1, "%s", procOutput[proc-1].String()))
+ }
+ fmt.Fprintf(os.Stderr, "** End **")
+ }
+
+ for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ {
+ output := procOutput[proc-1].String()
+ if proc == 1 && checkForNoTestsWarning(procOutput[0]) && cliConfig.RequireSuite {
+ suite.State = TestSuiteStateFailed
+ }
+ if strings.Contains(output, "deprecated Ginkgo functionality") {
+ fmt.Fprintln(os.Stderr, output)
+ }
+ }
+
+ if len(coverProfiles) > 0 {
+ if suite.HasProgrammaticFocus {
+ fmt.Fprintln(os.Stdout, "coverage: no coverfile was generated because specs are programmatically focused")
+ } else {
+ coverProfile := AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0)
+ err := MergeAndCleanupCoverProfiles(coverProfiles, coverProfile)
+ command.AbortIfError("Failed to combine cover profiles", err)
+
+ coverage, err := GetCoverageFromCoverProfile(coverProfile)
+ command.AbortIfError("Failed to compute coverage", err)
+ if coverage == 0 {
+ fmt.Fprintln(os.Stdout, "coverage: [no statements]")
+ } else {
+ fmt.Fprintf(os.Stdout, "coverage: %.1f%% of statements\n", coverage)
+ }
+ }
+ }
+ if len(blockProfiles) > 0 {
+ if suite.HasProgrammaticFocus {
+ fmt.Fprintln(os.Stdout, "no block profile was generated because specs are programmatically focused")
+ } else {
+ blockProfile := AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, 0)
+ err := MergeProfiles(blockProfiles, blockProfile)
+ command.AbortIfError("Failed to combine blockprofiles", err)
+ }
+ }
+ if len(cpuProfiles) > 0 {
+ if suite.HasProgrammaticFocus {
+ fmt.Fprintln(os.Stdout, "no cpu profile was generated because specs are programmatically focused")
+ } else {
+ cpuProfile := AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, 0)
+ err := MergeProfiles(cpuProfiles, cpuProfile)
+ command.AbortIfError("Failed to combine cpuprofiles", err)
+ }
+ }
+ if len(memProfiles) > 0 {
+ if suite.HasProgrammaticFocus {
+ fmt.Fprintln(os.Stdout, "no mem profile was generated because specs are programmatically focused")
+ } else {
+ memProfile := AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, 0)
+ err := MergeProfiles(memProfiles, memProfile)
+ command.AbortIfError("Failed to combine memprofiles", err)
+ }
+ }
+ if len(mutexProfiles) > 0 {
+ if suite.HasProgrammaticFocus {
+ fmt.Fprintln(os.Stdout, "no mutex profile was generated because specs are programmatically focused")
+ } else {
+ mutexProfile := AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, 0)
+ err := MergeProfiles(mutexProfiles, mutexProfile)
+ command.AbortIfError("Failed to combine mutexprofiles", err)
+ }
+ }
+
+ return suite
+}
+
+func runAfterRunHook(command string, noColor bool, suite TestSuite) {
+ if command == "" {
+ return
+ }
+ f := formatter.NewWithNoColorBool(noColor)
+
+ // Allow for string replacement to pass input to the command
+ passed := "[FAIL]"
+ if suite.State.Is(TestSuiteStatePassed) {
+ passed = "[PASS]"
+ }
+ command = strings.ReplaceAll(command, "(ginkgo-suite-passed)", passed)
+ command = strings.ReplaceAll(command, "(ginkgo-suite-name)", suite.PackageName)
+
+ // Break the command into parts, keeping single- or double-quoted arguments intact
+ splitArgs := regexp.MustCompile(`'.+'|".+"|\S+`)
+ parts := splitArgs.FindAllString(command, -1)
+
+ output, err := exec.Command(parts[0], parts[1:]...).CombinedOutput()
+ if err != nil {
+ fmt.Fprintln(formatter.ColorableStdOut, f.Fi(0, "{{red}}{{bold}}After-run-hook failed:{{/}}"))
+ fmt.Fprintln(formatter.ColorableStdOut, f.Fi(1, "{{red}}%s{{/}}", output))
+ } else {
+ fmt.Fprintln(formatter.ColorableStdOut, f.Fi(0, "{{green}}{{bold}}After-run-hook succeeded:{{/}}"))
+ fmt.Fprintln(formatter.ColorableStdOut, f.Fi(1, "{{green}}%s{{/}}", output))
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go
new file mode 100644
index 0000000..df99875
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go
@@ -0,0 +1,284 @@
+package internal
+
+import (
+ "errors"
+ "math/rand"
+ "os"
+ "path"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+const TIMEOUT_ELAPSED_FAILURE_REASON = "Suite did not run because the timeout elapsed"
+const PRIOR_FAILURES_FAILURE_REASON = "Suite did not run because prior suites failed and --keep-going is not set"
+const EMPTY_SKIP_FAILURE_REASON = "Suite did not run because go test reported that no test files were found"
+
+type TestSuiteState uint
+
+const (
+ TestSuiteStateInvalid TestSuiteState = iota
+
+ TestSuiteStateUncompiled
+ TestSuiteStateCompiled
+
+ TestSuiteStatePassed
+
+ TestSuiteStateSkippedDueToEmptyCompilation
+ TestSuiteStateSkippedByFilter
+ TestSuiteStateSkippedDueToPriorFailures
+
+ TestSuiteStateFailed
+ TestSuiteStateFailedDueToTimeout
+ TestSuiteStateFailedToCompile
+)
+
+var TestSuiteStateFailureStates = []TestSuiteState{TestSuiteStateFailed, TestSuiteStateFailedDueToTimeout, TestSuiteStateFailedToCompile}
+
+func (state TestSuiteState) Is(states ...TestSuiteState) bool {
+ for _, suiteState := range states {
+ if suiteState == state {
+ return true
+ }
+ }
+
+ return false
+}
+
+type TestSuite struct {
+ Path string
+ PackageName string
+ IsGinkgo bool
+
+ Precompiled bool
+ PathToCompiledTest string
+ CompilationError error
+
+ HasProgrammaticFocus bool
+ State TestSuiteState
+}
+
+func (ts TestSuite) AbsPath() string {
+ path, _ := filepath.Abs(ts.Path)
+ return path
+}
+
+func (ts TestSuite) NamespacedName() string {
+ name := relPath(ts.Path)
+ name = strings.TrimLeft(name, "."+string(filepath.Separator))
+ name = strings.ReplaceAll(name, string(filepath.Separator), "_")
+ name = strings.ReplaceAll(name, " ", "_")
+ if name == "" {
+ return ts.PackageName
+ }
+ return name
+}
+
+type TestSuites []TestSuite
+
+func (ts TestSuites) AnyHaveProgrammaticFocus() bool {
+ for _, suite := range ts {
+ if suite.HasProgrammaticFocus {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (ts TestSuites) ThatAreGinkgoSuites() TestSuites {
+ out := TestSuites{}
+ for _, suite := range ts {
+ if suite.IsGinkgo {
+ out = append(out, suite)
+ }
+ }
+ return out
+}
+
+func (ts TestSuites) CountWithState(states ...TestSuiteState) int {
+ n := 0
+ for _, suite := range ts {
+ if suite.State.Is(states...) {
+ n += 1
+ }
+ }
+
+ return n
+}
+
+func (ts TestSuites) WithState(states ...TestSuiteState) TestSuites {
+ out := TestSuites{}
+ for _, suite := range ts {
+ if suite.State.Is(states...) {
+ out = append(out, suite)
+ }
+ }
+
+ return out
+}
+
+func (ts TestSuites) WithoutState(states ...TestSuiteState) TestSuites {
+ out := TestSuites{}
+ for _, suite := range ts {
+ if !suite.State.Is(states...) {
+ out = append(out, suite)
+ }
+ }
+
+ return out
+}
+
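+// ShuffledCopy returns a copy of the suites permuted by a rand source built
+// from seed, so a given seed always produces the same suite order.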
+func (ts TestSuites) ShuffledCopy(seed int64) TestSuites {
+ out := make(TestSuites, len(ts))
+ permutation := rand.New(rand.NewSource(seed)).Perm(len(ts))
+ for i, j := range permutation {
+ out[i] = ts[j]
+ }
+ return out
+}
+
+func FindSuites(args []string, cliConfig types.CLIConfig, allowPrecompiled bool) TestSuites {
+ suites := TestSuites{}
+
+ if len(args) > 0 {
+ for _, arg := range args {
+ if allowPrecompiled {
+ suite, err := precompiledTestSuite(arg)
+ if err == nil {
+ suites = append(suites, suite)
+ continue
+ }
+ }
+ recurseForSuite := cliConfig.Recurse
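+ // Support the go-style `pkg/...` wildcard: strip the suffix and recurse
+ // into the named directory.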
+ if strings.HasSuffix(arg, "/...") && arg != "/..." {
+ arg = arg[:len(arg)-4]
+ recurseForSuite = true
+ }
+ suites = append(suites, suitesInDir(arg, recurseForSuite)...)
+ }
+ } else {
+ suites = suitesInDir(".", cliConfig.Recurse)
+ }
+
+ if cliConfig.SkipPackage != "" {
+ skipFilters := strings.Split(cliConfig.SkipPackage, ",")
+ for idx := range suites {
+ for _, skipFilter := range skipFilters {
+ if strings.Contains(suites[idx].Path, skipFilter) {
+ suites[idx].State = TestSuiteStateSkippedByFilter
+ break
+ }
+ }
+ }
+ }
+
+ return suites
+}
+
+func precompiledTestSuite(path string) (TestSuite, error) {
+ info, err := os.Stat(path)
+ if err != nil {
+ return TestSuite{}, err
+ }
+
+ if info.IsDir() {
+ return TestSuite{}, errors.New("this is a directory, not a file")
+ }
+
+ if filepath.Ext(path) != ".test" && filepath.Ext(path) != ".exe" {
+ return TestSuite{}, errors.New("this is not a .test binary")
+ }
+
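+ // On non-Windows systems a .test binary must have at least one executable
+ // bit (0111) set, otherwise it cannot be exec'd.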
+ if filepath.Ext(path) == ".test" && runtime.GOOS != "windows" && info.Mode()&0111 == 0 {
+ return TestSuite{}, errors.New("this is not executable")
+ }
+
+ dir := relPath(filepath.Dir(path))
+ packageName := strings.TrimSuffix(filepath.Base(path), ".exe")
+ packageName = strings.TrimSuffix(packageName, ".test")
+
+ path, err = filepath.Abs(path)
+ if err != nil {
+ return TestSuite{}, err
+ }
+
+ return TestSuite{
+ Path: dir,
+ PackageName: packageName,
+ IsGinkgo: true,
+ Precompiled: true,
+ PathToCompiledTest: path,
+ State: TestSuiteStateCompiled,
+ }, nil
+}
+
+func suitesInDir(dir string, recurse bool) TestSuites {
+ suites := TestSuites{}
+
+ if path.Base(dir) == "vendor" {
+ return suites
+ }
+
+ files, _ := os.ReadDir(dir)
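+ // Match *_test.go files while skipping hidden and editor temp files that
+ // start with '.' or '_'.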
+ re := regexp.MustCompile(`^[^._].*_test\.go$`)
+ for _, file := range files {
+ if !file.IsDir() && re.MatchString(file.Name()) {
+ suite := TestSuite{
+ Path: relPath(dir),
+ PackageName: packageNameForSuite(dir),
+ IsGinkgo: filesHaveGinkgoSuite(dir, files),
+ State: TestSuiteStateUncompiled,
+ }
+ suites = append(suites, suite)
+ break
+ }
+ }
+
+ if recurse {
+ re = regexp.MustCompile(`^[._]`)
+ for _, file := range files {
+ if file.IsDir() && !re.MatchString(file.Name()) {
+ suites = append(suites, suitesInDir(dir+"/"+file.Name(), recurse)...)
+ }
+ }
+ }
+
+ return suites
+}
+
+func relPath(dir string) string {
+ dir, _ = filepath.Abs(dir)
+ cwd, _ := os.Getwd()
+ dir, _ = filepath.Rel(cwd, filepath.Clean(dir))
+
+ if string(dir[0]) != "." {
+ dir = "." + string(filepath.Separator) + dir
+ }
+
+ return dir
+}
+
+func packageNameForSuite(dir string) string {
+ path, _ := filepath.Abs(dir)
+ return filepath.Base(path)
+}
+
+func filesHaveGinkgoSuite(dir string, files []os.DirEntry) bool {
+ reTestFile := regexp.MustCompile(`_test\.go$`)
+ reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"|\/ginkgo\/v2"|\/ginkgo\/v2/dsl/`)
+
+ for _, file := range files {
+ if !file.IsDir() && reTestFile.MatchString(file.Name()) {
+ contents, _ := os.ReadFile(dir + "/" + file.Name())
+ if reGinkgo.Match(contents) {
+ return true
+ }
+ }
+ }
+
+ return false
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go
new file mode 100644
index 0000000..bd9ca7d
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go
@@ -0,0 +1,86 @@
+package internal
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+)
+
+func FileExists(path string) bool {
+ _, err := os.Stat(path)
+ return err == nil
+}
+
+func CopyFile(src string, dest string) error {
+ srcFile, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+
+ srcStat, err := srcFile.Stat()
+ if err != nil {
+ return err
+ }
+
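+ // Remove any existing destination so the copy is created with the source
+ // file's mode instead of inheriting the old file's permissions.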
+ if _, err := os.Stat(dest); err == nil {
+ os.Remove(dest)
+ }
+
+ destFile, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE, srcStat.Mode())
+ if err != nil {
+ return err
+ }
+
+ _, err = io.Copy(destFile, srcFile)
+ if err != nil {
+ return err
+ }
+
+ if err := srcFile.Close(); err != nil {
+ return err
+ }
+ return destFile.Close()
+}
+
+func GoFmt(path string) {
+ out, err := exec.Command("go", "fmt", path).CombinedOutput()
+ if err != nil {
+ command.AbortIfError(fmt.Sprintf("Could not fmt:\n%s\n", string(out)), err)
+ }
+}
+
+func PluralizedWord(singular, plural string, count int) string {
+ if count == 1 {
+ return singular
+ }
+ return plural
+}
+
+func FailedSuitesReport(suites TestSuites, f formatter.Formatter) string {
+ out := ""
+ out += "There were failures detected in the following suites:\n"
+
+ maxPackageNameLength := 0
+ for _, suite := range suites.WithState(TestSuiteStateFailureStates...) {
+ if len(suite.PackageName) > maxPackageNameLength {
+ maxPackageNameLength = len(suite.PackageName)
+ }
+ }
+
+ packageNameFormatter := fmt.Sprintf("%%%ds", maxPackageNameLength)
+ for _, suite := range suites {
+ switch suite.State {
+ case TestSuiteStateFailed:
+ out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s{{/}}\n", suite.PackageName, suite.Path)
+ case TestSuiteStateFailedToCompile:
+ out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s {{magenta}}[Compilation failure]{{/}}\n", suite.PackageName, suite.Path)
+ case TestSuiteStateFailedDueToTimeout:
+ out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s {{orange}}[%s]{{/}}\n", suite.PackageName, suite.Path, TIMEOUT_ELAPSED_FAILURE_REASON)
+ }
+ }
+ return out
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go
new file mode 100644
index 0000000..9da1bab
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go
@@ -0,0 +1,54 @@
+package internal
+
+import (
+ "fmt"
+ "os/exec"
+ "regexp"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+var versionRe = regexp.MustCompile(`v(\d+\.\d+\.\d+)`)
+
+func VerifyCLIAndFrameworkVersion(suites TestSuites) {
+ cliVersion := types.VERSION
+ mismatches := map[string][]string{}
+
+ for _, suite := range suites {
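+ // Ask the go tool which ginkgo module version this suite resolves to;
+ // the output looks like "github.com/onsi/ginkgo/v2 v2.x.y".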
+ cmd := exec.Command("go", "list", "-m", "github.com/onsi/ginkgo/v2")
+ cmd.Dir = suite.Path
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ continue
+ }
+ components := strings.Split(string(output), " ")
+ if len(components) != 2 {
+ continue
+ }
+ matches := versionRe.FindStringSubmatch(components[1])
+ if matches == nil || len(matches) != 2 {
+ continue
+ }
+ libraryVersion := matches[1]
+ if cliVersion != libraryVersion {
+ mismatches[libraryVersion] = append(mismatches[libraryVersion], suite.PackageName)
+ }
+ }
+
+ if len(mismatches) == 0 {
+ return
+ }
+
+ fmt.Println(formatter.F("{{red}}{{bold}}Ginkgo detected a version mismatch between the Ginkgo CLI and the version of Ginkgo imported by your packages:{{/}}"))
+
+ fmt.Println(formatter.Fi(1, "Ginkgo CLI Version:"))
+ fmt.Println(formatter.Fi(2, "{{bold}}%s{{/}}", cliVersion))
+ fmt.Println(formatter.Fi(1, "Mismatched package versions found:"))
+ for version, packages := range mismatches {
+ fmt.Println(formatter.Fi(2, "{{bold}}%s{{/}} used by %s", version, strings.Join(packages, ", ")))
+ }
+ fmt.Println("")
+ fmt.Println(formatter.Fiw(1, formatter.COLS, "{{gray}}Ginkgo will continue to attempt to run but you may see errors (including flag parsing errors) and should either update your go.mod or your version of the Ginkgo CLI to match.\n\nTo install the matching version of the CLI run\n {{bold}}go install github.com/onsi/ginkgo/v2/ginkgo{{/}}{{gray}}\nfrom a path that contains a go.mod file. Alternatively you can use\n {{bold}}go run github.com/onsi/ginkgo/v2/ginkgo{{/}}{{gray}}\nfrom a path that contains a go.mod file to invoke the matching version of the Ginkgo CLI.\n\nIf you are attempting to test multiple packages that each have a different version of the Ginkgo library with a single Ginkgo CLI that is currently unsupported.\n{{/}}"))
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go
new file mode 100644
index 0000000..6c61f09
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go
@@ -0,0 +1,123 @@
+package labels
+
+import (
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/ginkgo/internal"
+ "github.com/onsi/ginkgo/v2/types"
+ "golang.org/x/tools/go/ast/inspector"
+)
+
+func BuildLabelsCommand() command.Command {
+ var cliConfig = types.NewDefaultCLIConfig()
+
+ flags, err := types.BuildLabelsCommandFlagSet(&cliConfig)
+ if err != nil {
+ panic(err)
+ }
+
+ return command.Command{
+ Name: "labels",
+ Usage: "ginkgo labels <FLAGS> <PACKAGES>",
+ Flags: flags,
+ ShortDoc: "List labels detected in the passed-in packages (or the package in the current directory if left blank).",
+ DocLink: "spec-labels",
+ Command: func(args []string, _ []string) {
+ ListLabels(args, cliConfig)
+ },
+ }
+}
+
+func ListLabels(args []string, cliConfig types.CLIConfig) {
+ suites := internal.FindSuites(args, cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter)
+ if len(suites) == 0 {
+ command.AbortWith("Found no test suites")
+ }
+ for _, suite := range suites {
+ labels := fetchLabelsFromPackage(suite.Path)
+ if len(labels) == 0 {
+ fmt.Printf("%s: No labels found\n", suite.PackageName)
+ } else {
+ fmt.Printf("%s: [%s]\n", suite.PackageName, strings.Join(labels, ", "))
+ }
+ }
+}
+
+func fetchLabelsFromPackage(packagePath string) []string {
+ fset := token.NewFileSet()
+ parsedPackages, err := parser.ParseDir(fset, packagePath, nil, 0)
+ command.AbortIfError("Failed to parse package source:", err)
+
+ files := []*ast.File{}
+ hasTestPackage := false
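+ // Prefer files from the external _test package when one exists; otherwise
+ // fall back to every package parsed from the directory.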
+ for key, pkg := range parsedPackages {
+ if strings.HasSuffix(key, "_test") {
+ hasTestPackage = true
+ for _, file := range pkg.Files {
+ files = append(files, file)
+ }
+ }
+ }
+ if !hasTestPackage {
+ for _, pkg := range parsedPackages {
+ for _, file := range pkg.Files {
+ files = append(files, file)
+ }
+ }
+ }
+
+ seen := map[string]bool{}
+ labels := []string{}
+ ispr := inspector.New(files)
+ ispr.Preorder([]ast.Node{&ast.CallExpr{}}, func(n ast.Node) {
+ potentialLabels := fetchLabels(n.(*ast.CallExpr))
+ for _, label := range potentialLabels {
+ if !seen[label] {
+ seen[label] = true
+ labels = append(labels, strconv.Quote(label))
+ }
+ }
+ })
+
+ sort.Strings(labels)
+ return labels
+}
+
+func fetchLabels(callExpr *ast.CallExpr) []string {
+ out := []string{}
+ switch expr := callExpr.Fun.(type) {
+ case *ast.Ident:
+ if expr.Name != "Label" {
+ return out
+ }
+ case *ast.SelectorExpr:
+ if expr.Sel.Name != "Label" {
+ return out
+ }
+ default:
+ return out
+ }
+ for _, arg := range callExpr.Args {
+ switch expr := arg.(type) {
+ case *ast.BasicLit:
+ if expr.Kind == token.STRING {
+ unquoted, err := strconv.Unquote(expr.Value)
+ if err != nil {
+ unquoted = expr.Value
+ }
+ validated, err := types.ValidateAndCleanupLabel(unquoted, types.CodeLocation{})
+ if err == nil {
+ out = append(out, validated)
+ }
+ }
+ }
+ }
+ return out
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go
new file mode 100644
index 0000000..e9abb27
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go
@@ -0,0 +1,58 @@
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/onsi/ginkgo/v2/ginkgo/build"
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/ginkgo/generators"
+ "github.com/onsi/ginkgo/v2/ginkgo/labels"
+ "github.com/onsi/ginkgo/v2/ginkgo/outline"
+ "github.com/onsi/ginkgo/v2/ginkgo/run"
+ "github.com/onsi/ginkgo/v2/ginkgo/unfocus"
+ "github.com/onsi/ginkgo/v2/ginkgo/watch"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+var program command.Program
+
+func GenerateCommands() []command.Command {
+ return []command.Command{
+ watch.BuildWatchCommand(),
+ build.BuildBuildCommand(),
+ generators.BuildBootstrapCommand(),
+ generators.BuildGenerateCommand(),
+ labels.BuildLabelsCommand(),
+ outline.BuildOutlineCommand(),
+ unfocus.BuildUnfocusCommand(),
+ BuildVersionCommand(),
+ }
+}
+
+func main() {
+ program = command.Program{
+ Name: "ginkgo",
+ Heading: fmt.Sprintf("Ginkgo Version %s", types.VERSION),
+ Commands: GenerateCommands(),
+ DefaultCommand: run.BuildRunCommand(),
+ DeprecatedCommands: []command.DeprecatedCommand{
+ {Name: "convert", Deprecation: types.Deprecations.Convert()},
+ {Name: "blur", Deprecation: types.Deprecations.Blur()},
+ {Name: "nodot", Deprecation: types.Deprecations.Nodot()},
+ },
+ }
+
+ program.RunAndExit(os.Args)
+}
+
+func BuildVersionCommand() command.Command {
+ return command.Command{
+ Name: "version",
+ Usage: "ginkgo version",
+ ShortDoc: "Print Ginkgo's version",
+ Command: func(_ []string, _ []string) {
+ fmt.Printf("Ginkgo Version %s\n", types.VERSION)
+ },
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go
new file mode 100644
index 0000000..5d8d00b
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go
@@ -0,0 +1,301 @@
+package outline
+
+import (
+ "go/ast"
+ "go/token"
+ "strconv"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+const (
+ // undefinedTextAlt is used if the spec/container text cannot be derived
+ undefinedTextAlt = "undefined"
+)
+
+// ginkgoMetadata holds useful bits of information for every entry in the outline
+type ginkgoMetadata struct {
+ // Name is the spec or container function name, e.g. `Describe` or `It`
+ Name string `json:"name"`
+
+ // Text is the `text` argument passed to specs, and some containers
+ Text string `json:"text"`
+
+ // Start is the position of first character of the spec or container block
+ Start int `json:"start"`
+
+ // End is the position of first character immediately after the spec or container block
+ End int `json:"end"`
+
+ Spec bool `json:"spec"`
+ Focused bool `json:"focused"`
+ Pending bool `json:"pending"`
+ Labels []string `json:"labels"`
+}
+
+// ginkgoNode is used to construct the outline as a tree
+type ginkgoNode struct {
+ ginkgoMetadata
+ Nodes []*ginkgoNode `json:"nodes"`
+}
+
+type walkFunc func(n *ginkgoNode)
+
+func (n *ginkgoNode) PreOrder(f walkFunc) {
+ f(n)
+ for _, m := range n.Nodes {
+ m.PreOrder(f)
+ }
+}
+
+func (n *ginkgoNode) PostOrder(f walkFunc) {
+ for _, m := range n.Nodes {
+ m.PostOrder(f)
+ }
+ f(n)
+}
+
+func (n *ginkgoNode) Walk(pre, post walkFunc) {
+ pre(n)
+ for _, m := range n.Nodes {
+ m.Walk(pre, post)
+ }
+ post(n)
+}
+
+// PropagateInheritedProperties propagates the Pending and Focused properties
+// through the subtree rooted at n.
+func (n *ginkgoNode) PropagateInheritedProperties() {
+ n.PreOrder(func(thisNode *ginkgoNode) {
+ for _, descendantNode := range thisNode.Nodes {
+ if thisNode.Pending {
+ descendantNode.Pending = true
+ descendantNode.Focused = false
+ }
+ if thisNode.Focused && !descendantNode.Pending {
+ descendantNode.Focused = true
+ }
+ }
+ })
+}
+
+// BackpropagateUnfocus propagates the Focused property through the subtree
+// rooted at n. It applies the rule described in the Ginkgo docs:
+// > Nested programmatically focused specs follow a simple rule: if a
+// > leaf-node is marked focused, any of its ancestor nodes that are marked
+// > focus will be unfocused.
+func (n *ginkgoNode) BackpropagateUnfocus() {
+ focusedSpecInSubtreeStack := []bool{}
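+ // Invariant: the stack holds one entry per fully-visited child subtree,
+ // recording whether that subtree contains a focused spec.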
+ n.PostOrder(func(thisNode *ginkgoNode) {
+ if thisNode.Spec {
+ focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, thisNode.Focused)
+ return
+ }
+ focusedSpecInSubtree := false
+ for range thisNode.Nodes {
+ focusedSpecInSubtree = focusedSpecInSubtree || focusedSpecInSubtreeStack[len(focusedSpecInSubtreeStack)-1]
+ focusedSpecInSubtreeStack = focusedSpecInSubtreeStack[0 : len(focusedSpecInSubtreeStack)-1]
+ }
+ focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, focusedSpecInSubtree)
+ if focusedSpecInSubtree {
+ thisNode.Focused = false
+ }
+ })
+}
+
+func packageAndIdentNamesFromCallExpr(ce *ast.CallExpr) (string, string, bool) {
+ switch ex := ce.Fun.(type) {
+ case *ast.Ident:
+ return "", ex.Name, true
+ case *ast.SelectorExpr:
+ pkgID, ok := ex.X.(*ast.Ident)
+ if !ok {
+ return "", "", false
+ }
+ // A package identifier is top-level, so Obj must be nil
+ if pkgID.Obj != nil {
+ return "", "", false
+ }
+ if ex.Sel == nil {
+ return "", "", false
+ }
+ return pkgID.Name, ex.Sel.Name, true
+ default:
+ return "", "", false
+ }
+}
+
+// absoluteOffsetsForNode derives the absolute character offsets of the node start and
+// end positions.
+func absoluteOffsetsForNode(fset *token.FileSet, n ast.Node) (start, end int) {
+ return fset.PositionFor(n.Pos(), false).Offset, fset.PositionFor(n.End(), false).Offset
+}
+
+// ginkgoNodeFromCallExpr derives an outline entry from a go AST subtree
+// corresponding to a Ginkgo container or spec.
+func ginkgoNodeFromCallExpr(fset *token.FileSet, ce *ast.CallExpr, ginkgoPackageName *string) (*ginkgoNode, bool) {
+ packageName, identName, ok := packageAndIdentNamesFromCallExpr(ce)
+ if !ok {
+ return nil, false
+ }
+
+ n := ginkgoNode{}
+ n.Name = identName
+ n.Start, n.End = absoluteOffsetsForNode(fset, ce)
+ n.Nodes = make([]*ginkgoNode, 0)
+ switch identName {
+ case "It", "Specify", "Entry":
+ n.Spec = true
+ n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
+ n.Labels = labelFromCallExpr(ce)
+ n.Pending = pendingFromCallExpr(ce)
+ return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
+ case "FIt", "FSpecify", "FEntry":
+ n.Spec = true
+ n.Focused = true
+ n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
+ n.Labels = labelFromCallExpr(ce)
+ return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
+ case "PIt", "PSpecify", "XIt", "XSpecify", "PEntry", "XEntry":
+ n.Spec = true
+ n.Pending = true
+ n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
+ n.Labels = labelFromCallExpr(ce)
+ return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
+ case "Context", "Describe", "When", "DescribeTable":
+ n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
+ n.Labels = labelFromCallExpr(ce)
+ n.Pending = pendingFromCallExpr(ce)
+ return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
+ case "FContext", "FDescribe", "FWhen", "FDescribeTable":
+ n.Focused = true
+ n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
+ n.Labels = labelFromCallExpr(ce)
+ return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
+ case "PContext", "PDescribe", "PWhen", "XContext", "XDescribe", "XWhen", "PDescribeTable", "XDescribeTable":
+ n.Pending = true
+ n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
+ n.Labels = labelFromCallExpr(ce)
+ return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
+ case "By":
+ n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
+ return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
+ case "AfterEach", "BeforeEach":
+ return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
+ case "JustAfterEach", "JustBeforeEach":
+ return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
+ case "AfterSuite", "BeforeSuite":
+ return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
+ case "SynchronizedAfterSuite", "SynchronizedBeforeSuite":
+ return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
+ default:
+ return nil, false
+ }
+}
+
+// textOrAltFromCallExpr tries to derive the "text" of a Ginkgo spec or
+// container. If it cannot derive it, it returns the alt text.
+func textOrAltFromCallExpr(ce *ast.CallExpr, alt string) string {
+ text, defined := textFromCallExpr(ce)
+ if !defined {
+ return alt
+ }
+ return text
+}
+
+// textFromCallExpr tries to derive the "text" of a Ginkgo spec or container. If
+// it cannot derive it, it returns false.
+func textFromCallExpr(ce *ast.CallExpr) (string, bool) {
+ if len(ce.Args) < 1 {
+ return "", false
+ }
+ text, ok := ce.Args[0].(*ast.BasicLit)
+ if !ok {
+ return "", false
+ }
+ switch text.Kind {
+ case token.CHAR, token.STRING:
+ // For token.CHAR and token.STRING, Value is quoted
+ unquoted, err := strconv.Unquote(text.Value)
+ if err != nil {
+ // If unquoting fails, just use the raw Value
+ return text.Value, true
+ }
+ return unquoted, true
+ default:
+ return text.Value, true
+ }
+}
+
+func labelFromCallExpr(ce *ast.CallExpr) []string {
+ labels := []string{}
+ if len(ce.Args) < 2 {
+ return labels
+ }
+
+ for _, arg := range ce.Args[1:] {
+ switch expr := arg.(type) {
+ case *ast.CallExpr:
+ id, ok := expr.Fun.(*ast.Ident)
+ if !ok {
+ // to skip over cases where the expr.Fun. is actually *ast.SelectorExpr
+ continue
+ }
+ if id.Name == "Label" {
+ ls := extractLabels(expr)
+ labels = append(labels, ls...)
+ }
+ }
+ }
+ return labels
+}
+
+func extractLabels(expr *ast.CallExpr) []string {
+ out := []string{}
+ for _, arg := range expr.Args {
+ switch expr := arg.(type) {
+ case *ast.BasicLit:
+ if expr.Kind == token.STRING {
+ unquoted, err := strconv.Unquote(expr.Value)
+ if err != nil {
+ unquoted = expr.Value
+ }
+ validated, err := types.ValidateAndCleanupLabel(unquoted, types.CodeLocation{})
+ if err == nil {
+ out = append(out, validated)
+ }
+ }
+ }
+ }
+
+ return out
+}
+
+func pendingFromCallExpr(ce *ast.CallExpr) bool {
+ pending := false
+ if len(ce.Args) < 2 {
+ return pending
+ }
+
+ for _, arg := range ce.Args[1:] {
+ switch expr := arg.(type) {
+ case *ast.CallExpr:
+ id, ok := expr.Fun.(*ast.Ident)
+ if !ok {
+ // skip cases where expr.Fun is actually an *ast.SelectorExpr
+ continue
+ }
+ if id.Name == "Pending" {
+ pending = true
+ }
+ case *ast.Ident:
+ if expr.Name == "Pending" {
+ pending = true
+ }
+ }
+ }
+ return pending
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go
new file mode 100644
index 0000000..f0a6b5d
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go
@@ -0,0 +1,58 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Most of the required functions were available in the
+// "golang.org/x/tools/go/ast/astutil" package, but not exported.
+// They were copied from https://github.com/golang/tools/blob/2b0845dc783e36ae26d683f4915a5840ef01ab0f/go/ast/astutil/imports.go
+
+package outline
+
+import (
+ "go/ast"
+ "strconv"
+ "strings"
+)
+
+// packageNameForImport returns the name under which the given import path is
+// imported in f. If the path is not imported, it returns nil. "Package name"
+// refers to `pkgname` in the call expression `pkgname.ExportedIdentifier`. Examples:
+// (import path not found) -> nil
+// "import example.com/pkg/foo" -> "foo"
+// "import fooalias example.com/pkg/foo" -> "fooalias"
+// "import . example.com/pkg/foo" -> ""
+func packageNameForImport(f *ast.File, path string) *string {
+ spec := importSpec(f, path)
+ if spec == nil {
+ return nil
+ }
+ name := spec.Name.String()
+ // spec.Name.String() returns "<nil>" when the import has no explicit name;
+ // in that case fall back to the conventional package name.
+ if name == "<nil>" {
+ name = "ginkgo"
+ }
+ if name == "." {
+ name = ""
+ }
+ return &name
+}
+
+// importSpec returns the import spec if f imports path,
+// or nil otherwise.
+func importSpec(f *ast.File, path string) *ast.ImportSpec {
+ for _, s := range f.Imports {
+ if strings.HasPrefix(importPath(s), path) {
+ return s
+ }
+ }
+ return nil
+}
+
+// importPath returns the unquoted import path of s,
+// or "" if the path is not properly quoted.
+func importPath(s *ast.ImportSpec) string {
+ t, err := strconv.Unquote(s.Path.Value)
+ if err != nil {
+ return ""
+ }
+ return t
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go
new file mode 100644
index 0000000..c2327cd
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go
@@ -0,0 +1,110 @@
+package outline
+
+import (
+ "encoding/json"
+ "fmt"
+ "go/ast"
+ "go/token"
+ "strings"
+
+ "golang.org/x/tools/go/ast/inspector"
+)
+
+const (
+ // ginkgoImportPath is the well-known ginkgo import path
+ ginkgoImportPath = "github.com/onsi/ginkgo/v2"
+)
+
+// FromASTFile returns an outline for a Ginkgo test source file
+func FromASTFile(fset *token.FileSet, src *ast.File) (*outline, error) {
+ ginkgoPackageName := packageNameForImport(src, ginkgoImportPath)
+ if ginkgoPackageName == nil {
+ return nil, fmt.Errorf("file does not import %q", ginkgoImportPath)
+ }
+
+ root := ginkgoNode{}
+ stack := []*ginkgoNode{&root}
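+ // The stack tracks the chain of enclosing Ginkgo containers; the
+ // inspector's push/pop callbacks correspond to pre/post-order traversal.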
+ ispr := inspector.New([]*ast.File{src})
+ ispr.Nodes([]ast.Node{(*ast.CallExpr)(nil)}, func(node ast.Node, push bool) bool {
+ if push {
+ // Pre-order traversal
+ ce, ok := node.(*ast.CallExpr)
+ if !ok {
+ // Because `Nodes` calls this function only when the node is an
+ // ast.CallExpr, this should never happen
+ panic(fmt.Errorf("node starting at %d, ending at %d is not an *ast.CallExpr", node.Pos(), node.End()))
+ }
+ gn, ok := ginkgoNodeFromCallExpr(fset, ce, ginkgoPackageName)
+ if !ok {
+ // Node is not a Ginkgo spec or container, continue
+ return true
+ }
+ parent := stack[len(stack)-1]
+ parent.Nodes = append(parent.Nodes, gn)
+ stack = append(stack, gn)
+ return true
+ }
+ // Post-order traversal
+ start, end := absoluteOffsetsForNode(fset, node)
+ lastVisitedGinkgoNode := stack[len(stack)-1]
+ if start != lastVisitedGinkgoNode.Start || end != lastVisitedGinkgoNode.End {
+ // Node is not a Ginkgo spec or container, so it was not pushed onto the stack, continue
+ return true
+ }
+ stack = stack[0 : len(stack)-1]
+ return true
+ })
+ if len(root.Nodes) == 0 {
+ return &outline{[]*ginkgoNode{}}, nil
+ }
+
+ // Derive the final focused property for all nodes. This must be done
+ // _before_ propagating the inherited focused property.
+ root.BackpropagateUnfocus()
+ // Now, propagate inherited properties, including focused and pending.
+ root.PropagateInheritedProperties()
+
+ return &outline{root.Nodes}, nil
+}
+
+type outline struct {
+ Nodes []*ginkgoNode `json:"nodes"`
+}
+
+func (o *outline) MarshalJSON() ([]byte, error) {
+ return json.Marshal(o.Nodes)
+}
+
+// String returns a CSV-formatted outline. Specs and containers are output in
+// depth-first order.
+func (o *outline) String() string {
+ return o.StringIndent(0)
+}
+
+// StringIndent returns a CSV-formatted outline, with every line indented by
+// one 'width' of spaces for every level of nesting.
+func (o *outline) StringIndent(width int) string {
+ var b strings.Builder
+ b.WriteString("Name,Text,Start,End,Spec,Focused,Pending,Labels\n")
+
+ currentIndent := 0
+ pre := func(n *ginkgoNode) {
+ b.WriteString(fmt.Sprintf("%*s", currentIndent, ""))
+ var labels string
+ if len(n.Labels) == 1 {
+ labels = n.Labels[0]
+ } else {
+ labels = strings.Join(n.Labels, ", ")
+ }
+ // Enclose the labels in a double-quoted, comma-separated list so that the Labels column holds comma-separated strings when the CSV is imported into a spreadsheet.
+ b.WriteString(fmt.Sprintf("%s,%s,%d,%d,%t,%t,%t,\"%s\"\n", n.Name, n.Text, n.Start, n.End, n.Spec, n.Focused, n.Pending, labels))
+ currentIndent += width
+ }
+ post := func(n *ginkgoNode) {
+ currentIndent -= width
+ }
+ for _, n := range o.Nodes {
+ n.Walk(pre, post)
+ }
+ return b.String()
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go
new file mode 100644
index 0000000..36698d4
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go
@@ -0,0 +1,98 @@
+package outline
+
+import (
+ "encoding/json"
+ "fmt"
+ "go/parser"
+ "go/token"
+ "os"
+
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+const (
+ // indentWidth is the width used by the 'indent' output
+ indentWidth = 4
+ // stdinAlias is a portable alias for stdin. This convention is used in
+ // other CLIs, e.g., kubectl.
+ stdinAlias = "-"
+ usageCommand = "ginkgo outline <filename>"
+)
+
+type outlineConfig struct {
+ Format string
+}
+
+func BuildOutlineCommand() command.Command {
+ conf := outlineConfig{
+ Format: "csv",
+ }
+ flags, err := types.NewGinkgoFlagSet(
+ types.GinkgoFlags{
+ {Name: "format", KeyPath: "Format",
+ Usage: "Format of outline",
+ UsageArgument: "one of 'csv', 'indent', or 'json'",
+ UsageDefaultValue: conf.Format,
+ },
+ },
+ &conf,
+ types.GinkgoFlagSections{},
+ )
+ if err != nil {
+ panic(err)
+ }
+
+ return command.Command{
+ Name: "outline",
+ Usage: "ginkgo outline <filename>",
+ ShortDoc: "Create an outline of Ginkgo symbols for a file",
+ Documentation: "To read from stdin, use: `ginkgo outline -`",
+ DocLink: "creating-an-outline-of-specs",
+ Flags: flags,
+ Command: func(args []string, _ []string) {
+ outlineFile(args, conf.Format)
+ },
+ }
+}
+
+func outlineFile(args []string, format string) {
+ if len(args) != 1 {
+ command.AbortWithUsage("outline expects exactly one argument")
+ }
+
+ filename := args[0]
+ var src *os.File
+ if filename == stdinAlias {
+ src = os.Stdin
+ } else {
+ var err error
+ src, err = os.Open(filename)
+ command.AbortIfError("Failed to open file:", err)
+ }
+
+ fset := token.NewFileSet()
+
+ parsedSrc, err := parser.ParseFile(fset, filename, src, 0)
+ command.AbortIfError("Failed to parse source:", err)
+
+ o, err := FromASTFile(fset, parsedSrc)
+ command.AbortIfError("Failed to create outline:", err)
+
+ var oerr error
+ switch format {
+ case "csv":
+ _, oerr = fmt.Print(o)
+ case "indent":
+ _, oerr = fmt.Print(o.StringIndent(indentWidth))
+ case "json":
+ b, err := json.Marshal(o)
+ if err != nil {
+ println(fmt.Sprintf("error marshalling to json: %s", err))
+ }
+ _, oerr = fmt.Println(string(b))
+ default:
+ command.AbortWith("Format %s not accepted", format)
+ }
+ command.AbortIfError("Failed to write outline:", oerr)
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go
new file mode 100644
index 0000000..aaed4d5
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go
@@ -0,0 +1,232 @@
+package run
+
+import (
+ "fmt"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/ginkgo/internal"
+ "github.com/onsi/ginkgo/v2/internal/interrupt_handler"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+func BuildRunCommand() command.Command {
+ var suiteConfig = types.NewDefaultSuiteConfig()
+ var reporterConfig = types.NewDefaultReporterConfig()
+ var cliConfig = types.NewDefaultCLIConfig()
+ var goFlagsConfig = types.NewDefaultGoFlagsConfig()
+
+ flags, err := types.BuildRunCommandFlagSet(&suiteConfig, &reporterConfig, &cliConfig, &goFlagsConfig)
+ if err != nil {
+ panic(err)
+ }
+
+ interruptHandler := interrupt_handler.NewInterruptHandler(nil)
+ interrupt_handler.SwallowSigQuit()
+
+ return command.Command{
+ Name: "run",
+ Flags: flags,
+ Usage: "ginkgo run <FLAGS> <PACKAGES> -- <PASS-THROUGHS>",
+ ShortDoc: "Run the tests in the passed in <PACKAGES> (or the package in the current directory if left blank)",
+ Documentation: "Any arguments after -- will be passed to the test.",
+ DocLink: "running-tests",
+ Command: func(args []string, additionalArgs []string) {
+ var errors []error
+ cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig)
+ command.AbortIfErrors("Ginkgo detected configuration issues:", errors)
+
+ runner := &SpecRunner{
+ cliConfig: cliConfig,
+ goFlagsConfig: goFlagsConfig,
+ suiteConfig: suiteConfig,
+ reporterConfig: reporterConfig,
+ flags: flags,
+
+ interruptHandler: interruptHandler,
+ }
+
+ runner.RunSpecs(args, additionalArgs)
+ },
+ }
+}
+
+type SpecRunner struct {
+ suiteConfig types.SuiteConfig
+ reporterConfig types.ReporterConfig
+ cliConfig types.CLIConfig
+ goFlagsConfig types.GoFlagsConfig
+ flags types.GinkgoFlagSet
+
+ interruptHandler *interrupt_handler.InterruptHandler
+}
+
+func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) {
+ suites := internal.FindSuites(args, r.cliConfig, true)
+ skippedSuites := suites.WithState(internal.TestSuiteStateSkippedByFilter)
+ suites = suites.WithoutState(internal.TestSuiteStateSkippedByFilter)
+
+ internal.VerifyCLIAndFrameworkVersion(suites)
+
+ if len(skippedSuites) > 0 {
+ fmt.Println("Will skip:")
+ for _, skippedSuite := range skippedSuites {
+ fmt.Println(" " + skippedSuite.Path)
+ }
+ }
+
+ if len(skippedSuites) > 0 && len(suites) == 0 {
+ command.AbortGracefullyWith("All tests skipped! Exiting...")
+ }
+
+ if len(suites) == 0 {
+ command.AbortWith("Found no test suites")
+ }
+
+ if len(suites) > 1 && !r.flags.WasSet("succinct") && r.reporterConfig.Verbosity().LT(types.VerbosityLevelVerbose) {
+ r.reporterConfig.Succinct = true
+ }
+
+ t := time.Now()
+ var endTime time.Time
+ if r.suiteConfig.Timeout > 0 {
+ endTime = t.Add(r.suiteConfig.Timeout)
+ }
+
+ iteration := 0
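+ // Each pass of OUTER_LOOP compiles and runs every suite once; it repeats
+ // when --until-it-fails or --repeat is set and stops on the first failure.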
+OUTER_LOOP:
+ for {
+ if !r.flags.WasSet("seed") {
+ r.suiteConfig.RandomSeed = time.Now().Unix()
+ }
+ if r.cliConfig.RandomizeSuites && len(suites) > 1 {
+ suites = suites.ShuffledCopy(r.suiteConfig.RandomSeed)
+ }
+
+ opc := internal.NewOrderedParallelCompiler(r.cliConfig.ComputedNumCompilers())
+ opc.StartCompiling(suites, r.goFlagsConfig)
+
+ SUITE_LOOP:
+ for {
+ suiteIdx, suite := opc.Next()
+ if suiteIdx >= len(suites) {
+ break SUITE_LOOP
+ }
+ suites[suiteIdx] = suite
+
+ if r.interruptHandler.Status().Interrupted() {
+ opc.StopAndDrain()
+ break OUTER_LOOP
+ }
+
+ if suites[suiteIdx].State.Is(internal.TestSuiteStateSkippedDueToEmptyCompilation) {
+ fmt.Printf("Skipping %s (no test files)\n", suite.Path)
+ continue SUITE_LOOP
+ }
+
+ if suites[suiteIdx].State.Is(internal.TestSuiteStateFailedToCompile) {
+ fmt.Println(suites[suiteIdx].CompilationError.Error())
+ if !r.cliConfig.KeepGoing {
+ opc.StopAndDrain()
+ }
+ continue SUITE_LOOP
+ }
+
+ if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 && !r.cliConfig.KeepGoing {
+ suites[suiteIdx].State = internal.TestSuiteStateSkippedDueToPriorFailures
+ opc.StopAndDrain()
+ continue SUITE_LOOP
+ }
+
+ if !endTime.IsZero() {
+ r.suiteConfig.Timeout = time.Until(endTime)
+ if r.suiteConfig.Timeout <= 0 {
+ suites[suiteIdx].State = internal.TestSuiteStateFailedDueToTimeout
+ opc.StopAndDrain()
+ continue SUITE_LOOP
+ }
+ }
+
+ suites[suiteIdx] = internal.RunCompiledSuite(suites[suiteIdx], r.suiteConfig, r.reporterConfig, r.cliConfig, r.goFlagsConfig, additionalArgs)
+ }
+
+ if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 {
+ if iteration > 0 {
+ fmt.Printf("\nTests failed on attempt #%d\n\n", iteration+1)
+ }
+ break OUTER_LOOP
+ }
+
+ if r.cliConfig.UntilItFails {
+ fmt.Printf("\nAll tests passed...\nWill keep running them until they fail.\nThis was attempt #%d\n%s\n", iteration+1, orcMessage(iteration+1))
+ } else if r.cliConfig.Repeat > 0 && iteration < r.cliConfig.Repeat {
+ fmt.Printf("\nAll tests passed...\nThis was attempt %d of %d.\n", iteration+1, r.cliConfig.Repeat+1)
+ } else {
+ break OUTER_LOOP
+ }
+ iteration += 1
+ }
+
+ internal.Cleanup(r.goFlagsConfig, suites...)
+
+ messages, err := internal.FinalizeProfilesAndReportsForSuites(suites, r.cliConfig, r.suiteConfig, r.reporterConfig, r.goFlagsConfig)
+ command.AbortIfError("could not finalize profiles:", err)
+ for _, message := range messages {
+ fmt.Println(message)
+ }
+
+ fmt.Printf("\nGinkgo ran %d %s in %s\n", len(suites), internal.PluralizedWord("suite", "suites", len(suites)), time.Since(t))
+
+ if suites.CountWithState(internal.TestSuiteStateFailureStates...) == 0 {
+ if suites.AnyHaveProgrammaticFocus() && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" {
+ fmt.Printf("Test Suite Passed\n")
+ fmt.Printf("Detected Programmatic Focus - setting exit status to %d\n", types.GINKGO_FOCUS_EXIT_CODE)
+ command.Abort(command.AbortDetails{ExitCode: types.GINKGO_FOCUS_EXIT_CODE})
+ } else {
+ fmt.Printf("Test Suite Passed\n")
+ command.Abort(command.AbortDetails{})
+ }
+ } else {
+ fmt.Fprintln(formatter.ColorableStdOut, "")
+ if len(suites) > 1 && suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 {
+ fmt.Fprintln(formatter.ColorableStdOut,
+ internal.FailedSuitesReport(suites, formatter.NewWithNoColorBool(r.reporterConfig.NoColor)))
+ }
+ fmt.Printf("Test Suite Failed\n")
+ command.Abort(command.AbortDetails{ExitCode: 1})
+ }
+}
+
+func orcMessage(iteration int) string {
+ if iteration < 10 {
+ return ""
+ } else if iteration < 30 {
+ return []string{
+ "If at first you succeed...",
+ "...try, try again.",
+ "Looking good!",
+ "Still good...",
+ "I think your tests are fine....",
+ "Yep, still passing",
+ "Oh boy, here I go testin' again!",
+ "Even the gophers are getting bored",
+ "Did you try -race?",
+ "Maybe you should stop now?",
+ "I'm getting tired...",
+ "What if I just made you a sandwich?",
+ "Hit ^C, hit ^C, please hit ^C",
+ "Make it stop. Please!",
+ "Come on! Enough is enough!",
+ "Dave, this conversation can serve no purpose anymore. Goodbye.",
+ "Just what do you think you're doing, Dave? ",
+ "I, Sisyphus",
+ "Insanity: doing the same thing over and over again and expecting different results. -Einstein",
+ "I guess Einstein never tried to churn butter",
+ }[iteration-10] + "\n"
+ } else {
+ return "No, seriously... you can probably stop now.\n"
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go
new file mode 100644
index 0000000..7dd2943
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go
@@ -0,0 +1,186 @@
+package unfocus
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+)
+
+func BuildUnfocusCommand() command.Command {
+ return command.Command{
+ Name: "unfocus",
+ Usage: "ginkgo unfocus",
+ ShortDoc: "Recursively unfocus any focused tests under the current directory",
+ DocLink: "filtering-specs",
+ Command: func(_ []string, _ []string) {
+ unfocusSpecs()
+ },
+ }
+}
+
+func unfocusSpecs() {
+ fmt.Println("Scanning for focus...")
+
+ goFiles := make(chan string)
+ go func() {
+ unfocusDir(goFiles, ".")
+ close(goFiles)
+ }()
+
+ const workers = 10
+ wg := sync.WaitGroup{}
+ wg.Add(workers)
+
+ for i := 0; i < workers; i++ {
+ go func() {
+ for path := range goFiles {
+ unfocusFile(path)
+ }
+ wg.Done()
+ }()
+ }
+
+ wg.Wait()
+}
+
+func unfocusDir(goFiles chan string, path string) {
+ files, err := os.ReadDir(path)
+ if err != nil {
+ fmt.Println(err.Error())
+ return
+ }
+
+ for _, f := range files {
+ switch {
+ case f.IsDir() && shouldProcessDir(f.Name()):
+ unfocusDir(goFiles, filepath.Join(path, f.Name()))
+ case !f.IsDir() && shouldProcessFile(f.Name()):
+ goFiles <- filepath.Join(path, f.Name())
+ }
+ }
+}
+
+func shouldProcessDir(basename string) bool {
+ return basename != "vendor" && !strings.HasPrefix(basename, ".")
+}
+
+func shouldProcessFile(basename string) bool {
+ return strings.HasSuffix(basename, ".go")
+}
+
+func unfocusFile(path string) {
+ data, err := os.ReadFile(path)
+ if err != nil {
+ fmt.Printf("error reading file '%s': %s\n", path, err.Error())
+ return
+ }
+
+ ast, err := parser.ParseFile(token.NewFileSet(), path, bytes.NewReader(data), parser.ParseComments)
+ if err != nil {
+ fmt.Printf("error parsing file '%s': %s\n", path, err.Error())
+ return
+ }
+
+ eliminations := scanForFocus(ast)
+ if len(eliminations) == 0 {
+ return
+ }
+
+ fmt.Printf("...updating %s\n", path)
+ backup, err := writeBackup(path, data)
+ if err != nil {
+ fmt.Printf("error creating backup file: %s\n", err.Error())
+ return
+ }
+
+ if err := updateFile(path, data, eliminations); err != nil {
+ fmt.Printf("error writing file '%s': %s\n", path, err.Error())
+ return
+ }
+
+ os.Remove(backup)
+}
+
+func writeBackup(path string, data []byte) (string, error) {
+ t, err := os.CreateTemp(filepath.Dir(path), filepath.Base(path))
+
+ if err != nil {
+ return "", fmt.Errorf("error creating temporary file: %w", err)
+ }
+ defer t.Close()
+
+ if _, err := io.Copy(t, bytes.NewReader(data)); err != nil {
+ return "", fmt.Errorf("error writing to temporary file: %w", err)
+ }
+
+ return t.Name(), nil
+}
+
+func updateFile(path string, data []byte, eliminations [][]int64) error {
+ to, err := os.Create(path)
+ if err != nil {
+ return fmt.Errorf("error opening file for writing '%s': %w", path, err)
+ }
+ defer to.Close()
+
+ from := bytes.NewReader(data)
+ var cursor int64
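+ // Each elimination is a [position, length] pair with a 1-based token.Pos
+ // byte position from the AST: copy everything up to the range, then skip
+ // length bytes of the source.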
+ for _, eliminationRange := range eliminations {
+ positionToEliminate, lengthToEliminate := eliminationRange[0]-1, eliminationRange[1]
+ if _, err := io.CopyN(to, from, positionToEliminate-cursor); err != nil {
+ return fmt.Errorf("error copying data: %w", err)
+ }
+
+ cursor = positionToEliminate + lengthToEliminate
+
+ if _, err := from.Seek(lengthToEliminate, io.SeekCurrent); err != nil {
+ return fmt.Errorf("error seeking to position in buffer: %w", err)
+ }
+ }
+
+ if _, err := io.Copy(to, from); err != nil {
+ return fmt.Errorf("error copying end data: %w", err)
+ }
+
+ return nil
+}
+
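+// scanForFocus records the byte ranges to delete: one byte for the leading
+// 'F' of focused variants such as FIt or FDescribe, and six bytes for a
+// trailing `Focus,` decorator identifier.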
+func scanForFocus(file *ast.File) (eliminations [][]int64) {
+ ast.Inspect(file, func(n ast.Node) bool {
+ if c, ok := n.(*ast.CallExpr); ok {
+ if i, ok := c.Fun.(*ast.Ident); ok {
+ if isFocus(i.Name) {
+ eliminations = append(eliminations, []int64{int64(i.Pos()), 1})
+ }
+ }
+ }
+
+ if i, ok := n.(*ast.Ident); ok {
+ if i.Name == "Focus" {
+ eliminations = append(eliminations, []int64{int64(i.Pos()), 6})
+ }
+ }
+
+ return true
+ })
+
+ return eliminations
+}
+
+func isFocus(name string) bool {
+ switch name {
+ case "FDescribe", "FContext", "FIt", "FDescribeTable", "FEntry", "FSpecify", "FWhen":
+ return true
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go
new file mode 100644
index 0000000..6c485c5
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go
@@ -0,0 +1,22 @@
+package watch
+
+import "sort"
+
+type Delta struct {
+ ModifiedPackages []string
+
+ NewSuites []*Suite
+ RemovedSuites []*Suite
+ modifiedSuites []*Suite
+}
+
+type DescendingByDelta []*Suite
+
+func (a DescendingByDelta) Len() int { return len(a) }
+func (a DescendingByDelta) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a DescendingByDelta) Less(i, j int) bool { return a[i].Delta() > a[j].Delta() }
+
+func (d Delta) ModifiedSuites() []*Suite {
+ sort.Sort(DescendingByDelta(d.modifiedSuites))
+ return d.modifiedSuites
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go
new file mode 100644
index 0000000..26418ac
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go
@@ -0,0 +1,75 @@
+package watch
+
+import (
+ "fmt"
+
+ "regexp"
+
+ "github.com/onsi/ginkgo/v2/ginkgo/internal"
+)
+
+type SuiteErrors map[internal.TestSuite]error
+
+type DeltaTracker struct {
+ maxDepth int
+ watchRegExp *regexp.Regexp
+ suites map[string]*Suite
+ packageHashes *PackageHashes
+}
+
+func NewDeltaTracker(maxDepth int, watchRegExp *regexp.Regexp) *DeltaTracker {
+ return &DeltaTracker{
+ maxDepth: maxDepth,
+ watchRegExp: watchRegExp,
+ packageHashes: NewPackageHashes(watchRegExp),
+ suites: map[string]*Suite{},
+ }
+}
+
+func (d *DeltaTracker) Delta(suites internal.TestSuites) (delta Delta, errors SuiteErrors) {
+ errors = SuiteErrors{}
+ delta.ModifiedPackages = d.packageHashes.CheckForChanges()
+
+ providedSuitePaths := map[string]bool{}
+ for _, suite := range suites {
+ providedSuitePaths[suite.Path] = true
+ }
+
+ d.packageHashes.StartTrackingUsage()
+
+ for _, suite := range d.suites {
+ if providedSuitePaths[suite.Suite.Path] {
+ if suite.Delta() > 0 {
+ delta.modifiedSuites = append(delta.modifiedSuites, suite)
+ }
+ } else {
+ delta.RemovedSuites = append(delta.RemovedSuites, suite)
+ }
+ }
+
+ d.packageHashes.StopTrackingUsageAndPrune()
+
+ for _, suite := range suites {
+ _, ok := d.suites[suite.Path]
+ if !ok {
+ s, err := NewSuite(suite, d.maxDepth, d.packageHashes)
+ if err != nil {
+ errors[suite] = err
+ continue
+ }
+ d.suites[suite.Path] = s
+ delta.NewSuites = append(delta.NewSuites, s)
+ }
+ }
+
+ return delta, errors
+}
+
+func (d *DeltaTracker) WillRun(suite internal.TestSuite) error {
+ s, ok := d.suites[suite.Path]
+ if !ok {
+ return fmt.Errorf("unknown suite %s", suite.Path)
+ }
+
+ return s.MarkAsRunAndRecomputedDependencies(d.maxDepth)
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go
new file mode 100644
index 0000000..a34d943
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go
@@ -0,0 +1,92 @@
+package watch
+
+import (
+ "go/build"
+ "regexp"
+)
+
+var ginkgoAndGomegaFilter = regexp.MustCompile(`github\.com/onsi/ginkgo|github\.com/onsi/gomega`)
+var ginkgoIntegrationTestFilter = regexp.MustCompile(`github\.com/onsi/ginkgo/integration`) // allows ginkgo itself to be integration tested
+
+type Dependencies struct {
+ deps map[string]int
+}
+
+func NewDependencies(path string, maxDepth int) (Dependencies, error) {
+ d := Dependencies{
+ deps: map[string]int{},
+ }
+
+ if maxDepth == 0 {
+ return d, nil
+ }
+
+ err := d.seedWithDepsForPackageAtPath(path)
+ if err != nil {
+ return d, err
+ }
+
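+ // Breadth-first expansion: each pass resolves imports of the packages
+ // found at the previous depth and stops once no new dependencies appear.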
+ for depth := 1; depth < maxDepth; depth++ {
+ n := len(d.deps)
+ d.addDepsForDepth(depth)
+ if n == len(d.deps) {
+ break
+ }
+ }
+
+ return d, nil
+}
+
+func (d Dependencies) Dependencies() map[string]int {
+ return d.deps
+}
+
+func (d Dependencies) seedWithDepsForPackageAtPath(path string) error {
+ pkg, err := build.ImportDir(path, 0)
+ if err != nil {
+ return err
+ }
+
+ d.resolveAndAdd(pkg.Imports, 1)
+ d.resolveAndAdd(pkg.TestImports, 1)
+ d.resolveAndAdd(pkg.XTestImports, 1)
+
+ delete(d.deps, pkg.Dir)
+ return nil
+}
+
+func (d Dependencies) addDepsForDepth(depth int) {
+ for dep, depDepth := range d.deps {
+ if depDepth == depth {
+ d.addDepsForDep(dep, depth+1)
+ }
+ }
+}
+
+func (d Dependencies) addDepsForDep(dep string, depth int) {
+ pkg, err := build.ImportDir(dep, 0)
+ if err != nil {
+ println(err.Error())
+ return
+ }
+ d.resolveAndAdd(pkg.Imports, depth)
+}
+
+func (d Dependencies) resolveAndAdd(deps []string, depth int) {
+ for _, dep := range deps {
+ pkg, err := build.Import(dep, ".", 0)
+ if err != nil {
+ continue
+ }
+ if !pkg.Goroot && (!ginkgoAndGomegaFilter.MatchString(pkg.Dir) || ginkgoIntegrationTestFilter.MatchString(pkg.Dir)) {
+ d.addDepIfNotPresent(pkg.Dir, depth)
+ }
+ }
+}
+
+func (d Dependencies) addDepIfNotPresent(dep string, depth int) {
+ _, ok := d.deps[dep]
+ if !ok {
+ d.deps[dep] = depth
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go
new file mode 100644
index 0000000..0e6ae1f
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go
@@ -0,0 +1,117 @@
+package watch
+
+import (
+ "fmt"
+ "os"
+ "regexp"
+ "strings"
+ "time"
+)
+
+var goTestRegExp = regexp.MustCompile(`_test\.go$`)
+
+type PackageHash struct {
+ CodeModifiedTime time.Time
+ TestModifiedTime time.Time
+ Deleted bool
+
+ path string
+ codeHash string
+ testHash string
+ watchRegExp *regexp.Regexp
+}
+
+func NewPackageHash(path string, watchRegExp *regexp.Regexp) *PackageHash {
+ p := &PackageHash{
+ path: path,
+ watchRegExp: watchRegExp,
+ }
+
+ p.codeHash, _, p.testHash, _, p.Deleted = p.computeHashes()
+
+ return p
+}
+
+func (p *PackageHash) CheckForChanges() bool {
+ codeHash, codeModifiedTime, testHash, testModifiedTime, deleted := p.computeHashes()
+
+ if deleted {
+ if !p.Deleted {
+ t := time.Now()
+ p.CodeModifiedTime = t
+ p.TestModifiedTime = t
+ }
+ p.Deleted = true
+ return true
+ }
+
+ modified := false
+ p.Deleted = false
+
+ if p.codeHash != codeHash {
+ p.CodeModifiedTime = codeModifiedTime
+ modified = true
+ }
+ if p.testHash != testHash {
+ p.TestModifiedTime = testModifiedTime
+ modified = true
+ }
+
+ p.codeHash = codeHash
+ p.testHash = testHash
+ return modified
+}
+
+func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Time, testHash string, testModifiedTime time.Time, deleted bool) {
+ entries, err := os.ReadDir(p.path)
+
+ if err != nil {
+ deleted = true
+ return
+ }
+
+ for _, entry := range entries {
+ if entry.IsDir() {
+ continue
+ }
+
+ info, err := entry.Info()
+ if err != nil {
+ continue
+ }
+
+ if isHiddenFile(info) {
+ continue
+ }
+
+ if goTestRegExp.MatchString(info.Name()) {
+ testHash += p.hashForFileInfo(info)
+ if info.ModTime().After(testModifiedTime) {
+ testModifiedTime = info.ModTime()
+ }
+ continue
+ }
+
+ if p.watchRegExp.MatchString(info.Name()) {
+ codeHash += p.hashForFileInfo(info)
+ if info.ModTime().After(codeModifiedTime) {
+ codeModifiedTime = info.ModTime()
+ }
+ }
+ }
+
+ testHash += codeHash
+ if codeModifiedTime.After(testModifiedTime) {
+ testModifiedTime = codeModifiedTime
+ }
+
+ return
+}
+
+func isHiddenFile(info os.FileInfo) bool {
+ return strings.HasPrefix(info.Name(), ".") || strings.HasPrefix(info.Name(), "_")
+}
+
+func (p *PackageHash) hashForFileInfo(info os.FileInfo) string {
+ return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano())
+}
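+
+// A minimal polling sketch, assuming a hypothetical package directory "./pkg"
+// and the default `\.go$` watch pattern:
+//
+//    ph := NewPackageHash("./pkg", regexp.MustCompile(`\.go$`))
+//    for range time.Tick(time.Second) {
+//        if ph.CheckForChanges() {
+//            fmt.Println("code changed at", ph.CodeModifiedTime)
+//        }
+//    }
+//
+// Note the "hash" is not cryptographic: it concatenates name/size/mtime strings,
+// which is enough to detect that something changed between polls.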
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go
new file mode 100644
index 0000000..b4892be
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go
@@ -0,0 +1,85 @@
+package watch
+
+import (
+ "path/filepath"
+ "regexp"
+ "sync"
+)
+
+type PackageHashes struct {
+ PackageHashes map[string]*PackageHash
+ usedPaths map[string]bool
+ watchRegExp *regexp.Regexp
+ lock *sync.Mutex
+}
+
+func NewPackageHashes(watchRegExp *regexp.Regexp) *PackageHashes {
+ return &PackageHashes{
+ PackageHashes: map[string]*PackageHash{},
+ usedPaths: nil,
+ watchRegExp: watchRegExp,
+ lock: &sync.Mutex{},
+ }
+}
+
+func (p *PackageHashes) CheckForChanges() []string {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ modified := []string{}
+
+ for _, packageHash := range p.PackageHashes {
+ if packageHash.CheckForChanges() {
+ modified = append(modified, packageHash.path)
+ }
+ }
+
+ return modified
+}
+
+func (p *PackageHashes) Add(path string) *PackageHash {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ path, _ = filepath.Abs(path)
+ _, ok := p.PackageHashes[path]
+ if !ok {
+ p.PackageHashes[path] = NewPackageHash(path, p.watchRegExp)
+ }
+
+ if p.usedPaths != nil {
+ p.usedPaths[path] = true
+ }
+ return p.PackageHashes[path]
+}
+
+func (p *PackageHashes) Get(path string) *PackageHash {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ path, _ = filepath.Abs(path)
+ if p.usedPaths != nil {
+ p.usedPaths[path] = true
+ }
+ return p.PackageHashes[path]
+}
+
+func (p *PackageHashes) StartTrackingUsage() {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ p.usedPaths = map[string]bool{}
+}
+
+func (p *PackageHashes) StopTrackingUsageAndPrune() {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ for path := range p.PackageHashes {
+ if !p.usedPaths[path] {
+ delete(p.PackageHashes, path)
+ }
+ }
+
+ p.usedPaths = nil
+}
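+
+// A sketch of the prune cycle, with hypothetical paths:
+//
+//    hashes := NewPackageHashes(regexp.MustCompile(`\.go$`))
+//    hashes.StartTrackingUsage()
+//    hashes.Add("./suite")              // marks ./suite as still in use
+//    hashes.Add("./suite/helper")       // ditto for a dependency
+//    hashes.StopTrackingUsageAndPrune() // drops any previously-added path not touched above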
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go
new file mode 100644
index 0000000..53272df
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go
@@ -0,0 +1,87 @@
+package watch
+
+import (
+ "fmt"
+ "math"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/ginkgo/internal"
+)
+
+type Suite struct {
+ Suite internal.TestSuite
+ RunTime time.Time
+ Dependencies Dependencies
+
+ sharedPackageHashes *PackageHashes
+}
+
+func NewSuite(suite internal.TestSuite, maxDepth int, sharedPackageHashes *PackageHashes) (*Suite, error) {
+ deps, err := NewDependencies(suite.Path, maxDepth)
+ if err != nil {
+ return nil, err
+ }
+
+ sharedPackageHashes.Add(suite.Path)
+ for dep := range deps.Dependencies() {
+ sharedPackageHashes.Add(dep)
+ }
+
+ return &Suite{
+ Suite: suite,
+ Dependencies: deps,
+
+ sharedPackageHashes: sharedPackageHashes,
+ }, nil
+}
+
+func (s *Suite) Delta() float64 {
+ delta := s.delta(s.Suite.Path, true, 0) * 1000
+ for dep, depth := range s.Dependencies.Dependencies() {
+ delta += s.delta(dep, false, depth)
+ }
+ return delta
+}
+
+func (s *Suite) MarkAsRunAndRecomputedDependencies(maxDepth int) error {
+ s.RunTime = time.Now()
+
+ deps, err := NewDependencies(s.Suite.Path, maxDepth)
+ if err != nil {
+ return err
+ }
+
+ s.sharedPackageHashes.Add(s.Suite.Path)
+ for dep := range deps.Dependencies() {
+ s.sharedPackageHashes.Add(dep)
+ }
+
+ s.Dependencies = deps
+
+ return nil
+}
+
+func (s *Suite) Description() string {
+ numDeps := len(s.Dependencies.Dependencies())
+ pluralizer := "ies"
+ if numDeps == 1 {
+ pluralizer = "y"
+ }
+ return fmt.Sprintf("%s [%d dependenc%s]", s.Suite.Path, numDeps, pluralizer)
+}
+
+func (s *Suite) delta(packagePath string, includeTests bool, depth int) float64 {
+ return math.Max(float64(s.dt(packagePath, includeTests)), 0) / float64(depth+1)
+}
+
+func (s *Suite) dt(packagePath string, includeTests bool) time.Duration {
+ packageHash := s.sharedPackageHashes.Get(packagePath)
+ var modifiedTime time.Time
+ if includeTests {
+ modifiedTime = packageHash.TestModifiedTime
+ } else {
+ modifiedTime = packageHash.CodeModifiedTime
+ }
+
+ return modifiedTime.Sub(s.RunTime)
+}
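+
+// Worked example on assumed timings: if the suite's own package was modified 1s
+// after the last run and a depth-1 dependency 4s after it, Delta() yields (in
+// second-scaled units) 1*1000 + 4/(1+1) = 1002: changes to the suite itself
+// dominate, and deeper dependencies are discounted by 1/(depth+1).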
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go
new file mode 100644
index 0000000..bde4193
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go
@@ -0,0 +1,192 @@
+package watch
+
+import (
+ "fmt"
+ "regexp"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/ginkgo/internal"
+ "github.com/onsi/ginkgo/v2/internal/interrupt_handler"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+func BuildWatchCommand() command.Command {
+ var suiteConfig = types.NewDefaultSuiteConfig()
+ var reporterConfig = types.NewDefaultReporterConfig()
+ var cliConfig = types.NewDefaultCLIConfig()
+ var goFlagsConfig = types.NewDefaultGoFlagsConfig()
+
+ flags, err := types.BuildWatchCommandFlagSet(&suiteConfig, &reporterConfig, &cliConfig, &goFlagsConfig)
+ if err != nil {
+ panic(err)
+ }
+ interruptHandler := interrupt_handler.NewInterruptHandler(nil)
+ interrupt_handler.SwallowSigQuit()
+
+ return command.Command{
+ Name: "watch",
+ Flags: flags,
+ Usage: "ginkgo watch -- ",
+ ShortDoc: "Watch the passed in and runs their tests whenever changes occur.",
+ Documentation: "Any arguments after -- will be passed to the test.",
+ DocLink: "watching-for-changes",
+ Command: func(args []string, additionalArgs []string) {
+ var errors []error
+ cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig)
+ command.AbortIfErrors("Ginkgo detected configuration issues:", errors)
+
+ watcher := &SpecWatcher{
+ cliConfig: cliConfig,
+ goFlagsConfig: goFlagsConfig,
+ suiteConfig: suiteConfig,
+ reporterConfig: reporterConfig,
+ flags: flags,
+
+ interruptHandler: interruptHandler,
+ }
+
+ watcher.WatchSpecs(args, additionalArgs)
+ },
+ }
+}
+
+type SpecWatcher struct {
+ suiteConfig types.SuiteConfig
+ reporterConfig types.ReporterConfig
+ cliConfig types.CLIConfig
+ goFlagsConfig types.GoFlagsConfig
+ flags types.GinkgoFlagSet
+
+ interruptHandler *interrupt_handler.InterruptHandler
+}
+
+func (w *SpecWatcher) WatchSpecs(args []string, additionalArgs []string) {
+ suites := internal.FindSuites(args, w.cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter)
+
+ internal.VerifyCLIAndFrameworkVersion(suites)
+
+ if len(suites) == 0 {
+ command.AbortWith("Found no test suites")
+ }
+
+ fmt.Printf("Identified %d test %s. Locating dependencies to a depth of %d (this may take a while)...\n", len(suites), internal.PluralizedWord("suite", "suites", len(suites)), w.cliConfig.Depth)
+ deltaTracker := NewDeltaTracker(w.cliConfig.Depth, regexp.MustCompile(w.cliConfig.WatchRegExp))
+ delta, errors := deltaTracker.Delta(suites)
+
+ fmt.Printf("Watching %d %s:\n", len(delta.NewSuites), internal.PluralizedWord("suite", "suites", len(delta.NewSuites)))
+ for _, suite := range delta.NewSuites {
+ fmt.Println(" " + suite.Description())
+ }
+
+ for suite, err := range errors {
+ fmt.Printf("Failed to watch %s: %s\n", suite.PackageName, err)
+ }
+
+ if len(suites) == 1 {
+ w.updateSeed()
+ w.compileAndRun(suites[0], additionalArgs)
+ }
+
+ ticker := time.NewTicker(time.Second)
+
+ for {
+ select {
+ case <-ticker.C:
+ suites := internal.FindSuites(args, w.cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter)
+ delta, _ := deltaTracker.Delta(suites)
+ coloredStream := formatter.ColorableStdOut
+
+ suites = internal.TestSuites{}
+
+ if len(delta.NewSuites) > 0 {
+ fmt.Fprintln(coloredStream, formatter.F("{{green}}Detected %d new %s:{{/}}", len(delta.NewSuites), internal.PluralizedWord("suite", "suites", len(delta.NewSuites))))
+ for _, suite := range delta.NewSuites {
+ suites = append(suites, suite.Suite)
+ fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", suite.Description()))
+ }
+ }
+
+ modifiedSuites := delta.ModifiedSuites()
+ if len(modifiedSuites) > 0 {
+ fmt.Fprintln(coloredStream, formatter.F("{{green}}Detected changes in:{{/}}"))
+ for _, pkg := range delta.ModifiedPackages {
+ fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", pkg))
+ }
+ fmt.Fprintln(coloredStream, formatter.F("{{green}}Will run %d %s:{{/}}", len(modifiedSuites), internal.PluralizedWord("suite", "suites", len(modifiedSuites))))
+ for _, suite := range modifiedSuites {
+ suites = append(suites, suite.Suite)
+ fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", suite.Description()))
+ }
+ fmt.Fprintln(coloredStream, "")
+ }
+
+ if len(suites) == 0 {
+ break
+ }
+
+ w.updateSeed()
+ w.computeSuccinctMode(len(suites))
+ for idx := range suites {
+ if w.interruptHandler.Status().Interrupted() {
+ return
+ }
+ deltaTracker.WillRun(suites[idx])
+ suites[idx] = w.compileAndRun(suites[idx], additionalArgs)
+ }
+ color := "{{green}}"
+ if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 {
+ color = "{{red}}"
+ }
+ fmt.Fprintln(coloredStream, formatter.F(color+"\nDone. Resuming watch...{{/}}"))
+
+ messages, err := internal.FinalizeProfilesAndReportsForSuites(suites, w.cliConfig, w.suiteConfig, w.reporterConfig, w.goFlagsConfig)
+ command.AbortIfError("could not finalize profiles:", err)
+ for _, message := range messages {
+ fmt.Println(message)
+ }
+ case <-w.interruptHandler.Status().Channel:
+ return
+ }
+ }
+}
+
+func (w *SpecWatcher) compileAndRun(suite internal.TestSuite, additionalArgs []string) internal.TestSuite {
+ suite = internal.CompileSuite(suite, w.goFlagsConfig)
+ if suite.State.Is(internal.TestSuiteStateFailedToCompile) {
+ fmt.Println(suite.CompilationError.Error())
+ return suite
+ }
+ if w.interruptHandler.Status().Interrupted() {
+ return suite
+ }
+ suite = internal.RunCompiledSuite(suite, w.suiteConfig, w.reporterConfig, w.cliConfig, w.goFlagsConfig, additionalArgs)
+ internal.Cleanup(w.goFlagsConfig, suite)
+ return suite
+}
+
+func (w *SpecWatcher) computeSuccinctMode(numSuites int) {
+ if w.reporterConfig.Verbosity().GTE(types.VerbosityLevelVerbose) {
+ w.reporterConfig.Succinct = false
+ return
+ }
+
+ if w.flags.WasSet("succinct") {
+ return
+ }
+
+ if numSuites == 1 {
+ w.reporterConfig.Succinct = false
+ }
+
+ if numSuites > 1 {
+ w.reporterConfig.Succinct = true
+ }
+}
+
+func (w *SpecWatcher) updateSeed() {
+ if !w.flags.WasSet("seed") {
+ w.suiteConfig.RandomSeed = time.Now().Unix()
+ }
+}
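+
+// A representative invocation, mirroring the Usage string above; -r and -depth
+// are documented Ginkgo CLI flags, and -myCustomTestFlag is a hypothetical
+// pass-through handed to the compiled test binary:
+//
+//    ginkgo watch -r -depth=2 -- -myCustomTestFlag=value
+//
+// On each 1s tick the watcher re-discovers suites, re-hashes tracked packages,
+// and re-runs only the new or modified suites.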
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo_cli_dependencies.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo_cli_dependencies.go
new file mode 100644
index 0000000..8516272
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo_cli_dependencies.go
@@ -0,0 +1,8 @@
+//go:build ginkgoclidependencies
+// +build ginkgoclidependencies
+
+package ginkgo
+
+import (
+ _ "github.com/onsi/ginkgo/v2/ginkgo"
+)
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go
new file mode 100644
index 0000000..02c6739
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go
@@ -0,0 +1,180 @@
+package ginkgo
+
+import (
+ "testing"
+
+ "github.com/onsi/ginkgo/v2/internal/testingtproxy"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+/*
+GinkgoT() implements an interface that allows third party libraries to integrate with and build on top of Ginkgo.
+
+GinkgoT() is analogous to *testing.T and implements the majority of *testing.T's methods. It can typically be used as a drop-in replacement with third-party libraries that accept *testing.T through an interface.
+
+GinkgoT() takes an optional offset argument that can be used to get the
+correct line number associated with the failure - though you do not need to use this if you call GinkgoHelper() or GinkgoT().Helper() appropriately
+
+GinkgoT() attempts to mimic the behavior of `testing.T` with the exception of the following:
+
+- Error/Errorf: failures in Ginkgo always immediately stop execution and there is no mechanism to log a failure without aborting the test. As such, Error/Errorf are equivalent to Fatal/Fatalf.
+- Parallel() is a no-op as Ginkgo's multi-process parallelism model is substantially different from go test's in-process model.
+
+You can learn more here: https://onsi.github.io/ginkgo/#using-third-party-libraries
+*/
+func GinkgoT(optionalOffset ...int) FullGinkgoTInterface {
+ offset := 1
+ if len(optionalOffset) > 0 {
+ offset = optionalOffset[0]
+ }
+ return testingtproxy.New(
+ GinkgoWriter,
+ Fail,
+ Skip,
+ DeferCleanup,
+ CurrentSpecReport,
+ AddReportEntry,
+ GinkgoRecover,
+ AttachProgressReporter,
+ suiteConfig.RandomSeed,
+ suiteConfig.ParallelProcess,
+ suiteConfig.ParallelTotal,
+ reporterConfig.NoColor,
+ offset)
+}
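+
+// A minimal integration sketch, assuming go.uber.org/mock/gomock (whose
+// NewController accepts anything with Errorf/Fatalf, both of which GinkgoT()
+// provides):
+//
+//    ctrl := gomock.NewController(GinkgoT())
+//    defer ctrl.Finish()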
+
+/*
+The portion of the interface returned by GinkgoT() that maps onto methods in the testing package's T.
+*/
+type GinkgoTInterface interface {
+ Cleanup(func())
+ Setenv(key, value string)
+ Error(args ...any)
+ Errorf(format string, args ...any)
+ Fail()
+ FailNow()
+ Failed() bool
+ Fatal(args ...any)
+ Fatalf(format string, args ...any)
+ Helper()
+ Log(args ...any)
+ Logf(format string, args ...any)
+ Name() string
+ Parallel()
+ Skip(args ...any)
+ SkipNow()
+ Skipf(format string, args ...any)
+ Skipped() bool
+ TempDir() string
+}
+
+/*
+Additional methods returned by GinkgoT() that provide deeper integration points into Ginkgo
+*/
+type FullGinkgoTInterface interface {
+ GinkgoTInterface
+
+ AddReportEntryVisibilityAlways(name string, args ...any)
+ AddReportEntryVisibilityFailureOrVerbose(name string, args ...any)
+ AddReportEntryVisibilityNever(name string, args ...any)
+
+ //Prints to the GinkgoWriter
+ Print(a ...any)
+ Printf(format string, a ...any)
+ Println(a ...any)
+
+ //Provides access to Ginkgo's color formatting, correctly configured to match the color settings specified in the invocation of ginkgo
+ F(format string, args ...any) string
+ Fi(indentation uint, format string, args ...any) string
+ Fiw(indentation uint, maxWidth uint, format string, args ...any) string
+
+ //Generates a formatted string version of the current spec's timeline
+ RenderTimeline() string
+
+ GinkgoRecover()
+ DeferCleanup(args ...any)
+
+ RandomSeed() int64
+ ParallelProcess() int
+ ParallelTotal() int
+
+ AttachProgressReporter(func() string) func()
+}
+
+/*
+GinkgoTB() implements a wrapper that exactly matches the testing.TB interface.
+
+The testing.TB interface contains a private method specifically to prevent outside implementations, so any function that accepts testing.TB as input must be passed something that directly implements testing.TB.
+
+This wrapper satisfies the testing.TB interface and is intended to be used as a drop-in replacement with third-party libraries that accept testing.TB.
+
+Similar to GinkgoT(), GinkgoTB() takes an optional offset argument that can be used to get the
+correct line number associated with the failure - though you do not need to use this if you call GinkgoHelper() or GinkgoT().Helper() appropriately
+*/
+func GinkgoTB(optionalOffset ...int) *GinkgoTBWrapper {
+ offset := 2
+ if len(optionalOffset) > 0 {
+ offset = optionalOffset[0]
+ }
+ return &GinkgoTBWrapper{GinkgoT: GinkgoT(offset)}
+}
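+
+// A drop-in sketch, assuming a hypothetical helper written against testing.TB:
+//
+//    func requireEnv(tb testing.TB, key string) string {
+//        tb.Helper()
+//        if v := os.Getenv(key); v != "" {
+//            return v
+//        }
+//        tb.Fatalf("missing required env var %s", key)
+//        return ""
+//    }
+//
+//    // ...inside a spec:
+//    dsn := requireEnv(GinkgoTB(), "DATABASE_URL")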
+
+type GinkgoTBWrapper struct {
+ testing.TB
+ GinkgoT FullGinkgoTInterface
+}
+
+func (g *GinkgoTBWrapper) Cleanup(f func()) {
+ g.GinkgoT.Cleanup(f)
+}
+func (g *GinkgoTBWrapper) Error(args ...any) {
+ g.GinkgoT.Error(args...)
+}
+func (g *GinkgoTBWrapper) Errorf(format string, args ...any) {
+ g.GinkgoT.Errorf(format, args...)
+}
+func (g *GinkgoTBWrapper) Fail() {
+ g.GinkgoT.Fail()
+}
+func (g *GinkgoTBWrapper) FailNow() {
+ g.GinkgoT.FailNow()
+}
+func (g *GinkgoTBWrapper) Failed() bool {
+ return g.GinkgoT.Failed()
+}
+func (g *GinkgoTBWrapper) Fatal(args ...any) {
+ g.GinkgoT.Fatal(args...)
+}
+func (g *GinkgoTBWrapper) Fatalf(format string, args ...any) {
+ g.GinkgoT.Fatalf(format, args...)
+}
+func (g *GinkgoTBWrapper) Helper() {
+ types.MarkAsHelper(1)
+}
+func (g *GinkgoTBWrapper) Log(args ...any) {
+ g.GinkgoT.Log(args...)
+}
+func (g *GinkgoTBWrapper) Logf(format string, args ...any) {
+ g.GinkgoT.Logf(format, args...)
+}
+func (g *GinkgoTBWrapper) Name() string {
+ return g.GinkgoT.Name()
+}
+func (g *GinkgoTBWrapper) Setenv(key, value string) {
+ g.GinkgoT.Setenv(key, value)
+}
+func (g *GinkgoTBWrapper) Skip(args ...any) {
+ g.GinkgoT.Skip(args...)
+}
+func (g *GinkgoTBWrapper) SkipNow() {
+ g.GinkgoT.SkipNow()
+}
+func (g *GinkgoTBWrapper) Skipf(format string, args ...any) {
+ g.GinkgoT.Skipf(format, args...)
+}
+func (g *GinkgoTBWrapper) Skipped() bool {
+ return g.GinkgoT.Skipped()
+}
+func (g *GinkgoTBWrapper) TempDir() string {
+ return g.GinkgoT.TempDir()
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/counter.go b/vendor/github.com/onsi/ginkgo/v2/internal/counter.go
new file mode 100644
index 0000000..712d85a
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/counter.go
@@ -0,0 +1,9 @@
+package internal
+
+func MakeIncrementingIndexCounter() func() (int, error) {
+ idx := -1
+ return func() (int, error) {
+ idx += 1
+ return idx, nil
+ }
+}
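+
+// A minimal sketch: each call yields the next zero-based index. The error is
+// always nil here; the signature exists to satisfy callers that expect one.
+//
+//    next := MakeIncrementingIndexCounter()
+//    i, _ := next() // 0
+//    i, _ = next()  // 1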
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/failer.go b/vendor/github.com/onsi/ginkgo/v2/internal/failer.go
new file mode 100644
index 0000000..e9bd956
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/failer.go
@@ -0,0 +1,99 @@
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type Failer struct {
+ lock *sync.Mutex
+ failure types.Failure
+ state types.SpecState
+}
+
+func NewFailer() *Failer {
+ return &Failer{
+ lock: &sync.Mutex{},
+ state: types.SpecStatePassed,
+ }
+}
+
+func (f *Failer) GetState() types.SpecState {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ return f.state
+}
+
+func (f *Failer) GetFailure() types.Failure {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ return f.failure
+}
+
+func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if f.state == types.SpecStatePassed {
+ f.state = types.SpecStatePanicked
+ f.failure = types.Failure{
+ Message: "Test Panicked",
+ Location: location,
+ ForwardedPanic: fmt.Sprintf("%v", forwardedPanic),
+ }
+ }
+}
+
+func (f *Failer) Fail(message string, location types.CodeLocation) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if f.state == types.SpecStatePassed {
+ f.state = types.SpecStateFailed
+ f.failure = types.Failure{
+ Message: message,
+ Location: location,
+ }
+ }
+}
+
+func (f *Failer) Skip(message string, location types.CodeLocation) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if f.state == types.SpecStatePassed {
+ f.state = types.SpecStateSkipped
+ f.failure = types.Failure{
+ Message: message,
+ Location: location,
+ }
+ }
+}
+
+func (f *Failer) AbortSuite(message string, location types.CodeLocation) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if f.state == types.SpecStatePassed {
+ f.state = types.SpecStateAborted
+ f.failure = types.Failure{
+ Message: message,
+ Location: location,
+ }
+ }
+}
+
+func (f *Failer) Drain() (types.SpecState, types.Failure) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ failure := f.failure
+ outcome := f.state
+
+ f.state = types.SpecStatePassed
+ f.failure = types.Failure{}
+
+ return outcome, failure
+}
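+
+// First-write-wins semantics, sketched: only the first transition out of
+// SpecStatePassed is recorded, and Drain hands the result back while resetting.
+//
+//    cl := types.NewCodeLocation(0)  // hypothetical location
+//    f := NewFailer()
+//    f.Fail("boom", cl)              // state: SpecStatePassed -> SpecStateFailed
+//    f.Panic(cl, "later panic")      // no-op: first failure wins
+//    state, failure := f.Drain()     // SpecStateFailed / "boom"; failer resets to passed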
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/focus.go b/vendor/github.com/onsi/ginkgo/v2/internal/focus.go
new file mode 100644
index 0000000..e3da7d1
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/focus.go
@@ -0,0 +1,122 @@
+package internal
+
+import (
+ "regexp"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+/*
+If a container marked as focus has a descendant that is also marked as focus, Ginkgo's policy is to
+unmark the container's focus. This gives developers a more intuitive experience when debugging specs.
+It is common to focus a container to just run a subset of specs, then identify the specific specs within the container to focus -
+this policy allows the developer to simply focus those specific specs without needing to go back and turn focus off on the container.
+
+As a common example, consider:
+
+ FDescribe("something to debug", function() {
+ It("works", function() {...})
+ It("works", function() {...})
+ FIt("doesn't work", function() {...})
+ It("works", function() {...})
+ })
+
+Here the developer's intent is to focus in on the `"doesn't work"` spec and not to run the adjacent specs in the focused `"something to debug"` container.
+The nested policy applied by this function enables this behavior.
+*/
+func ApplyNestedFocusPolicyToTree(tree *TreeNode) {
+ var walkTree func(tree *TreeNode) bool
+ walkTree = func(tree *TreeNode) bool {
+ if tree.Node.MarkedPending {
+ return false
+ }
+ hasFocusedDescendant := false
+ for _, child := range tree.Children {
+ childHasFocus := walkTree(child)
+ hasFocusedDescendant = hasFocusedDescendant || childHasFocus
+ }
+ tree.Node.MarkedFocus = tree.Node.MarkedFocus && !hasFocusedDescendant
+ return tree.Node.MarkedFocus || hasFocusedDescendant
+ }
+
+ walkTree(tree)
+}
+
+/*
+Ginkgo supports focussing specs using `FIt`, `FDescribe`, etc. - this is called "programmatic focus".
+It also supports focussing specs using regular expressions on the command line (`-focus=`, `-skip=`) that match against spec text and file filters (`-focus-files=`, `-skip-files=`) that match against code locations for nodes in specs.
+
+When both programmatic and file filters are provided, their results are ANDed together. If multiple kinds of filters are provided, the file filters run first, followed by the regex filters.
+
+This function sets the `Skip` property on specs by applying Ginkgo's focus policy:
+- If there are no CLI arguments and no programmatic focus, do nothing.
+- If a spec somewhere has programmatic focus, skip any specs that have no programmatic focus.
+- If there are CLI arguments, parse them and skip any specs that either don't match the focus filters or do match the skip filters.
+
+*Note:* specs with pending nodes are Skipped when created by NewSpec.
+*/
+func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suiteConfig types.SuiteConfig) (Specs, bool) {
+ focusString := strings.Join(suiteConfig.FocusStrings, "|")
+ skipString := strings.Join(suiteConfig.SkipStrings, "|")
+
+ type SkipCheck func(spec Spec) bool
+
+ // by default, skip any specs marked pending
+ skipChecks := []SkipCheck{func(spec Spec) bool { return spec.Nodes.HasNodeMarkedPending() }}
+ hasProgrammaticFocus := false
+
+ for _, spec := range specs {
+ if spec.Nodes.HasNodeMarkedFocus() && !spec.Nodes.HasNodeMarkedPending() {
+ hasProgrammaticFocus = true
+ break
+ }
+ }
+
+ if hasProgrammaticFocus {
+ skipChecks = append(skipChecks, func(spec Spec) bool { return !spec.Nodes.HasNodeMarkedFocus() })
+ }
+
+ if suiteConfig.LabelFilter != "" {
+ labelFilter, _ := types.ParseLabelFilter(suiteConfig.LabelFilter)
+ skipChecks = append(skipChecks, func(spec Spec) bool {
+ return !labelFilter(UnionOfLabels(suiteLabels, spec.Nodes.UnionOfLabels()))
+ })
+ }
+
+ if len(suiteConfig.FocusFiles) > 0 {
+ focusFilters, _ := types.ParseFileFilters(suiteConfig.FocusFiles)
+ skipChecks = append(skipChecks, func(spec Spec) bool { return !focusFilters.Matches(spec.Nodes.CodeLocations()) })
+ }
+
+ if len(suiteConfig.SkipFiles) > 0 {
+ skipFilters, _ := types.ParseFileFilters(suiteConfig.SkipFiles)
+ skipChecks = append(skipChecks, func(spec Spec) bool { return skipFilters.Matches(spec.Nodes.CodeLocations()) })
+ }
+
+ if focusString != "" {
+ // skip specs that don't match the focus string
+ re := regexp.MustCompile(focusString)
+ skipChecks = append(skipChecks, func(spec Spec) bool { return !re.MatchString(description + " " + spec.Text()) })
+ }
+
+ if skipString != "" {
+ // skip specs that match the skip string
+ re := regexp.MustCompile(skipString)
+ skipChecks = append(skipChecks, func(spec Spec) bool { return re.MatchString(description + " " + spec.Text()) })
+ }
+
+ // mark a spec skipped if any skipCheck fires. note that we never clear Skip, to avoid overwriting skip status established by the node's pending status
+ processedSpecs := Specs{}
+ for _, spec := range specs {
+ for _, skipCheck := range skipChecks {
+ if skipCheck(spec) {
+ spec.Skip = true
+ break
+ }
+ }
+ processedSpecs = append(processedSpecs, spec)
+ }
+
+ return processedSpecs, hasProgrammaticFocus
+}
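+
+// How the checks compose, assuming a run configured with both a focus regex and
+// a label filter (flag spellings per the Ginkgo CLI docs):
+//
+//    ginkgo --focus='doesn.t work' --label-filter='fast && !flaky' ./...
+//
+// A spec is skipped as soon as any appended check fires: pending nodes first,
+// then programmatic focus (if present anywhere), the label filter, file filters,
+// and finally the focus/skip regexes. Checks can only set Skip; nothing here
+// ever clears it.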
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/global/init.go b/vendor/github.com/onsi/ginkgo/v2/internal/global/init.go
new file mode 100644
index 0000000..464e3c9
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/global/init.go
@@ -0,0 +1,28 @@
+package global
+
+import (
+ "github.com/onsi/ginkgo/v2/internal"
+)
+
+var Suite *internal.Suite
+var Failer *internal.Failer
+var backupSuite *internal.Suite
+
+func init() {
+ InitializeGlobals()
+}
+
+func InitializeGlobals() {
+ Failer = internal.NewFailer()
+ Suite = internal.NewSuite()
+}
+
+func PushClone() error {
+ var err error
+ backupSuite, err = Suite.Clone()
+ return err
+}
+
+func PopClone() {
+ Suite = backupSuite
+}
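+
+// A sketch of the clone dance, for callers that temporarily rebuild the global
+// suite (the mutation step is whatever the caller does with global.Suite):
+//
+//    if err := PushClone(); err != nil {
+//        return err
+//    }
+//    defer PopClone()
+//    // ...mutate or rebuild global.Suite...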
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/group.go b/vendor/github.com/onsi/ginkgo/v2/internal/group.go
new file mode 100644
index 0000000..02c9fe4
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/group.go
@@ -0,0 +1,383 @@
+package internal
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type runOncePair struct {
+ //nodeId should only run once...
+ nodeID uint
+ nodeType types.NodeType
+ //...for specs in a hierarchy that includes this context
+ containerID uint
+}
+
+func (pair runOncePair) isZero() bool {
+ return pair.nodeID == 0
+}
+
+func runOncePairForNode(node Node, containerID uint) runOncePair {
+ return runOncePair{
+ nodeID: node.ID,
+ nodeType: node.NodeType,
+ containerID: containerID,
+ }
+}
+
+type runOncePairs []runOncePair
+
+func runOncePairsForSpec(spec Spec) runOncePairs {
+ pairs := runOncePairs{}
+
+ containers := spec.Nodes.WithType(types.NodeTypeContainer)
+ for _, node := range spec.Nodes {
+ if node.NodeType.Is(types.NodeTypeBeforeAll | types.NodeTypeAfterAll) {
+ pairs = append(pairs, runOncePairForNode(node, containers.FirstWithNestingLevel(node.NestingLevel-1).ID))
+ } else if node.NodeType.Is(types.NodeTypeBeforeEach|types.NodeTypeJustBeforeEach|types.NodeTypeAfterEach|types.NodeTypeJustAfterEach) && node.MarkedOncePerOrdered {
+ passedIntoAnOrderedContainer := false
+ firstOrderedContainerDeeperThanNode := containers.FirstSatisfying(func(container Node) bool {
+ passedIntoAnOrderedContainer = passedIntoAnOrderedContainer || container.MarkedOrdered
+ return container.NestingLevel >= node.NestingLevel && passedIntoAnOrderedContainer
+ })
+ if firstOrderedContainerDeeperThanNode.IsZero() {
+ continue
+ }
+ pairs = append(pairs, runOncePairForNode(node, firstOrderedContainerDeeperThanNode.ID))
+ }
+ }
+
+ return pairs
+}
+
+func (pairs runOncePairs) runOncePairFor(nodeID uint) runOncePair {
+ for i := range pairs {
+ if pairs[i].nodeID == nodeID {
+ return pairs[i]
+ }
+ }
+ return runOncePair{}
+}
+
+func (pairs runOncePairs) hasRunOncePair(pair runOncePair) bool {
+ for i := range pairs {
+ if pairs[i] == pair {
+ return true
+ }
+ }
+ return false
+}
+
+func (pairs runOncePairs) withType(nodeTypes types.NodeType) runOncePairs {
+ count := 0
+ for i := range pairs {
+ if pairs[i].nodeType.Is(nodeTypes) {
+ count++
+ }
+ }
+
+ out, j := make(runOncePairs, count), 0
+ for i := range pairs {
+ if pairs[i].nodeType.Is(nodeTypes) {
+ out[j] = pairs[i]
+ j++
+ }
+ }
+ return out
+}
+
+type group struct {
+ suite *Suite
+ specs Specs
+ runOncePairs map[uint]runOncePairs
+ runOnceTracker map[runOncePair]types.SpecState
+
+ succeeded bool
+ failedInARunOnceBefore bool
+ continueOnFailure bool
+}
+
+func newGroup(suite *Suite) *group {
+ return &group{
+ suite: suite,
+ runOncePairs: map[uint]runOncePairs{},
+ runOnceTracker: map[runOncePair]types.SpecState{},
+ succeeded: true,
+ failedInARunOnceBefore: false,
+ continueOnFailure: false,
+ }
+}
+
+func (g *group) initialReportForSpec(spec Spec) types.SpecReport {
+ return types.SpecReport{
+ ContainerHierarchyTexts: spec.Nodes.WithType(types.NodeTypeContainer).Texts(),
+ ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(),
+ ContainerHierarchyLabels: spec.Nodes.WithType(types.NodeTypeContainer).Labels(),
+ LeafNodeLocation: spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation,
+ LeafNodeType: types.NodeTypeIt,
+ LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text,
+ LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels),
+ ParallelProcess: g.suite.config.ParallelProcess,
+ RunningInParallel: g.suite.isRunningInParallel(),
+ IsSerial: spec.Nodes.HasNodeMarkedSerial(),
+ IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(),
+ MaxFlakeAttempts: spec.Nodes.GetMaxFlakeAttempts(),
+ MaxMustPassRepeatedly: spec.Nodes.GetMaxMustPassRepeatedly(),
+ }
+}
+
+func (g *group) evaluateSkipStatus(spec Spec) (types.SpecState, types.Failure) {
+ if spec.Nodes.HasNodeMarkedPending() {
+ return types.SpecStatePending, types.Failure{}
+ }
+ if spec.Skip {
+ return types.SpecStateSkipped, types.Failure{}
+ }
+ if g.suite.interruptHandler.Status().Interrupted() || g.suite.skipAll {
+ return types.SpecStateSkipped, types.Failure{}
+ }
+ if !g.suite.deadline.IsZero() && g.suite.deadline.Before(time.Now()) {
+ return types.SpecStateSkipped, types.Failure{}
+ }
+ if !g.succeeded && !g.continueOnFailure {
+ return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
+ "Spec skipped because an earlier spec in an ordered container failed")
+ }
+ if g.failedInARunOnceBefore && g.continueOnFailure {
+ return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
+ "Spec skipped because a BeforeAll node failed")
+ }
+ beforeOncePairs := g.runOncePairs[spec.SubjectID()].withType(types.NodeTypeBeforeAll | types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach)
+ for _, pair := range beforeOncePairs {
+ if g.runOnceTracker[pair].Is(types.SpecStateSkipped) {
+ return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
+ fmt.Sprintf("Spec skipped because Skip() was called in %s", pair.nodeType))
+ }
+ }
+ if g.suite.config.DryRun {
+ return types.SpecStatePassed, types.Failure{}
+ }
+ return g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure
+}
+
+func (g *group) isLastSpecWithPair(specID uint, pair runOncePair) bool {
+ lastSpecID := uint(0)
+ for idx := range g.specs {
+ if g.specs[idx].Skip {
+ continue
+ }
+ sID := g.specs[idx].SubjectID()
+ if g.runOncePairs[sID].hasRunOncePair(pair) {
+ lastSpecID = sID
+ }
+ }
+ return lastSpecID == specID
+}
+
+func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) bool {
+ failedInARunOnceBefore := false
+ pairs := g.runOncePairs[spec.SubjectID()]
+
+ nodes := spec.Nodes.WithType(types.NodeTypeBeforeAll)
+ nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeBeforeEach)...).SortedByAscendingNestingLevel()
+ nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeJustBeforeEach).SortedByAscendingNestingLevel()...)
+ nodes = append(nodes, spec.Nodes.FirstNodeWithType(types.NodeTypeIt))
+ terminatingNode, terminatingPair := Node{}, runOncePair{}
+
+ deadline := time.Time{}
+ if spec.SpecTimeout() > 0 {
+ deadline = time.Now().Add(spec.SpecTimeout())
+ }
+
+ for _, node := range nodes {
+ oncePair := pairs.runOncePairFor(node.ID)
+ if !oncePair.isZero() && g.runOnceTracker[oncePair].Is(types.SpecStatePassed) {
+ continue
+ }
+ g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.suite.runNode(node, deadline, spec.Nodes.BestTextFor(node))
+ g.suite.currentSpecReport.RunTime = time.Since(g.suite.currentSpecReport.StartTime)
+ if !oncePair.isZero() {
+ g.runOnceTracker[oncePair] = g.suite.currentSpecReport.State
+ }
+ if g.suite.currentSpecReport.State != types.SpecStatePassed {
+ terminatingNode, terminatingPair = node, oncePair
+ failedInARunOnceBefore = !terminatingPair.isZero()
+ break
+ }
+ }
+
+ afterNodeWasRun := map[uint]bool{}
+ includeDeferCleanups := false
+ for {
+ nodes := spec.Nodes.WithType(types.NodeTypeAfterEach)
+ nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeAfterAll)...).SortedByDescendingNestingLevel()
+ nodes = append(spec.Nodes.WithType(types.NodeTypeJustAfterEach).SortedByDescendingNestingLevel(), nodes...)
+ if !terminatingNode.IsZero() {
+ nodes = nodes.WithinNestingLevel(terminatingNode.NestingLevel)
+ }
+ if includeDeferCleanups {
+ nodes = append(nodes, g.suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterEach).Reverse()...)
+ nodes = append(nodes, g.suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterAll).Reverse()...)
+ }
+ nodes = nodes.Filter(func(node Node) bool {
+ if afterNodeWasRun[node.ID] {
+ //this node has already been run on this attempt, don't rerun it
+ return false
+ }
+ var pair runOncePair
+ switch node.NodeType {
+ case types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll:
+ // check if we were generated in an AfterNode that has already run
+ if afterNodeWasRun[node.NodeIDWhereCleanupWasGenerated] {
+ return true // we were, so we should definitely run this cleanup now
+ }
+ // looks like this cleanup node was generated by a before node or an It;
+ // the run-once status of a cleanup node is governed by the run-once status of its generator
+ pair = pairs.runOncePairFor(node.NodeIDWhereCleanupWasGenerated)
+ default:
+ pair = pairs.runOncePairFor(node.ID)
+ }
+ if pair.isZero() {
+ // this node is not governed by any run-once policy, we should run it
+ return true
+ }
+ // it's our last chance to run if we're the last spec for our oncePair
+ isLastSpecWithPair := g.isLastSpecWithPair(spec.SubjectID(), pair)
+
+ switch g.suite.currentSpecReport.State {
+ case types.SpecStatePassed: //this attempt is passing...
+ return isLastSpecWithPair //...we should run if this is our last chance
+ case types.SpecStateSkipped: //the spec was skipped by the user...
+ if isLastSpecWithPair {
+ return true //...we're the last spec, so we should run the AfterNode
+ }
+ if !terminatingPair.isZero() && terminatingNode.NestingLevel == node.NestingLevel {
+ return true //...or, a run-once node at our nesting level was skipped which means this is our last chance to run
+ }
+ case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateTimedout: // the spec has failed...
+ if isFinalAttempt {
+ if g.continueOnFailure {
+ return isLastSpecWithPair || failedInARunOnceBefore //...we're configured to continue on failures - so we should only run if we're the last spec for this pair or if we failed in a runOnceBefore (which means we _are_ the last spec to run)
+ } else {
+ return true //...this was the last attempt and continueOnFailure is false therefore we are the last spec to run and so the AfterNode should run
+ }
+ }
+ if !terminatingPair.isZero() { // ...and it failed in a run-once node, which will be run again
+ if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll) {
+ return terminatingNode.ID == node.NodeIDWhereCleanupWasGenerated // we should run this node if we're a clean-up generated by it
+ } else {
+ return terminatingNode.NestingLevel == node.NestingLevel // ...or if we're at the same nesting level
+ }
+ }
+ case types.SpecStateInterrupted, types.SpecStateAborted: // ...we've been interrupted and/or aborted
+ return true //...that means the test run is over and we should clean up the stack. Run the AfterNode
+ }
+ return false
+ })
+
+ if len(nodes) == 0 && includeDeferCleanups {
+ break
+ }
+
+ for _, node := range nodes {
+ afterNodeWasRun[node.ID] = true
+ state, failure := g.suite.runNode(node, deadline, spec.Nodes.BestTextFor(node))
+ g.suite.currentSpecReport.RunTime = time.Since(g.suite.currentSpecReport.StartTime)
+ if g.suite.currentSpecReport.State == types.SpecStatePassed || state == types.SpecStateAborted {
+ g.suite.currentSpecReport.State = state
+ g.suite.currentSpecReport.Failure = failure
+ } else if state.Is(types.SpecStateFailureStates) {
+ g.suite.currentSpecReport.AdditionalFailures = append(g.suite.currentSpecReport.AdditionalFailures, types.AdditionalFailure{State: state, Failure: failure})
+ }
+ }
+ includeDeferCleanups = true
+ }
+
+ return failedInARunOnceBefore
+}
+
+func (g *group) run(specs Specs) {
+ g.specs = specs
+ g.continueOnFailure = specs[0].Nodes.FirstNodeMarkedOrdered().MarkedContinueOnFailure
+ for _, spec := range g.specs {
+ g.runOncePairs[spec.SubjectID()] = runOncePairsForSpec(spec)
+ }
+
+ for _, spec := range g.specs {
+ g.suite.selectiveLock.Lock()
+ g.suite.currentSpecReport = g.initialReportForSpec(spec)
+ g.suite.selectiveLock.Unlock()
+
+ g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.evaluateSkipStatus(spec)
+ g.suite.reporter.WillRun(g.suite.currentSpecReport)
+ g.suite.reportEach(spec, types.NodeTypeReportBeforeEach)
+
+ skip := g.suite.config.DryRun || g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates|types.SpecStateSkipped|types.SpecStatePending)
+
+ g.suite.currentSpecReport.StartTime = time.Now()
+ failedInARunOnceBefore := false
+ if !skip {
+ var maxAttempts = 1
+
+ if g.suite.config.MustPassRepeatedly > 0 {
+ maxAttempts = g.suite.config.MustPassRepeatedly
+ g.suite.currentSpecReport.MaxMustPassRepeatedly = maxAttempts
+ } else if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 {
+ maxAttempts = max(1, spec.MustPassRepeatedly())
+ } else if g.suite.config.FlakeAttempts > 0 {
+ maxAttempts = g.suite.config.FlakeAttempts
+ g.suite.currentSpecReport.MaxFlakeAttempts = maxAttempts
+ } else if g.suite.currentSpecReport.MaxFlakeAttempts > 0 {
+ maxAttempts = max(1, spec.FlakeAttempts())
+ }
+
+ for attempt := 0; attempt < maxAttempts; attempt++ {
+ g.suite.currentSpecReport.NumAttempts = attempt + 1
+ g.suite.writer.Truncate()
+ g.suite.outputInterceptor.StartInterceptingOutput()
+ if attempt > 0 {
+ if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 {
+ g.suite.handleSpecEvent(types.SpecEvent{SpecEventType: types.SpecEventSpecRepeat, Attempt: attempt})
+ }
+ if g.suite.currentSpecReport.MaxFlakeAttempts > 0 {
+ g.suite.handleSpecEvent(types.SpecEvent{SpecEventType: types.SpecEventSpecRetry, Attempt: attempt})
+ }
+ }
+
+ failedInARunOnceBefore = g.attemptSpec(attempt == maxAttempts-1, spec)
+
+ g.suite.currentSpecReport.EndTime = time.Now()
+ g.suite.currentSpecReport.RunTime = g.suite.currentSpecReport.EndTime.Sub(g.suite.currentSpecReport.StartTime)
+ g.suite.currentSpecReport.CapturedGinkgoWriterOutput += string(g.suite.writer.Bytes())
+ g.suite.currentSpecReport.CapturedStdOutErr += g.suite.outputInterceptor.StopInterceptingAndReturnOutput()
+
+ if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 {
+ if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates | types.SpecStateSkipped) {
+ break
+ }
+ }
+ if g.suite.currentSpecReport.MaxFlakeAttempts > 0 {
+ if g.suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) {
+ break
+ } else if attempt < maxAttempts-1 {
+ af := types.AdditionalFailure{State: g.suite.currentSpecReport.State, Failure: g.suite.currentSpecReport.Failure}
+ af.Failure.Message = fmt.Sprintf("Failure recorded during attempt %d:\n%s", attempt+1, af.Failure.Message)
+ g.suite.currentSpecReport.AdditionalFailures = append(g.suite.currentSpecReport.AdditionalFailures, af)
+ }
+ }
+ }
+ }
+
+ g.suite.reportEach(spec, types.NodeTypeReportAfterEach)
+ g.suite.processCurrentSpecReport()
+ if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
+ g.succeeded = false
+ g.failedInARunOnceBefore = g.failedInARunOnceBefore || failedInARunOnceBefore
+ }
+ g.suite.selectiveLock.Lock()
+ g.suite.currentSpecReport = types.SpecReport{}
+ g.suite.selectiveLock.Unlock()
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go
new file mode 100644
index 0000000..8ed8611
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go
@@ -0,0 +1,177 @@
+package interrupt_handler
+
+import (
+ "os"
+ "os/signal"
+ "sync"
+ "syscall"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/internal/parallel_support"
+)
+
+var ABORT_POLLING_INTERVAL = 500 * time.Millisecond
+
+type InterruptCause uint
+
+const (
+ InterruptCauseInvalid InterruptCause = iota
+ InterruptCauseSignal
+ InterruptCauseAbortByOtherProcess
+)
+
+type InterruptLevel uint
+
+const (
+ InterruptLevelUninterrupted InterruptLevel = iota
+ InterruptLevelCleanupAndReport
+ InterruptLevelReportOnly
+ InterruptLevelBailOut
+)
+
+func (ic InterruptCause) String() string {
+ switch ic {
+ case InterruptCauseSignal:
+ return "Interrupted by User"
+ case InterruptCauseAbortByOtherProcess:
+ return "Interrupted by Other Ginkgo Process"
+ }
+ return "INVALID_INTERRUPT_CAUSE"
+}
+
+type InterruptStatus struct {
+ Channel chan interface{}
+ Level InterruptLevel
+ Cause InterruptCause
+}
+
+func (s InterruptStatus) Interrupted() bool {
+ return s.Level != InterruptLevelUninterrupted
+}
+
+func (s InterruptStatus) Message() string {
+ return s.Cause.String()
+}
+
+func (s InterruptStatus) ShouldIncludeProgressReport() bool {
+ return s.Cause != InterruptCauseAbortByOtherProcess
+}
+
+type InterruptHandlerInterface interface {
+ Status() InterruptStatus
+}
+
+type InterruptHandler struct {
+ c chan interface{}
+ lock *sync.Mutex
+ level InterruptLevel
+ cause InterruptCause
+ client parallel_support.Client
+ stop chan interface{}
+ signals []os.Signal
+ requestAbortCheck chan interface{}
+}
+
+func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) *InterruptHandler {
+ if len(signals) == 0 {
+ signals = []os.Signal{os.Interrupt, syscall.SIGTERM}
+ }
+ handler := &InterruptHandler{
+ c: make(chan interface{}),
+ lock: &sync.Mutex{},
+ stop: make(chan interface{}),
+ requestAbortCheck: make(chan interface{}),
+ client: client,
+ signals: signals,
+ }
+ handler.registerForInterrupts()
+ return handler
+}
+
+func (handler *InterruptHandler) Stop() {
+ close(handler.stop)
+}
+
+func (handler *InterruptHandler) registerForInterrupts() {
+ // os signal handling
+ signalChannel := make(chan os.Signal, 1)
+ signal.Notify(signalChannel, handler.signals...)
+
+ // cross-process abort handling
+ var abortChannel chan interface{}
+ if handler.client != nil {
+ abortChannel = make(chan interface{})
+ go func() {
+ pollTicker := time.NewTicker(ABORT_POLLING_INTERVAL)
+ for {
+ select {
+ case <-pollTicker.C:
+ if handler.client.ShouldAbort() {
+ close(abortChannel)
+ pollTicker.Stop()
+ return
+ }
+ case <-handler.requestAbortCheck:
+ if handler.client.ShouldAbort() {
+ close(abortChannel)
+ pollTicker.Stop()
+ return
+ }
+ case <-handler.stop:
+ pollTicker.Stop()
+ return
+ }
+ }
+ }()
+ }
+
+ go func(abortChannel chan interface{}) {
+ var interruptCause InterruptCause
+ for {
+ select {
+ case <-signalChannel:
+ interruptCause = InterruptCauseSignal
+ case <-abortChannel:
+ interruptCause = InterruptCauseAbortByOtherProcess
+ case <-handler.stop:
+ signal.Stop(signalChannel)
+ return
+ }
+ abortChannel = nil
+
+ handler.lock.Lock()
+ oldLevel := handler.level
+ handler.cause = interruptCause
+ if handler.level == InterruptLevelUninterrupted {
+ handler.level = InterruptLevelCleanupAndReport
+ } else if handler.level == InterruptLevelCleanupAndReport {
+ handler.level = InterruptLevelReportOnly
+ } else if handler.level == InterruptLevelReportOnly {
+ handler.level = InterruptLevelBailOut
+ }
+ if handler.level != oldLevel {
+ close(handler.c)
+ handler.c = make(chan interface{})
+ }
+ handler.lock.Unlock()
+ }
+ }(abortChannel)
+}
+
+func (handler *InterruptHandler) Status() InterruptStatus {
+ handler.lock.Lock()
+ status := InterruptStatus{
+ Level: handler.level,
+ Channel: handler.c,
+ Cause: handler.cause,
+ }
+ handler.lock.Unlock()
+
+ if handler.client != nil && handler.client.ShouldAbort() && !status.Interrupted() {
+ close(handler.requestAbortCheck)
+ <-status.Channel
+ return handler.Status()
+ }
+
+ return status
+}
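+
+// Each successive interrupt escalates one level (cleanup-and-report, then
+// report-only, then bail-out), closing and replacing Status().Channel at each
+// step. A consumer sketch, where done is a hypothetical completion channel:
+//
+//    status := handler.Status()
+//    select {
+//    case <-status.Channel:
+//        // interrupted or escalated: re-check handler.Status() and wind down
+//    case <-done:
+//        // work finished normally
+//    }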
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go
new file mode 100644
index 0000000..bf0de49
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go
@@ -0,0 +1,15 @@
+//go:build freebsd || openbsd || netbsd || dragonfly || darwin || linux || solaris
+// +build freebsd openbsd netbsd dragonfly darwin linux solaris
+
+package interrupt_handler
+
+import (
+ "os"
+ "os/signal"
+ "syscall"
+)
+
+func SwallowSigQuit() {
+ c := make(chan os.Signal, 1024)
+ signal.Notify(c, syscall.SIGQUIT)
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go
new file mode 100644
index 0000000..fcf8da8
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go
@@ -0,0 +1,8 @@
+//go:build windows
+// +build windows
+
+package interrupt_handler
+
+func SwallowSigQuit() {
+ //noop
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/vendor/github.com/onsi/ginkgo/v2/internal/node.go
new file mode 100644
index 0000000..6a15f19
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/node.go
@@ -0,0 +1,935 @@
+package internal
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+var _global_node_id_counter = uint(0)
+var _global_id_mutex = &sync.Mutex{}
+
+func UniqueNodeID() uint {
+ // There's a race in the internal integration tests if we don't make
+ // accessing _global_node_id_counter safe across goroutines.
+ _global_id_mutex.Lock()
+ defer _global_id_mutex.Unlock()
+ _global_node_id_counter += 1
+ return _global_node_id_counter
+}
+
+type Node struct {
+ ID uint
+ NodeType types.NodeType
+
+ Text string
+ Body func(SpecContext)
+ CodeLocation types.CodeLocation
+ NestingLevel int
+ HasContext bool
+
+ SynchronizedBeforeSuiteProc1Body func(SpecContext) []byte
+ SynchronizedBeforeSuiteProc1BodyHasContext bool
+ SynchronizedBeforeSuiteAllProcsBody func(SpecContext, []byte)
+ SynchronizedBeforeSuiteAllProcsBodyHasContext bool
+
+ SynchronizedAfterSuiteAllProcsBody func(SpecContext)
+ SynchronizedAfterSuiteAllProcsBodyHasContext bool
+ SynchronizedAfterSuiteProc1Body func(SpecContext)
+ SynchronizedAfterSuiteProc1BodyHasContext bool
+
+ ReportEachBody func(SpecContext, types.SpecReport)
+ ReportSuiteBody func(SpecContext, types.Report)
+
+ MarkedFocus bool
+ MarkedPending bool
+ MarkedSerial bool
+ MarkedOrdered bool
+ MarkedContinueOnFailure bool
+ MarkedOncePerOrdered bool
+ FlakeAttempts int
+ MustPassRepeatedly int
+ Labels Labels
+ PollProgressAfter time.Duration
+ PollProgressInterval time.Duration
+ NodeTimeout time.Duration
+ SpecTimeout time.Duration
+ GracePeriod time.Duration
+
+ NodeIDWhereCleanupWasGenerated uint
+}
+
+// Decoration Types
+type focusType bool
+type pendingType bool
+type serialType bool
+type orderedType bool
+type continueOnFailureType bool
+type honorsOrderedType bool
+type suppressProgressReporting bool
+
+const Focus = focusType(true)
+const Pending = pendingType(true)
+const Serial = serialType(true)
+const Ordered = orderedType(true)
+const ContinueOnFailure = continueOnFailureType(true)
+const OncePerOrdered = honorsOrderedType(true)
+const SuppressProgressReporting = suppressProgressReporting(true)
+
+type FlakeAttempts uint
+type MustPassRepeatedly uint
+type Offset uint
+type Done chan<- interface{} // Deprecated Done Channel for asynchronous testing
+type Labels []string
+type PollProgressInterval time.Duration
+type PollProgressAfter time.Duration
+type NodeTimeout time.Duration
+type SpecTimeout time.Duration
+type GracePeriod time.Duration
+
+func (l Labels) MatchesLabelFilter(query string) bool {
+ return types.MustParseLabelFilter(query)(l)
+}
+
+func UnionOfLabels(labels ...Labels) Labels {
+ out := Labels{}
+ seen := map[string]bool{}
+ for _, labelSet := range labels {
+ for _, label := range labelSet {
+ if !seen[label] {
+ seen[label] = true
+ out = append(out, label)
+ }
+ }
+ }
+ return out
+}
+
+func PartitionDecorations(args ...interface{}) ([]interface{}, []interface{}) {
+ decorations := []interface{}{}
+ remainingArgs := []interface{}{}
+ for _, arg := range args {
+ if isDecoration(arg) {
+ decorations = append(decorations, arg)
+ } else {
+ remainingArgs = append(remainingArgs, arg)
+ }
+ }
+ return decorations, remainingArgs
+}
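+
+// For example (with assumed arguments), decorations and ordinary arguments can
+// be interleaved and are split by type, not position:
+//
+//    dec, rest := PartitionDecorations(Offset(1), "some text", Labels{"fast"}, func() {})
+//    // dec  == []interface{}{Offset(1), Labels{"fast"}}
+//    // rest == []interface{}{"some text", func() {}}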
+
+func isDecoration(arg interface{}) bool {
+ switch t := reflect.TypeOf(arg); {
+ case t == nil:
+ return false
+ case t == reflect.TypeOf(Offset(0)):
+ return true
+ case t == reflect.TypeOf(types.CodeLocation{}):
+ return true
+ case t == reflect.TypeOf(Focus):
+ return true
+ case t == reflect.TypeOf(Pending):
+ return true
+ case t == reflect.TypeOf(Serial):
+ return true
+ case t == reflect.TypeOf(Ordered):
+ return true
+ case t == reflect.TypeOf(ContinueOnFailure):
+ return true
+ case t == reflect.TypeOf(OncePerOrdered):
+ return true
+ case t == reflect.TypeOf(SuppressProgressReporting):
+ return true
+ case t == reflect.TypeOf(FlakeAttempts(0)):
+ return true
+ case t == reflect.TypeOf(MustPassRepeatedly(0)):
+ return true
+ case t == reflect.TypeOf(Labels{}):
+ return true
+ case t == reflect.TypeOf(PollProgressInterval(0)):
+ return true
+ case t == reflect.TypeOf(PollProgressAfter(0)):
+ return true
+ case t == reflect.TypeOf(NodeTimeout(0)):
+ return true
+ case t == reflect.TypeOf(SpecTimeout(0)):
+ return true
+ case t == reflect.TypeOf(GracePeriod(0)):
+ return true
+ case t.Kind() == reflect.Slice && isSliceOfDecorations(arg):
+ return true
+ default:
+ return false
+ }
+}
+
+func isSliceOfDecorations(slice interface{}) bool {
+ vSlice := reflect.ValueOf(slice)
+ if vSlice.Len() == 0 {
+ return false
+ }
+ for i := 0; i < vSlice.Len(); i++ {
+ if !isDecoration(vSlice.Index(i).Interface()) {
+ return false
+ }
+ }
+ return true
+}
+
+var contextType = reflect.TypeOf(new(context.Context)).Elem()
+var specContextType = reflect.TypeOf(new(SpecContext)).Elem()
+
+func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeType, text string, args ...interface{}) (Node, []error) {
+ baseOffset := 2
+ node := Node{
+ ID: UniqueNodeID(),
+ NodeType: nodeType,
+ Text: text,
+ Labels: Labels{},
+ CodeLocation: types.NewCodeLocation(baseOffset),
+ NestingLevel: -1,
+ PollProgressAfter: -1,
+ PollProgressInterval: -1,
+ GracePeriod: -1,
+ }
+
+ errors := []error{}
+ appendError := func(err error) {
+ if err != nil {
+ errors = append(errors, err)
+ }
+ }
+
+ args = unrollInterfaceSlice(args)
+
+ remainingArgs := []interface{}{}
+ // First get the CodeLocation up-to-date
+ for _, arg := range args {
+ switch v := arg.(type) {
+ case Offset:
+ node.CodeLocation = types.NewCodeLocation(baseOffset + int(v))
+ case types.CodeLocation:
+ node.CodeLocation = v
+ default:
+ remainingArgs = append(remainingArgs, arg)
+ }
+ }
+
+ labelsSeen := map[string]bool{}
+ trackedFunctionError := false
+ args = remainingArgs
+ remainingArgs = []interface{}{}
+ // now process the rest of the args
+ for _, arg := range args {
+ switch t := reflect.TypeOf(arg); {
+ case t == reflect.TypeOf(float64(0)):
+ break // ignore deprecated timeouts
+ case t == reflect.TypeOf(Focus):
+ node.MarkedFocus = bool(arg.(focusType))
+ if !nodeType.Is(types.NodeTypesForContainerAndIt) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Focus"))
+ }
+ case t == reflect.TypeOf(Pending):
+ node.MarkedPending = bool(arg.(pendingType))
+ if !nodeType.Is(types.NodeTypesForContainerAndIt) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Pending"))
+ }
+ case t == reflect.TypeOf(Serial):
+ node.MarkedSerial = bool(arg.(serialType))
+ if !nodeType.Is(types.NodeTypesForContainerAndIt) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Serial"))
+ }
+ case t == reflect.TypeOf(Ordered):
+ node.MarkedOrdered = bool(arg.(orderedType))
+ if !nodeType.Is(types.NodeTypeContainer) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Ordered"))
+ }
+ case t == reflect.TypeOf(ContinueOnFailure):
+ node.MarkedContinueOnFailure = bool(arg.(continueOnFailureType))
+ if !nodeType.Is(types.NodeTypeContainer) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "ContinueOnFailure"))
+ }
+ case t == reflect.TypeOf(OncePerOrdered):
+ node.MarkedOncePerOrdered = bool(arg.(honorsOrderedType))
+ if !nodeType.Is(types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach | types.NodeTypeAfterEach | types.NodeTypeJustAfterEach) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "OncePerOrdered"))
+ }
+ case t == reflect.TypeOf(SuppressProgressReporting):
+ deprecationTracker.TrackDeprecation(types.Deprecations.SuppressProgressReporting())
+ case t == reflect.TypeOf(FlakeAttempts(0)):
+ node.FlakeAttempts = int(arg.(FlakeAttempts))
+ if !nodeType.Is(types.NodeTypesForContainerAndIt) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "FlakeAttempts"))
+ }
+ case t == reflect.TypeOf(MustPassRepeatedly(0)):
+ node.MustPassRepeatedly = int(arg.(MustPassRepeatedly))
+ if !nodeType.Is(types.NodeTypesForContainerAndIt) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "MustPassRepeatedly"))
+ }
+ case t == reflect.TypeOf(PollProgressAfter(0)):
+ node.PollProgressAfter = time.Duration(arg.(PollProgressAfter))
+ if nodeType.Is(types.NodeTypeContainer) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "PollProgressAfter"))
+ }
+ case t == reflect.TypeOf(PollProgressInterval(0)):
+ node.PollProgressInterval = time.Duration(arg.(PollProgressInterval))
+ if nodeType.Is(types.NodeTypeContainer) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "PollProgressInterval"))
+ }
+ case t == reflect.TypeOf(NodeTimeout(0)):
+ node.NodeTimeout = time.Duration(arg.(NodeTimeout))
+ if nodeType.Is(types.NodeTypeContainer) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "NodeTimeout"))
+ }
+ case t == reflect.TypeOf(SpecTimeout(0)):
+ node.SpecTimeout = time.Duration(arg.(SpecTimeout))
+ if !nodeType.Is(types.NodeTypeIt) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "SpecTimeout"))
+ }
+ case t == reflect.TypeOf(GracePeriod(0)):
+ node.GracePeriod = time.Duration(arg.(GracePeriod))
+ if nodeType.Is(types.NodeTypeContainer) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "GracePeriod"))
+ }
+ case t == reflect.TypeOf(Labels{}):
+ if !nodeType.Is(types.NodeTypesForContainerAndIt) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Label"))
+ }
+ for _, label := range arg.(Labels) {
+ if !labelsSeen[label] {
+ labelsSeen[label] = true
+ label, err := types.ValidateAndCleanupLabel(label, node.CodeLocation)
+ node.Labels = append(node.Labels, label)
+ appendError(err)
+ }
+ }
+ case t.Kind() == reflect.Func:
+ if nodeType.Is(types.NodeTypeContainer) {
+ if node.Body != nil {
+ appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ if t.NumOut() > 0 || t.NumIn() > 0 {
+ appendError(types.GinkgoErrors.InvalidBodyTypeForContainer(t, node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ body := arg.(func())
+ node.Body = func(SpecContext) { body() }
+ } else if nodeType.Is(types.NodeTypeReportBeforeEach | types.NodeTypeReportAfterEach) {
+ if node.ReportEachBody == nil {
+ if fn, ok := arg.(func(types.SpecReport)); ok {
+ node.ReportEachBody = func(_ SpecContext, r types.SpecReport) { fn(r) }
+ } else {
+ node.ReportEachBody = arg.(func(SpecContext, types.SpecReport))
+ node.HasContext = true
+ }
+ } else {
+ appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ } else if nodeType.Is(types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) {
+ if node.ReportSuiteBody == nil {
+ if fn, ok := arg.(func(types.Report)); ok {
+ node.ReportSuiteBody = func(_ SpecContext, r types.Report) { fn(r) }
+ } else {
+ node.ReportSuiteBody = arg.(func(SpecContext, types.Report))
+ node.HasContext = true
+ }
+ } else {
+ appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ } else if nodeType.Is(types.NodeTypeSynchronizedBeforeSuite) {
+ if node.SynchronizedBeforeSuiteProc1Body != nil && node.SynchronizedBeforeSuiteAllProcsBody != nil {
+ appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ if node.SynchronizedBeforeSuiteProc1Body == nil {
+ body, hasContext := extractSynchronizedBeforeSuiteProc1Body(arg)
+ if body == nil {
+ appendError(types.GinkgoErrors.InvalidBodyTypeForSynchronizedBeforeSuiteProc1(t, node.CodeLocation))
+ trackedFunctionError = true
+ }
+ node.SynchronizedBeforeSuiteProc1Body, node.SynchronizedBeforeSuiteProc1BodyHasContext = body, hasContext
+ } else if node.SynchronizedBeforeSuiteAllProcsBody == nil {
+ body, hasContext := extractSynchronizedBeforeSuiteAllProcsBody(arg)
+ if body == nil {
+ appendError(types.GinkgoErrors.InvalidBodyTypeForSynchronizedBeforeSuiteAllProcs(t, node.CodeLocation))
+ trackedFunctionError = true
+ }
+ node.SynchronizedBeforeSuiteAllProcsBody, node.SynchronizedBeforeSuiteAllProcsBodyHasContext = body, hasContext
+ }
+ } else if nodeType.Is(types.NodeTypeSynchronizedAfterSuite) {
+ if node.SynchronizedAfterSuiteAllProcsBody != nil && node.SynchronizedAfterSuiteProc1Body != nil {
+ appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ body, hasContext := extractBodyFunction(deprecationTracker, node.CodeLocation, arg)
+ if body == nil {
+ appendError(types.GinkgoErrors.InvalidBodyType(t, node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ if node.SynchronizedAfterSuiteAllProcsBody == nil {
+ node.SynchronizedAfterSuiteAllProcsBody, node.SynchronizedAfterSuiteAllProcsBodyHasContext = body, hasContext
+ } else if node.SynchronizedAfterSuiteProc1Body == nil {
+ node.SynchronizedAfterSuiteProc1Body, node.SynchronizedAfterSuiteProc1BodyHasContext = body, hasContext
+ }
+ } else {
+ if node.Body != nil {
+ appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ node.Body, node.HasContext = extractBodyFunction(deprecationTracker, node.CodeLocation, arg)
+ if node.Body == nil {
+ appendError(types.GinkgoErrors.InvalidBodyType(t, node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ }
+ default:
+ remainingArgs = append(remainingArgs, arg)
+ }
+ }
+
+ // validations
+ if node.MarkedPending && node.MarkedFocus {
+ appendError(types.GinkgoErrors.InvalidDeclarationOfFocusedAndPending(node.CodeLocation, nodeType))
+ }
+
+ if node.MarkedContinueOnFailure && !node.MarkedOrdered {
+ appendError(types.GinkgoErrors.InvalidContinueOnFailureDecoration(node.CodeLocation))
+ }
+
+ hasContext := node.HasContext || node.SynchronizedAfterSuiteProc1BodyHasContext || node.SynchronizedAfterSuiteAllProcsBodyHasContext || node.SynchronizedBeforeSuiteProc1BodyHasContext || node.SynchronizedBeforeSuiteAllProcsBodyHasContext
+
+ if !hasContext && (node.NodeTimeout > 0 || node.SpecTimeout > 0 || node.GracePeriod > 0) && len(errors) == 0 {
+ appendError(types.GinkgoErrors.InvalidTimeoutOrGracePeriodForNonContextNode(node.CodeLocation, nodeType))
+ }
+
+ if !node.NodeType.Is(types.NodeTypeReportBeforeEach|types.NodeTypeReportAfterEach|types.NodeTypeSynchronizedBeforeSuite|types.NodeTypeSynchronizedAfterSuite|types.NodeTypeReportBeforeSuite|types.NodeTypeReportAfterSuite) && node.Body == nil && !node.MarkedPending && !trackedFunctionError {
+ appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType))
+ }
+
+ if node.NodeType.Is(types.NodeTypeSynchronizedBeforeSuite) && !trackedFunctionError && (node.SynchronizedBeforeSuiteProc1Body == nil || node.SynchronizedBeforeSuiteAllProcsBody == nil) {
+ appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType))
+ }
+
+ if node.NodeType.Is(types.NodeTypeSynchronizedAfterSuite) && !trackedFunctionError && (node.SynchronizedAfterSuiteProc1Body == nil || node.SynchronizedAfterSuiteAllProcsBody == nil) {
+ appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType))
+ }
+
+ for _, arg := range remainingArgs {
+ appendError(types.GinkgoErrors.UnknownDecorator(node.CodeLocation, nodeType, arg))
+ }
+
+ if node.FlakeAttempts > 0 && node.MustPassRepeatedly > 0 {
+ appendError(types.GinkgoErrors.InvalidDeclarationOfFlakeAttemptsAndMustPassRepeatedly(node.CodeLocation, nodeType))
+ }
+
+ if len(errors) > 0 {
+ return Node{}, errors
+ }
+
+ return node, errors
+}
+
+var doneType = reflect.TypeOf(make(Done))
+
+func extractBodyFunction(deprecationTracker *types.DeprecationTracker, cl types.CodeLocation, arg interface{}) (func(SpecContext), bool) {
+ t := reflect.TypeOf(arg)
+ if t.NumOut() > 0 || t.NumIn() > 1 {
+ return nil, false
+ }
+ if t.NumIn() == 1 {
+ if t.In(0) == doneType {
+ deprecationTracker.TrackDeprecation(types.Deprecations.Async(), cl)
+ deprecatedAsyncBody := arg.(func(Done))
+ return func(SpecContext) { deprecatedAsyncBody(make(Done)) }, false
+ } else if t.In(0).Implements(specContextType) {
+ return arg.(func(SpecContext)), true
+ } else if t.In(0).Implements(contextType) {
+ body := arg.(func(context.Context))
+ return func(c SpecContext) { body(c) }, true
+ }
+
+ return nil, false
+ }
+
+ body := arg.(func())
+ return func(SpecContext) { body() }, false
+}
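+
+// For illustration, each of the following body shapes is accepted by
+// extractBodyFunction (the spec text is a placeholder):
+//
+//	It("works", func() {})                       // no-arg body
+//	It("works", func(ctx SpecContext) {})        // receives the spec's context
+//	It("works", func(ctx context.Context) {})    // plain context is wrapped in a SpecContext
+//	It("works", func(done Done) { close(done) }) // deprecated async form; tracked as a deprecation
+//
+// Anything else - e.g. a body with a return value - yields (nil, false) and the
+// caller reports an invalid body type.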
+
+var byteType = reflect.TypeOf([]byte{})
+
+func extractSynchronizedBeforeSuiteProc1Body(arg interface{}) (func(SpecContext) []byte, bool) {
+ t := reflect.TypeOf(arg)
+ v := reflect.ValueOf(arg)
+
+ if t.NumOut() > 1 || t.NumIn() > 1 {
+ return nil, false
+ } else if t.NumOut() == 1 && t.Out(0) != byteType {
+ return nil, false
+ } else if t.NumIn() == 1 && !t.In(0).Implements(contextType) {
+ return nil, false
+ }
+ hasContext := t.NumIn() == 1
+
+ return func(c SpecContext) []byte {
+ var out []reflect.Value
+ if hasContext {
+ out = v.Call([]reflect.Value{reflect.ValueOf(c)})
+ } else {
+ out = v.Call([]reflect.Value{})
+ }
+ if len(out) == 1 {
+ return (out[0].Interface()).([]byte)
+ } else {
+ return []byte{}
+ }
+ }, hasContext
+}
+
+func extractSynchronizedBeforeSuiteAllProcsBody(arg interface{}) (func(SpecContext, []byte), bool) {
+ t := reflect.TypeOf(arg)
+ v := reflect.ValueOf(arg)
+ hasContext, hasByte := false, false
+
+ if t.NumOut() > 0 || t.NumIn() > 2 {
+ return nil, false
+ } else if t.NumIn() == 2 && t.In(0).Implements(contextType) && t.In(1) == byteType {
+ hasContext, hasByte = true, true
+ } else if t.NumIn() == 1 && t.In(0).Implements(contextType) {
+ hasContext = true
+ } else if t.NumIn() == 1 && t.In(0) == byteType {
+ hasByte = true
+ } else if t.NumIn() != 0 {
+ return nil, false
+ }
+
+ return func(c SpecContext, b []byte) {
+ in := []reflect.Value{}
+ if hasContext {
+ in = append(in, reflect.ValueOf(c))
+ }
+ if hasByte {
+ in = append(in, reflect.ValueOf(b))
+ }
+ v.Call(in)
+ }, hasContext
+}
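+
+// For illustration, the two extractors above let SynchronizedBeforeSuite accept
+// pairs such as (function bodies are placeholders):
+//
+//	SynchronizedBeforeSuite(func() []byte { return []byte("shared") }, func(data []byte) {})
+//	SynchronizedBeforeSuite(func(ctx SpecContext) []byte { return nil }, func(ctx SpecContext, data []byte) {})
+//	SynchronizedBeforeSuite(func() {}, func() {}) // returning no data and ignoring it is also valid
+//
+// The first function runs only on process #1; the []byte it returns is passed to
+// the second function on every process.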
+
+var errInterface = reflect.TypeOf((*error)(nil)).Elem()
+
+func NewCleanupNode(deprecationTracker *types.DeprecationTracker, fail func(string, types.CodeLocation), args ...interface{}) (Node, []error) {
+ decorations, remainingArgs := PartitionDecorations(args...)
+ baseOffset := 2
+ cl := types.NewCodeLocation(baseOffset)
+ finalArgs := []interface{}{}
+ for _, arg := range decorations {
+ switch t := reflect.TypeOf(arg); {
+ case t == reflect.TypeOf(Offset(0)):
+ cl = types.NewCodeLocation(baseOffset + int(arg.(Offset)))
+ case t == reflect.TypeOf(types.CodeLocation{}):
+ cl = arg.(types.CodeLocation)
+ default:
+ finalArgs = append(finalArgs, arg)
+ }
+ }
+ finalArgs = append(finalArgs, cl)
+
+ if len(remainingArgs) == 0 {
+ return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(cl)}
+ }
+
+ callback := reflect.ValueOf(remainingArgs[0])
+ if !(callback.Kind() == reflect.Func) {
+ return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(cl)}
+ }
+
+ callArgs := []reflect.Value{}
+ for _, arg := range remainingArgs[1:] {
+ callArgs = append(callArgs, reflect.ValueOf(arg))
+ }
+
+ hasContext := false
+ t := callback.Type()
+ if t.NumIn() > 0 {
+ if t.In(0).Implements(specContextType) {
+ hasContext = true
+ } else if t.In(0).Implements(contextType) && (len(callArgs) == 0 || !callArgs[0].Type().Implements(contextType)) {
+ hasContext = true
+ }
+ }
+
+ handleFailure := func(out []reflect.Value) {
+ if len(out) == 0 {
+ return
+ }
+ last := out[len(out)-1]
+ if last.Type().Implements(errInterface) && !last.IsNil() {
+ fail(fmt.Sprintf("DeferCleanup callback returned error: %v", last), cl)
+ }
+ }
+
+ if hasContext {
+ finalArgs = append(finalArgs, func(c SpecContext) {
+ out := callback.Call(append([]reflect.Value{reflect.ValueOf(c)}, callArgs...))
+ handleFailure(out)
+ })
+ } else {
+ finalArgs = append(finalArgs, func() {
+ out := callback.Call(callArgs)
+ handleFailure(out)
+ })
+ }
+
+ return NewNode(deprecationTracker, types.NodeTypeCleanupInvalid, "", finalArgs...)
+}
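+
+// For illustration, NewCleanupNode backs DeferCleanup, so call patterns like the
+// following are supported (helper names are placeholders):
+//
+//	DeferCleanup(func() { tidyUp() })
+//	DeferCleanup(func() error { return conn.Close() })  // a returned error is reported via fail()
+//	DeferCleanup(os.Setenv, "MODE", originalMode)       // extra arguments are forwarded to the callback
+//	DeferCleanup(func(ctx SpecContext) { drain(ctx) })  // context-aware cleanups receive a SpecContext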
+
+func (n Node) IsZero() bool {
+ return n.ID == 0
+}
+
+/* Nodes */
+type Nodes []Node
+
+func (n Nodes) Clone() Nodes {
+ nodes := make(Nodes, len(n))
+ copy(nodes, n)
+ return nodes
+}
+
+func (n Nodes) CopyAppend(nodes ...Node) Nodes {
+ numN := len(n)
+ out := make(Nodes, numN+len(nodes))
+ copy(out, n)
+ for j, node := range nodes {
+ out[numN+j] = node
+ }
+ return out
+}
+
+func (n Nodes) SplitAround(pivot Node) (Nodes, Nodes) {
+ pivotIdx := len(n)
+ for i := range n {
+ if n[i].ID == pivot.ID {
+ pivotIdx = i
+ break
+ }
+ }
+ left := n[:pivotIdx]
+ right := Nodes{}
+ if pivotIdx+1 < len(n) {
+ right = n[pivotIdx+1:]
+ }
+
+ return left, right
+}
+
+func (n Nodes) FirstNodeWithType(nodeTypes types.NodeType) Node {
+ for i := range n {
+ if n[i].NodeType.Is(nodeTypes) {
+ return n[i]
+ }
+ }
+ return Node{}
+}
+
+func (n Nodes) WithType(nodeTypes types.NodeType) Nodes {
+ count := 0
+ for i := range n {
+ if n[i].NodeType.Is(nodeTypes) {
+ count++
+ }
+ }
+
+ out, j := make(Nodes, count), 0
+ for i := range n {
+ if n[i].NodeType.Is(nodeTypes) {
+ out[j] = n[i]
+ j++
+ }
+ }
+ return out
+}
+
+func (n Nodes) WithoutType(nodeTypes types.NodeType) Nodes {
+ count := 0
+ for i := range n {
+ if !n[i].NodeType.Is(nodeTypes) {
+ count++
+ }
+ }
+
+ out, j := make(Nodes, count), 0
+ for i := range n {
+ if !n[i].NodeType.Is(nodeTypes) {
+ out[j] = n[i]
+ j++
+ }
+ }
+ return out
+}
+
+func (n Nodes) WithoutNode(nodeToExclude Node) Nodes {
+ idxToExclude := len(n)
+ for i := range n {
+ if n[i].ID == nodeToExclude.ID {
+ idxToExclude = i
+ break
+ }
+ }
+ if idxToExclude == len(n) {
+ return n
+ }
+ out, j := make(Nodes, len(n)-1), 0
+ for i := range n {
+ if i == idxToExclude {
+ continue
+ }
+ out[j] = n[i]
+ j++
+ }
+ return out
+}
+
+func (n Nodes) Filter(filter func(Node) bool) Nodes {
+ trufa, count := make([]bool, len(n)), 0
+ for i := range n {
+ if filter(n[i]) {
+ trufa[i] = true
+ count += 1
+ }
+ }
+ out, j := make(Nodes, count), 0
+ for i := range n {
+ if trufa[i] {
+ out[j] = n[i]
+ j++
+ }
+ }
+ return out
+}
+
+func (n Nodes) FirstSatisfying(filter func(Node) bool) Node {
+ for i := range n {
+ if filter(n[i]) {
+ return n[i]
+ }
+ }
+ return Node{}
+}
+
+func (n Nodes) WithinNestingLevel(deepestNestingLevel int) Nodes {
+ count := 0
+ for i := range n {
+ if n[i].NestingLevel <= deepestNestingLevel {
+ count++
+ }
+ }
+ out, j := make(Nodes, count), 0
+ for i := range n {
+ if n[i].NestingLevel <= deepestNestingLevel {
+ out[j] = n[i]
+ j++
+ }
+ }
+ return out
+}
+
+func (n Nodes) SortedByDescendingNestingLevel() Nodes {
+ out := make(Nodes, len(n))
+ copy(out, n)
+ sort.SliceStable(out, func(i int, j int) bool {
+ return out[i].NestingLevel > out[j].NestingLevel
+ })
+
+ return out
+}
+
+func (n Nodes) SortedByAscendingNestingLevel() Nodes {
+ out := make(Nodes, len(n))
+ copy(out, n)
+ sort.SliceStable(out, func(i int, j int) bool {
+ return out[i].NestingLevel < out[j].NestingLevel
+ })
+
+ return out
+}
+
+func (n Nodes) FirstWithNestingLevel(level int) Node {
+ for i := range n {
+ if n[i].NestingLevel == level {
+ return n[i]
+ }
+ }
+ return Node{}
+}
+
+func (n Nodes) Reverse() Nodes {
+ out := make(Nodes, len(n))
+ for i := range n {
+ out[len(n)-1-i] = n[i]
+ }
+ return out
+}
+
+func (n Nodes) Texts() []string {
+ out := make([]string, len(n))
+ for i := range n {
+ out[i] = n[i].Text
+ }
+ return out
+}
+
+func (n Nodes) Labels() [][]string {
+ out := make([][]string, len(n))
+ for i := range n {
+ if n[i].Labels == nil {
+ out[i] = []string{}
+ } else {
+ out[i] = []string(n[i].Labels)
+ }
+ }
+ return out
+}
+
+func (n Nodes) UnionOfLabels() []string {
+ out := []string{}
+ seen := map[string]bool{}
+ for i := range n {
+ for _, label := range n[i].Labels {
+ if !seen[label] {
+ seen[label] = true
+ out = append(out, label)
+ }
+ }
+ }
+ return out
+}
+
+func (n Nodes) CodeLocations() []types.CodeLocation {
+ out := make([]types.CodeLocation, len(n))
+ for i := range n {
+ out[i] = n[i].CodeLocation
+ }
+ return out
+}
+
+func (n Nodes) BestTextFor(node Node) string {
+ if node.Text != "" {
+ return node.Text
+ }
+ parentNestingLevel := node.NestingLevel - 1
+ for i := range n {
+ if n[i].Text != "" && n[i].NestingLevel == parentNestingLevel {
+ return n[i].Text
+ }
+ }
+
+ return ""
+}
+
+func (n Nodes) ContainsNodeID(id uint) bool {
+ for i := range n {
+ if n[i].ID == id {
+ return true
+ }
+ }
+ return false
+}
+
+func (n Nodes) HasNodeMarkedPending() bool {
+ for i := range n {
+ if n[i].MarkedPending {
+ return true
+ }
+ }
+ return false
+}
+
+func (n Nodes) HasNodeMarkedFocus() bool {
+ for i := range n {
+ if n[i].MarkedFocus {
+ return true
+ }
+ }
+ return false
+}
+
+func (n Nodes) HasNodeMarkedSerial() bool {
+ for i := range n {
+ if n[i].MarkedSerial {
+ return true
+ }
+ }
+ return false
+}
+
+func (n Nodes) FirstNodeMarkedOrdered() Node {
+ for i := range n {
+ if n[i].MarkedOrdered {
+ return n[i]
+ }
+ }
+ return Node{}
+}
+
+func (n Nodes) IndexOfFirstNodeMarkedOrdered() int {
+ for i := range n {
+ if n[i].MarkedOrdered {
+ return i
+ }
+ }
+ return -1
+}
+
+func (n Nodes) GetMaxFlakeAttempts() int {
+ maxFlakeAttempts := 0
+ for i := range n {
+ if n[i].FlakeAttempts > 0 {
+ maxFlakeAttempts = n[i].FlakeAttempts
+ }
+ }
+ return maxFlakeAttempts
+}
+
+func (n Nodes) GetMaxMustPassRepeatedly() int {
+ maxMustPassRepeatedly := 0
+ for i := range n {
+ if n[i].MustPassRepeatedly > 0 {
+ maxMustPassRepeatedly = n[i].MustPassRepeatedly
+ }
+ }
+ return maxMustPassRepeatedly
+}
+
+func unrollInterfaceSlice(args interface{}) []interface{} {
+ v := reflect.ValueOf(args)
+ if v.Kind() != reflect.Slice {
+ return []interface{}{args}
+ }
+ out := []interface{}{}
+ for i := 0; i < v.Len(); i++ {
+ el := reflect.ValueOf(v.Index(i).Interface())
+ if el.Kind() == reflect.Slice && el.Type() != reflect.TypeOf(Labels{}) {
+ out = append(out, unrollInterfaceSlice(el.Interface())...)
+ } else {
+ out = append(out, v.Index(i).Interface())
+ }
+ }
+ return out
+}
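+
+// For illustration, unrollInterfaceSlice flattens nested argument slices but
+// leaves Labels (which are themselves a slice type) intact:
+//
+//	args := []interface{}{Labels{"fast"}, []interface{}{Serial, Ordered}}
+//	unrollInterfaceSlice(args) // -> [Labels{"fast"}, Serial, Ordered]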
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go b/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go
new file mode 100644
index 0000000..84eea0a
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go
@@ -0,0 +1,171 @@
+package internal
+
+import (
+ "math/rand"
+ "sort"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type SortableSpecs struct {
+ Specs Specs
+ Indexes []int
+}
+
+func NewSortableSpecs(specs Specs) *SortableSpecs {
+ indexes := make([]int, len(specs))
+ for i := range specs {
+ indexes[i] = i
+ }
+ return &SortableSpecs{
+ Specs: specs,
+ Indexes: indexes,
+ }
+}
+func (s *SortableSpecs) Len() int { return len(s.Indexes) }
+func (s *SortableSpecs) Swap(i, j int) { s.Indexes[i], s.Indexes[j] = s.Indexes[j], s.Indexes[i] }
+func (s *SortableSpecs) Less(i, j int) bool {
+ a, b := s.Specs[s.Indexes[i]], s.Specs[s.Indexes[j]]
+
+ aNodes, bNodes := a.Nodes.WithType(types.NodeTypesForContainerAndIt), b.Nodes.WithType(types.NodeTypesForContainerAndIt)
+
+ firstOrderedAIdx, firstOrderedBIdx := aNodes.IndexOfFirstNodeMarkedOrdered(), bNodes.IndexOfFirstNodeMarkedOrdered()
+ if firstOrderedAIdx > -1 && firstOrderedBIdx > -1 && aNodes[firstOrderedAIdx].ID == bNodes[firstOrderedBIdx].ID {
+ // strictly preserve order within an ordered container. IDs are generated monotonically, so comparing them tracks declaration order
+ return aNodes.FirstNodeWithType(types.NodeTypeIt).ID < bNodes.FirstNodeWithType(types.NodeTypeIt).ID
+ }
+
+ // if either spec is in an ordered container - only use the nodes up to the outermost ordered container
+ if firstOrderedAIdx > -1 {
+ aNodes = aNodes[:firstOrderedAIdx+1]
+ }
+ if firstOrderedBIdx > -1 {
+ bNodes = bNodes[:firstOrderedBIdx+1]
+ }
+
+ for i := 0; i < len(aNodes) && i < len(bNodes); i++ {
+ aCL, bCL := aNodes[i].CodeLocation, bNodes[i].CodeLocation
+ if aCL.FileName != bCL.FileName {
+ return aCL.FileName < bCL.FileName
+ }
+ if aCL.LineNumber != bCL.LineNumber {
+ return aCL.LineNumber < bCL.LineNumber
+ }
+ }
+ // either everything so far is equal, or the two specs have different numbers of code locations
+ if len(aNodes) != len(bNodes) {
+ return len(aNodes) < len(bNodes)
+ }
+ // ok, now we are sure everything was equal. so we use the spec text to break ties
+ for i := 0; i < len(aNodes); i++ {
+ if aNodes[i].Text != bNodes[i].Text {
+ return aNodes[i].Text < bNodes[i].Text
+ }
+ }
+ // ok, all those texts were equal. we'll use the ID of the most deeply nested node as a last resort
+ return aNodes[len(aNodes)-1].ID < bNodes[len(bNodes)-1].ID
+}
+
+type GroupedSpecIndices []SpecIndices
+type SpecIndices []int
+
+func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices, GroupedSpecIndices) {
+ /*
+ Ginkgo has sophisticated support for randomizing specs. Specs are guaranteed to have the same
+ order for a given seed across test runs.
+
+ By default only top-level containers and specs are shuffled - this makes for a more intuitive debugging
+ experience - specs within a given container run in the order they appear in the file.
+
+ Developers can set -randomizeAllSpecs to shuffle _all_ specs.
+
+ In addition, spec containers can be marked as Ordered. Specs within an Ordered container are never shuffled.
+
+ Finally, specs and spec containers can be marked as Serial. When running in parallel, serial specs run on Process #1 _after_ all other processes have finished.
+ */
+
+ // Seed a new random source based on the configured random seed.
+ r := rand.New(rand.NewSource(suiteConfig.RandomSeed))
+
+ // first, we sort the entire suite to ensure a deterministic order. the sort is performed by filename, then line number, and then spec text. this ensures every parallel process has the exact same spec order and is only necessary to cover the edge case where the user iterates over a map to generate specs.
+ sortableSpecs := NewSortableSpecs(specs)
+ sort.Sort(sortableSpecs)
+
+ // then we break things into execution groups
+ // a group represents a single unit of execution and is a collection of SpecIndices
+ // usually a group is just a single spec, however ordered containers must be preserved as a single group
+ executionGroupIDs := []uint{}
+ executionGroups := map[uint]SpecIndices{}
+ for _, idx := range sortableSpecs.Indexes {
+ spec := specs[idx]
+ groupNode := spec.Nodes.FirstNodeMarkedOrdered()
+ if groupNode.IsZero() {
+ groupNode = spec.Nodes.FirstNodeWithType(types.NodeTypeIt)
+ }
+ executionGroups[groupNode.ID] = append(executionGroups[groupNode.ID], idx)
+ if len(executionGroups[groupNode.ID]) == 1 {
+ executionGroupIDs = append(executionGroupIDs, groupNode.ID)
+ }
+ }
+
+ // now, we only shuffle all the execution groups if we're randomizing all specs; otherwise
+ // we only shuffle the outermost containers. So we need to form shufflable groupings of group IDs
+ shufflableGroupingIDs := []uint{}
+ shufflableGroupingIDToGroupIDs := map[uint][]uint{}
+
+ // for each execution group we're going to have to pick a node to represent how the
+ // execution group is grouped for shuffling:
+ nodeTypesToShuffle := types.NodeTypesForContainerAndIt
+ if suiteConfig.RandomizeAllSpecs {
+ nodeTypesToShuffle = types.NodeTypeIt
+ }
+
+ //so, for each execution group:
+ for _, groupID := range executionGroupIDs {
+ // pick out a representative spec
+ representativeSpec := specs[executionGroups[groupID][0]]
+
+ // and grab the node on the spec that will represent which shufflable group this execution group belongs to
+ shufflableGroupingNode := representativeSpec.Nodes.FirstNodeWithType(nodeTypesToShuffle)
+
+ //add the execution group to its shufflable group
+ shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID] = append(shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID], groupID)
+
+ //and if it's the first execution group in this shufflable grouping
+ if len(shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID]) == 1 {
+ // record the shufflable group ID
+ shufflableGroupingIDs = append(shufflableGroupingIDs, shufflableGroupingNode.ID)
+ }
+ }
+
+ // now we permute the sorted shufflable grouping IDs and build the ordered Groups
+ orderedGroups := GroupedSpecIndices{}
+ permutation := r.Perm(len(shufflableGroupingIDs))
+ for _, j := range permutation {
+ //let's get the execution group IDs for this shufflable group:
+ executionGroupIDsForJ := shufflableGroupingIDToGroupIDs[shufflableGroupingIDs[j]]
+ // and we'll add their associated specindices to the orderedGroups slice:
+ for _, executionGroupID := range executionGroupIDsForJ {
+ orderedGroups = append(orderedGroups, executionGroups[executionGroupID])
+ }
+ }
+
+ // If we're running in series, we're done.
+ if suiteConfig.ParallelTotal == 1 {
+ return orderedGroups, GroupedSpecIndices{}
+ }
+
+ // We're running in parallel so we need to partition the ordered groups into a parallelizable set and a serialized set.
+ // The parallelizable groups will run across all Ginkgo processes...
+ // ...the serial groups will only run on Process #1 after all other processes have exited.
+ parallelizableGroups, serialGroups := GroupedSpecIndices{}, GroupedSpecIndices{}
+ for _, specIndices := range orderedGroups {
+ if specs[specIndices[0]].Nodes.HasNodeMarkedSerial() {
+ serialGroups = append(serialGroups, specIndices)
+ } else {
+ parallelizableGroups = append(parallelizableGroups, specIndices)
+ }
+ }
+
+ return parallelizableGroups, serialGroups
+}
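+
+// For illustration, ordering depends only on the suite's random seed, so two
+// calls with the same configuration produce identical groupings - the property
+// parallel processes rely on to agree on a spec order:
+//
+//	cfg := types.SuiteConfig{RandomSeed: 17, ParallelTotal: 1}
+//	groupsA, _ := OrderSpecs(specs, cfg)
+//	groupsB, _ := OrderSpecs(specs, cfg)
+//	// groupsA and groupsB contain the same SpecIndices in the same order.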
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go
new file mode 100644
index 0000000..4a1c094
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go
@@ -0,0 +1,250 @@
+package internal
+
+import (
+ "bytes"
+ "io"
+ "os"
+ "time"
+)
+
+const BAILOUT_TIME = 1 * time.Second
+const BAILOUT_MESSAGE = `Ginkgo detected an issue while intercepting output.
+
+When running in parallel, Ginkgo captures stdout and stderr output
+and attaches it to the running spec. It looks like that process is getting
+stuck for this suite.
+
+This usually happens if you, or a library you are using, spin up an external
+process and set cmd.Stdout = os.Stdout and/or cmd.Stderr = os.Stderr. This
+causes the external process to keep Ginkgo's output interceptor pipe open and
+causes output interception to hang.
+
+Ginkgo has detected this and short-circuited the capture process. The specs
+will continue running after this message; however, output from the external
+process that caused this issue will not be captured.
+
+You have several options to fix this. In preferred order they are:
+
+1. Pass GinkgoWriter instead of os.Stdout or os.Stderr to your process.
+2. Ensure your process exits before the current spec completes. If your
+process is long-lived and must cross spec boundaries, this option won't
+work for you.
+3. Pause Ginkgo's output interceptor before starting your process and then
+resume it after. Use PauseOutputInterception() and ResumeOutputInterception()
+to do this.
+4. Set --output-interceptor-mode=none when running your Ginkgo suite. This will
+turn off all output interception but allow specs to run in parallel without this
+issue. You may miss important output if you do this including output from Go's
+race detector.
+
+More details on issue #851 - https://github.com/onsi/ginkgo/issues/851
+`
+
+/*
+The OutputInterceptor is used to
+intercept and capture all stdout and stderr output during a test run.
+*/
+type OutputInterceptor interface {
+ StartInterceptingOutput()
+ StartInterceptingOutputAndForwardTo(io.Writer)
+ StopInterceptingAndReturnOutput() string
+
+ PauseIntercepting()
+ ResumeIntercepting()
+
+ Shutdown()
+}
+
+type NoopOutputInterceptor struct{}
+
+func (interceptor NoopOutputInterceptor) StartInterceptingOutput() {}
+func (interceptor NoopOutputInterceptor) StartInterceptingOutputAndForwardTo(io.Writer) {}
+func (interceptor NoopOutputInterceptor) StopInterceptingAndReturnOutput() string { return "" }
+func (interceptor NoopOutputInterceptor) PauseIntercepting() {}
+func (interceptor NoopOutputInterceptor) ResumeIntercepting() {}
+func (interceptor NoopOutputInterceptor) Shutdown() {}
+
+type pipePair struct {
+ reader *os.File
+ writer *os.File
+}
+
+func startPipeFactory(pipeChannel chan pipePair, shutdown chan interface{}) {
+ for {
+ //make the next pipe...
+ pair := pipePair{}
+ pair.reader, pair.writer, _ = os.Pipe()
+ select {
+ //...and provide it to the next consumer (they are responsible for closing the files)
+ case pipeChannel <- pair:
+ continue
+ //...or close the files if we were told to shutdown
+ case <-shutdown:
+ pair.reader.Close()
+ pair.writer.Close()
+ return
+ }
+ }
+}
+
+type interceptorImplementation interface {
+ CreateStdoutStderrClones() (*os.File, *os.File)
+ ConnectPipeToStdoutStderr(*os.File)
+ RestoreStdoutStderrFromClones(*os.File, *os.File)
+ ShutdownClones(*os.File, *os.File)
+}
+
+type genericOutputInterceptor struct {
+ intercepting bool
+
+ stdoutClone *os.File
+ stderrClone *os.File
+ pipe pipePair
+
+ shutdown chan interface{}
+ emergencyBailout chan interface{}
+ pipeChannel chan pipePair
+ interceptedContent chan string
+
+ forwardTo io.Writer
+ accumulatedOutput string
+
+ implementation interceptorImplementation
+}
+
+func (interceptor *genericOutputInterceptor) StartInterceptingOutput() {
+ interceptor.StartInterceptingOutputAndForwardTo(io.Discard)
+}
+
+func (interceptor *genericOutputInterceptor) StartInterceptingOutputAndForwardTo(w io.Writer) {
+ if interceptor.intercepting {
+ return
+ }
+ interceptor.accumulatedOutput = ""
+ interceptor.forwardTo = w
+ interceptor.ResumeIntercepting()
+}
+
+func (interceptor *genericOutputInterceptor) StopInterceptingAndReturnOutput() string {
+ if interceptor.intercepting {
+ interceptor.PauseIntercepting()
+ }
+ return interceptor.accumulatedOutput
+}
+
+func (interceptor *genericOutputInterceptor) ResumeIntercepting() {
+ if interceptor.intercepting {
+ return
+ }
+ interceptor.intercepting = true
+ if interceptor.stdoutClone == nil {
+ interceptor.stdoutClone, interceptor.stderrClone = interceptor.implementation.CreateStdoutStderrClones()
+ interceptor.shutdown = make(chan interface{})
+ go startPipeFactory(interceptor.pipeChannel, interceptor.shutdown)
+ }
+
+ // Now we make a pipe; we'll use it to capture anything written to the 1 and 2 file descriptors (which is how everything else in the world tries to log to stdout and stderr)
+ // we get the pipe from our pipe factory. it runs in the background so we can request the next pipe while the spec being intercepted is running
+ interceptor.pipe = <-interceptor.pipeChannel
+
+ interceptor.emergencyBailout = make(chan interface{})
+
+ //Spin up a goroutine to copy data from the pipe into a buffer, this is how we capture any output the user is emitting
+ go func() {
+ buffer := &bytes.Buffer{}
+ destination := io.MultiWriter(buffer, interceptor.forwardTo)
+ copyFinished := make(chan interface{})
+ reader := interceptor.pipe.reader
+ go func() {
+ io.Copy(destination, reader)
+ reader.Close() // close the read end of the pipe so we don't leak a file descriptor
+ close(copyFinished)
+ }()
+ select {
+ case <-copyFinished:
+ interceptor.interceptedContent <- buffer.String()
+ case <-interceptor.emergencyBailout:
+ interceptor.interceptedContent <- ""
+ }
+ }()
+
+ interceptor.implementation.ConnectPipeToStdoutStderr(interceptor.pipe.writer)
+}
+
+func (interceptor *genericOutputInterceptor) PauseIntercepting() {
+ if !interceptor.intercepting {
+ return
+ }
+ // first we have to close the write end of the pipe. To do this we have to close all file descriptors pointing
+ // to the write end. So that would be the pipewriter itself, and FD #1 and FD #2 if we've Dup2'd them
+ interceptor.pipe.writer.Close() // the pipewriter itself
+
+ // we also need to stop intercepting. we do that by pointing the #1 and #2 file descriptors back at their original stdout and stderr file descriptions;
+ // this also severs the descriptors' remaining connection to the write end of the pipe
+ interceptor.implementation.RestoreStdoutStderrFromClones(interceptor.stdoutClone, interceptor.stderrClone)
+
+ var content string
+ select {
+ case content = <-interceptor.interceptedContent:
+ case <-time.After(BAILOUT_TIME):
+ /*
+ By closing all the file descriptors associated with the pipe writer's file description, the io.Copy reading from the reader
+ should eventually receive an EOF and exit.
+
+ **However**, if the user has spun up an external process and passed in os.Stdout/os.Stderr to cmd.Stdout/cmd.Stderr then the external process
+ will have a file descriptor pointing to the pipe writer's file description and it will not close until the external process exits.
+
+ That would leave us hanging here waiting for the io.Copy to close forever. Instead we invoke this emergency escape valve. This returns whatever
+ content we've got but leaves the io.Copy running. This ensures the external process can continue writing without hanging at the cost of leaking a goroutine
+ and file descriptor (those these will be cleaned up when the process exits).
+
+ We tack on a message to notify the user that they've hit this edge case and encourage them to address it.
+ */
+ close(interceptor.emergencyBailout)
+ content = <-interceptor.interceptedContent + BAILOUT_MESSAGE
+ }
+
+ interceptor.accumulatedOutput += content
+ interceptor.intercepting = false
+}
+
+func (interceptor *genericOutputInterceptor) Shutdown() {
+ interceptor.PauseIntercepting()
+
+ if interceptor.stdoutClone != nil {
+ close(interceptor.shutdown)
+ interceptor.implementation.ShutdownClones(interceptor.stdoutClone, interceptor.stderrClone)
+ interceptor.stdoutClone = nil
+ interceptor.stderrClone = nil
+ }
+}
+
+/* This is used on windows builds but included here so it can be explicitly tested on unix systems too */
+func NewOSGlobalReassigningOutputInterceptor() OutputInterceptor {
+ return &genericOutputInterceptor{
+ interceptedContent: make(chan string),
+ pipeChannel: make(chan pipePair),
+ shutdown: make(chan interface{}),
+ implementation: &osGlobalReassigningOutputInterceptorImpl{},
+ }
+}
+
+type osGlobalReassigningOutputInterceptorImpl struct{}
+
+func (impl *osGlobalReassigningOutputInterceptorImpl) CreateStdoutStderrClones() (*os.File, *os.File) {
+ return os.Stdout, os.Stderr
+}
+
+func (impl *osGlobalReassigningOutputInterceptorImpl) ConnectPipeToStdoutStderr(pipeWriter *os.File) {
+ os.Stdout = pipeWriter
+ os.Stderr = pipeWriter
+}
+
+func (impl *osGlobalReassigningOutputInterceptorImpl) RestoreStdoutStderrFromClones(stdoutClone *os.File, stderrClone *os.File) {
+ os.Stdout = stdoutClone
+ os.Stderr = stderrClone
+}
+
+func (impl *osGlobalReassigningOutputInterceptorImpl) ShutdownClones(_ *os.File, _ *os.File) {
+ //noop
+}
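+
+// For illustration, option 3 from BAILOUT_MESSAGE looks roughly like this inside
+// a spec (the command is a placeholder; Pause/ResumeOutputInterception are part
+// of Ginkgo's public DSL):
+//
+//	PauseOutputInterception()
+//	cmd := exec.Command("long-lived-daemon")
+//	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
+//	err := cmd.Start()
+//	ResumeOutputInterception()
+//
+// While interception is paused the daemon writes to the real stdout/stderr and
+// never holds the interceptor's pipe open.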
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go
new file mode 100644
index 0000000..8a237f4
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go
@@ -0,0 +1,73 @@
+//go:build freebsd || openbsd || netbsd || dragonfly || darwin || linux || solaris
+// +build freebsd openbsd netbsd dragonfly darwin linux solaris
+
+package internal
+
+import (
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+func NewOutputInterceptor() OutputInterceptor {
+ return &genericOutputInterceptor{
+ interceptedContent: make(chan string),
+ pipeChannel: make(chan pipePair),
+ shutdown: make(chan interface{}),
+ implementation: &dupSyscallOutputInterceptorImpl{},
+ }
+}
+
+type dupSyscallOutputInterceptorImpl struct{}
+
+func (impl *dupSyscallOutputInterceptorImpl) CreateStdoutStderrClones() (*os.File, *os.File) {
+ // To clone stdout and stderr we:
+ // First, create two clone file descriptors that point to the stdout and stderr file descriptions
+ stdoutCloneFD, _ := unix.Dup(1)
+ stderrCloneFD, _ := unix.Dup(2)
+
+ // Importantly, set the fds to FD_CLOEXEC to prevent them from leaking into child processes
+ // https://github.com/onsi/ginkgo/issues/1191
+ flags, err := unix.FcntlInt(uintptr(stdoutCloneFD), unix.F_GETFD, 0)
+ if err == nil {
+ unix.FcntlInt(uintptr(stdoutCloneFD), unix.F_SETFD, flags|unix.FD_CLOEXEC)
+ }
+ flags, err = unix.FcntlInt(uintptr(stderrCloneFD), unix.F_GETFD, 0)
+ if err == nil {
+ unix.FcntlInt(uintptr(stderrCloneFD), unix.F_SETFD, flags|unix.FD_CLOEXEC)
+ }
+
+ // And then wrap the clone file descriptors in files.
+ // One benefit of this (that we don't use yet) is that we can actually write
+ // to these files to emit output to the console even though we're intercepting output
+ stdoutClone := os.NewFile(uintptr(stdoutCloneFD), "stdout-clone")
+ stderrClone := os.NewFile(uintptr(stderrCloneFD), "stderr-clone")
+
+ //these clones remain alive throughout the lifecycle of the suite and don't need to be recreated
+ //this speeds things up a bit, actually.
+ return stdoutClone, stderrClone
+}
+
+func (impl *dupSyscallOutputInterceptorImpl) ConnectPipeToStdoutStderr(pipeWriter *os.File) {
+ // To redirect output to our pipe we need to point the 1 and 2 file descriptors (which is how the world tries to log things)
+ // to the write end of the pipe.
+ // We do this with Dup2 (possibly Dup3 on some architectures) to have file descriptors 1 and 2 point to the same file description as the pipeWriter
+ // This effectively shunts data written to stdout and stderr to the write end of our pipe
+ unix.Dup2(int(pipeWriter.Fd()), 1)
+ unix.Dup2(int(pipeWriter.Fd()), 2)
+}
+
+func (impl *dupSyscallOutputInterceptorImpl) RestoreStdoutStderrFromClones(stdoutClone *os.File, stderrClone *os.File) {
+ // To restore stdout/stderr from the clones we point the 1 and 2 file descriptors
+ // back at the original file descriptions that we saved off in the clones.
+ // This has the added benefit of closing the connection between these descriptors and the write end of the pipe
+ // which is important to cause the io.Copy on the pipe.Reader to end.
+ unix.Dup2(int(stdoutClone.Fd()), 1)
+ unix.Dup2(int(stderrClone.Fd()), 2)
+}
+
+func (impl *dupSyscallOutputInterceptorImpl) ShutdownClones(stdoutClone *os.File, stderrClone *os.File) {
+ // We're done with the clones so we can close them to clean up after ourselves
+ stdoutClone.Close()
+ stderrClone.Close()
+}
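+
+// For illustration, once ConnectPipeToStdoutStderr has run, both of these writes
+// land in the interceptor's pipe, because fmt.Println and os.Stderr ultimately
+// write to file descriptors 1 and 2:
+//
+//	fmt.Println("captured on stdout")
+//	os.Stderr.WriteString("captured on stderr\n")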
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_wasm.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_wasm.go
new file mode 100644
index 0000000..4c37493
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_wasm.go
@@ -0,0 +1,7 @@
+//go:build wasm
+
+package internal
+
+func NewOutputInterceptor() OutputInterceptor {
+ return &NoopOutputInterceptor{}
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_win.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_win.go
new file mode 100644
index 0000000..30c2851
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_win.go
@@ -0,0 +1,7 @@
+// +build windows
+
+package internal
+
+func NewOutputInterceptor() OutputInterceptor {
+ return NewOSGlobalReassigningOutputInterceptor()
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go
new file mode 100644
index 0000000..b3cd642
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go
@@ -0,0 +1,72 @@
+package parallel_support
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type BeforeSuiteState struct {
+ Data []byte
+ State types.SpecState
+}
+
+type ParallelIndexCounter struct {
+ Index int
+}
+
+var ErrorGone = fmt.Errorf("gone")
+var ErrorFailed = fmt.Errorf("failed")
+var ErrorEarly = fmt.Errorf("early")
+
+var POLLING_INTERVAL = 50 * time.Millisecond
+
+type Server interface {
+ Start()
+ Close()
+ Address() string
+ RegisterAlive(node int, alive func() bool)
+ GetSuiteDone() chan interface{}
+ GetOutputDestination() io.Writer
+ SetOutputDestination(io.Writer)
+}
+
+type Client interface {
+ Connect() bool
+ Close() error
+
+ PostSuiteWillBegin(report types.Report) error
+ PostDidRun(report types.SpecReport) error
+ PostSuiteDidEnd(report types.Report) error
+ PostReportBeforeSuiteCompleted(state types.SpecState) error
+ BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error)
+ PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error
+ BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error)
+ BlockUntilNonprimaryProcsHaveFinished() error
+ BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error)
+ FetchNextCounter() (int, error)
+ PostAbort() error
+ ShouldAbort() bool
+ PostEmitProgressReport(report types.ProgressReport) error
+ Write(p []byte) (int, error)
+}
+
+func NewServer(parallelTotal int, reporter reporters.Reporter) (Server, error) {
+ if os.Getenv("GINKGO_PARALLEL_PROTOCOL") == "HTTP" {
+ return newHttpServer(parallelTotal, reporter)
+ } else {
+ return newRPCServer(parallelTotal, reporter)
+ }
+}
+
+func NewClient(serverHost string) Client {
+ if os.Getenv("GINKGO_PARALLEL_PROTOCOL") == "HTTP" {
+ return newHttpClient(serverHost)
+ } else {
+ return newRPCClient(serverHost)
+ }
+}
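+
+// For illustration, the transport is selected via the environment before the
+// server and clients are constructed (RPC is the default):
+//
+//	os.Setenv("GINKGO_PARALLEL_PROTOCOL", "HTTP")
+//	server, _ := NewServer(parallelTotal, reporter)
+//	server.Start()
+//	client := NewClient(server.Address())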
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go
new file mode 100644
index 0000000..6547c7a
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go
@@ -0,0 +1,169 @@
+package parallel_support
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type httpClient struct {
+ serverHost string
+}
+
+func newHttpClient(serverHost string) *httpClient {
+ return &httpClient{
+ serverHost: serverHost,
+ }
+}
+
+func (client *httpClient) Connect() bool {
+ resp, err := http.Get(client.serverHost + "/up")
+ if err != nil {
+ return false
+ }
+ resp.Body.Close()
+ return resp.StatusCode == http.StatusOK
+}
+
+func (client *httpClient) Close() error {
+ return nil
+}
+
+func (client *httpClient) post(path string, data interface{}) error {
+ var body io.Reader
+ if data != nil {
+ encoded, err := json.Marshal(data)
+ if err != nil {
+ return err
+ }
+ body = bytes.NewBuffer(encoded)
+ }
+ resp, err := http.Post(client.serverHost+path, "application/json", body)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("received unexpected status code %d", resp.StatusCode)
+ }
+ return nil
+}
+
+func (client *httpClient) poll(path string, data interface{}) error {
+ for {
+ resp, err := http.Get(client.serverHost + path)
+ if err != nil {
+ return err
+ }
+ if resp.StatusCode == http.StatusTooEarly {
+ resp.Body.Close()
+ time.Sleep(POLLING_INTERVAL)
+ continue
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode == http.StatusGone {
+ return ErrorGone
+ }
+ if resp.StatusCode == http.StatusFailedDependency {
+ return ErrorFailed
+ }
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("received unexpected status code %d", resp.StatusCode)
+ }
+ if data != nil {
+ return json.NewDecoder(resp.Body).Decode(data)
+ }
+ return nil
+ }
+}
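+
+// For illustration, poll maps the server's status codes onto the sentinel errors
+// declared in client_server.go:
+//
+//	425 Too Early         -> sleep POLLING_INTERVAL, then retry
+//	410 Gone              -> ErrorGone
+//	424 Failed Dependency -> ErrorFailed
+//	any other non-200     -> a generic error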
+
+func (client *httpClient) PostSuiteWillBegin(report types.Report) error {
+ return client.post("/suite-will-begin", report)
+}
+
+func (client *httpClient) PostDidRun(report types.SpecReport) error {
+ return client.post("/did-run", report)
+}
+
+func (client *httpClient) PostSuiteDidEnd(report types.Report) error {
+ return client.post("/suite-did-end", report)
+}
+
+func (client *httpClient) PostEmitProgressReport(report types.ProgressReport) error {
+ return client.post("/progress-report", report)
+}
+
+func (client *httpClient) PostReportBeforeSuiteCompleted(state types.SpecState) error {
+ return client.post("/report-before-suite-completed", state)
+}
+
+func (client *httpClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) {
+ var state types.SpecState
+ err := client.poll("/report-before-suite-state", &state)
+ if err == ErrorGone {
+ return types.SpecStateFailed, nil
+ }
+ return state, err
+}
+
+func (client *httpClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error {
+ beforeSuiteState := BeforeSuiteState{
+ State: state,
+ Data: data,
+ }
+ return client.post("/before-suite-completed", beforeSuiteState)
+}
+
+func (client *httpClient) BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) {
+ var beforeSuiteState BeforeSuiteState
+ err := client.poll("/before-suite-state", &beforeSuiteState)
+ if err == ErrorGone {
+ return types.SpecStateInvalid, nil, types.GinkgoErrors.SynchronizedBeforeSuiteDisappearedOnProc1()
+ }
+ return beforeSuiteState.State, beforeSuiteState.Data, err
+}
+
+func (client *httpClient) BlockUntilNonprimaryProcsHaveFinished() error {
+ return client.poll("/have-nonprimary-procs-finished", nil)
+}
+
+func (client *httpClient) BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) {
+ var report types.Report
+ err := client.poll("/aggregated-nonprimary-procs-report", &report)
+ if err == ErrorGone {
+ return types.Report{}, types.GinkgoErrors.AggregatedReportUnavailableDueToNodeDisappearing()
+ }
+ return report, err
+}
+
+func (client *httpClient) FetchNextCounter() (int, error) {
+ var counter ParallelIndexCounter
+ err := client.poll("/counter", &counter)
+ return counter.Index, err
+}
+
+func (client *httpClient) PostAbort() error {
+ return client.post("/abort", nil)
+}
+
+func (client *httpClient) ShouldAbort() bool {
+ err := client.poll("/abort", nil)
+ if err == ErrorGone {
+ return true
+ }
+ return false
+}
+
+func (client *httpClient) Write(p []byte) (int, error) {
+ resp, err := http.Post(client.serverHost+"/emit-output", "text/plain;charset=UTF-8", bytes.NewReader(p))
+ if err != nil {
+ return 0, err
+ }
+ resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return 0, fmt.Errorf("failed to emit output")
+ }
+ return len(p), nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go
new file mode 100644
index 0000000..d2c71ab
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go
@@ -0,0 +1,242 @@
+/*
+
+The parallel_support package provides the pieces to allow Ginkgo test suites to report to remote listeners.
+This is used, primarily, to enable streaming parallel test output but has, in principle, broader applications (e.g. streaming test output to a browser).
+
+*/
+
+package parallel_support
+
+import (
+ "encoding/json"
+ "io"
+ "net"
+ "net/http"
+
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+/*
+httpServer spins up on an automatically selected port and listens for communication from the forwarding reporter.
+It then forwards that communication to attached reporters.
+*/
+type httpServer struct {
+ listener net.Listener
+ handler *ServerHandler
+}
+
+// Create a new server, automatically selecting a port
+func newHttpServer(parallelTotal int, reporter reporters.Reporter) (*httpServer, error) {
+ listener, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ return nil, err
+ }
+ return &httpServer{
+ listener: listener,
+ handler: newServerHandler(parallelTotal, reporter),
+ }, nil
+}
+
+// Start the server. You don't need to `go s.Start()`, just `s.Start()`
+func (server *httpServer) Start() {
+ httpServer := &http.Server{}
+ mux := http.NewServeMux()
+ httpServer.Handler = mux
+
+ //streaming endpoints
+ mux.HandleFunc("/suite-will-begin", server.specSuiteWillBegin)
+ mux.HandleFunc("/did-run", server.didRun)
+ mux.HandleFunc("/suite-did-end", server.specSuiteDidEnd)
+ mux.HandleFunc("/emit-output", server.emitOutput)
+ mux.HandleFunc("/progress-report", server.emitProgressReport)
+
+ //synchronization endpoints
+ mux.HandleFunc("/report-before-suite-completed", server.handleReportBeforeSuiteCompleted)
+ mux.HandleFunc("/report-before-suite-state", server.handleReportBeforeSuiteState)
+ mux.HandleFunc("/before-suite-completed", server.handleBeforeSuiteCompleted)
+ mux.HandleFunc("/before-suite-state", server.handleBeforeSuiteState)
+ mux.HandleFunc("/have-nonprimary-procs-finished", server.handleHaveNonprimaryProcsFinished)
+ mux.HandleFunc("/aggregated-nonprimary-procs-report", server.handleAggregatedNonprimaryProcsReport)
+ mux.HandleFunc("/counter", server.handleCounter)
+ mux.HandleFunc("/up", server.handleUp)
+ mux.HandleFunc("/abort", server.handleAbort)
+
+ go httpServer.Serve(server.listener)
+}
+
+// Stop the server
+func (server *httpServer) Close() {
+ server.listener.Close()
+}
+
+// The address the server can be reached at. Pass this into the `ForwardingReporter`.
+func (server *httpServer) Address() string {
+ return "http://" + server.listener.Addr().String()
+}
+
+func (server *httpServer) GetSuiteDone() chan interface{} {
+ return server.handler.done
+}
+
+func (server *httpServer) GetOutputDestination() io.Writer {
+ return server.handler.outputDestination
+}
+
+func (server *httpServer) SetOutputDestination(w io.Writer) {
+ server.handler.outputDestination = w
+}
+
+func (server *httpServer) RegisterAlive(node int, alive func() bool) {
+ server.handler.registerAlive(node, alive)
+}
+
+//
+// Streaming Endpoints
+//
+
+// The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters`
+func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object interface{}) bool {
+ defer request.Body.Close()
+ if json.NewDecoder(request.Body).Decode(object) != nil {
+ writer.WriteHeader(http.StatusBadRequest)
+ return false
+ }
+ return true
+}
+
+func (server *httpServer) handleError(err error, writer http.ResponseWriter) bool {
+ if err == nil {
+ return false
+ }
+ switch err {
+ case ErrorEarly:
+ writer.WriteHeader(http.StatusTooEarly)
+ case ErrorGone:
+ writer.WriteHeader(http.StatusGone)
+ case ErrorFailed:
+ writer.WriteHeader(http.StatusFailedDependency)
+ default:
+ writer.WriteHeader(http.StatusInternalServerError)
+ }
+ return true
+}
+
+func (server *httpServer) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) {
+ var report types.Report
+ if !server.decode(writer, request, &report) {
+ return
+ }
+
+ server.handleError(server.handler.SpecSuiteWillBegin(report, voidReceiver), writer)
+}
+
+func (server *httpServer) didRun(writer http.ResponseWriter, request *http.Request) {
+ var report types.SpecReport
+ if !server.decode(writer, request, &report) {
+ return
+ }
+
+ server.handleError(server.handler.DidRun(report, voidReceiver), writer)
+}
+
+func (server *httpServer) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) {
+ var report types.Report
+ if !server.decode(writer, request, &report) {
+ return
+ }
+ server.handleError(server.handler.SpecSuiteDidEnd(report, voidReceiver), writer)
+}
+
+func (server *httpServer) emitOutput(writer http.ResponseWriter, request *http.Request) {
+ output, err := io.ReadAll(request.Body)
+ if err != nil {
+ writer.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+ var n int
+ server.handleError(server.handler.EmitOutput(output, &n), writer)
+}
+
+func (server *httpServer) emitProgressReport(writer http.ResponseWriter, request *http.Request) {
+ var report types.ProgressReport
+ if !server.decode(writer, request, &report) {
+ return
+ }
+ server.handleError(server.handler.EmitProgressReport(report, voidReceiver), writer)
+}
+
+func (server *httpServer) handleReportBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) {
+ var state types.SpecState
+ if !server.decode(writer, request, &state) {
+ return
+ }
+
+ server.handleError(server.handler.ReportBeforeSuiteCompleted(state, voidReceiver), writer)
+}
+
+func (server *httpServer) handleReportBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
+ var state types.SpecState
+ if server.handleError(server.handler.ReportBeforeSuiteState(voidSender, &state), writer) {
+ return
+ }
+ json.NewEncoder(writer).Encode(state)
+}
+
+func (server *httpServer) handleBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) {
+ var beforeSuiteState BeforeSuiteState
+ if !server.decode(writer, request, &beforeSuiteState) {
+ return
+ }
+
+ server.handleError(server.handler.BeforeSuiteCompleted(beforeSuiteState, voidReceiver), writer)
+}
+
+func (server *httpServer) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
+ var beforeSuiteState BeforeSuiteState
+ if server.handleError(server.handler.BeforeSuiteState(voidSender, &beforeSuiteState), writer) {
+ return
+ }
+ json.NewEncoder(writer).Encode(beforeSuiteState)
+}
+
+func (server *httpServer) handleHaveNonprimaryProcsFinished(writer http.ResponseWriter, request *http.Request) {
+ if server.handleError(server.handler.HaveNonprimaryProcsFinished(voidSender, voidReceiver), writer) {
+ return
+ }
+ writer.WriteHeader(http.StatusOK)
+}
+
+func (server *httpServer) handleAggregatedNonprimaryProcsReport(writer http.ResponseWriter, request *http.Request) {
+ var aggregatedReport types.Report
+ if server.handleError(server.handler.AggregatedNonprimaryProcsReport(voidSender, &aggregatedReport), writer) {
+ return
+ }
+ json.NewEncoder(writer).Encode(aggregatedReport)
+}
+
+func (server *httpServer) handleCounter(writer http.ResponseWriter, request *http.Request) {
+ var n int
+ if server.handleError(server.handler.Counter(voidSender, &n), writer) {
+ return
+ }
+ json.NewEncoder(writer).Encode(ParallelIndexCounter{Index: n})
+}
+
+func (server *httpServer) handleUp(writer http.ResponseWriter, request *http.Request) {
+ writer.WriteHeader(http.StatusOK)
+}
+
+func (server *httpServer) handleAbort(writer http.ResponseWriter, request *http.Request) {
+ if request.Method == "GET" {
+ var shouldAbort bool
+ server.handler.ShouldAbort(voidSender, &shouldAbort)
+ if shouldAbort {
+ writer.WriteHeader(http.StatusGone)
+ } else {
+ writer.WriteHeader(http.StatusOK)
+ }
+ } else {
+ server.handler.Abort(voidSender, voidReceiver)
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go
new file mode 100644
index 0000000..59e8e6f
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go
@@ -0,0 +1,136 @@
+package parallel_support
+
+import (
+ "net/rpc"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type rpcClient struct {
+ serverHost string
+ client *rpc.Client
+}
+
+func newRPCClient(serverHost string) *rpcClient {
+ return &rpcClient{
+ serverHost: serverHost,
+ }
+}
+
+func (client *rpcClient) Connect() bool {
+ var err error
+ if client.client != nil {
+ return true
+ }
+ client.client, err = rpc.DialHTTPPath("tcp", client.serverHost, "/")
+ if err != nil {
+ client.client = nil
+ return false
+ }
+ return true
+}
+
+func (client *rpcClient) Close() error {
+ return client.client.Close()
+}
+
+func (client *rpcClient) poll(method string, data interface{}) error {
+ for {
+ err := client.client.Call(method, voidSender, data)
+ if err == nil {
+ return nil
+ }
+ switch err.Error() {
+ case ErrorEarly.Error():
+ time.Sleep(POLLING_INTERVAL)
+ case ErrorGone.Error():
+ return ErrorGone
+ case ErrorFailed.Error():
+ return ErrorFailed
+ default:
+ return err
+ }
+ }
+}
+
+func (client *rpcClient) PostSuiteWillBegin(report types.Report) error {
+ return client.client.Call("Server.SpecSuiteWillBegin", report, voidReceiver)
+}
+
+func (client *rpcClient) PostDidRun(report types.SpecReport) error {
+ return client.client.Call("Server.DidRun", report, voidReceiver)
+}
+
+func (client *rpcClient) PostSuiteDidEnd(report types.Report) error {
+ return client.client.Call("Server.SpecSuiteDidEnd", report, voidReceiver)
+}
+
+func (client *rpcClient) Write(p []byte) (int, error) {
+ var n int
+ err := client.client.Call("Server.EmitOutput", p, &n)
+ return n, err
+}
+
+func (client *rpcClient) PostEmitProgressReport(report types.ProgressReport) error {
+ return client.client.Call("Server.EmitProgressReport", report, voidReceiver)
+}
+
+func (client *rpcClient) PostReportBeforeSuiteCompleted(state types.SpecState) error {
+ return client.client.Call("Server.ReportBeforeSuiteCompleted", state, voidReceiver)
+}
+
+func (client *rpcClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) {
+ var state types.SpecState
+ err := client.poll("Server.ReportBeforeSuiteState", &state)
+ if err == ErrorGone {
+ return types.SpecStateFailed, nil
+ }
+ return state, err
+}
+
+func (client *rpcClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error {
+ beforeSuiteState := BeforeSuiteState{
+ State: state,
+ Data: data,
+ }
+ return client.client.Call("Server.BeforeSuiteCompleted", beforeSuiteState, voidReceiver)
+}
+
+func (client *rpcClient) BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) {
+ var beforeSuiteState BeforeSuiteState
+ err := client.poll("Server.BeforeSuiteState", &beforeSuiteState)
+ if err == ErrorGone {
+ return types.SpecStateInvalid, nil, types.GinkgoErrors.SynchronizedBeforeSuiteDisappearedOnProc1()
+ }
+ return beforeSuiteState.State, beforeSuiteState.Data, err
+}
+
+func (client *rpcClient) BlockUntilNonprimaryProcsHaveFinished() error {
+ return client.poll("Server.HaveNonprimaryProcsFinished", voidReceiver)
+}
+
+func (client *rpcClient) BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) {
+ var report types.Report
+ err := client.poll("Server.AggregatedNonprimaryProcsReport", &report)
+ if err == ErrorGone {
+ return types.Report{}, types.GinkgoErrors.AggregatedReportUnavailableDueToNodeDisappearing()
+ }
+ return report, err
+}
+
+func (client *rpcClient) FetchNextCounter() (int, error) {
+ var counter int
+ err := client.client.Call("Server.Counter", voidSender, &counter)
+ return counter, err
+}
+
+func (client *rpcClient) PostAbort() error {
+ return client.client.Call("Server.Abort", voidSender, voidReceiver)
+}
+
+func (client *rpcClient) ShouldAbort() bool {
+ var shouldAbort bool
+ client.client.Call("Server.ShouldAbort", voidSender, &shouldAbort)
+ return shouldAbort
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go
new file mode 100644
index 0000000..2620fd5
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go
@@ -0,0 +1,75 @@
+/*
+
+The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners.
+This is used, primarily, to enable streaming parallel test output but has, in principle, broader applications (e.g. streaming test output to a browser).
+
+*/
+
+package parallel_support
+
+import (
+ "io"
+ "net"
+ "net/http"
+ "net/rpc"
+
+ "github.com/onsi/ginkgo/v2/reporters"
+)
+
+/*
+RPCServer spins up on an automatically selected port and listens for communication from the forwarding reporter.
+It then forwards that communication to attached reporters.
+*/
+type RPCServer struct {
+ listener net.Listener
+ handler *ServerHandler
+}
+
+//Create a new server, automatically selecting a port
+func newRPCServer(parallelTotal int, reporter reporters.Reporter) (*RPCServer, error) {
+ listener, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ return nil, err
+ }
+ return &RPCServer{
+ listener: listener,
+ handler: newServerHandler(parallelTotal, reporter),
+ }, nil
+}
+
+//Start the server. You don't need to `go s.Start()`, just `s.Start()`
+func (server *RPCServer) Start() {
+ rpcServer := rpc.NewServer()
+ rpcServer.RegisterName("Server", server.handler) //register the handler's methods as the server
+
+ httpServer := &http.Server{}
+ httpServer.Handler = rpcServer
+
+ go httpServer.Serve(server.listener)
+}
+
+//Stop the server
+func (server *RPCServer) Close() {
+ server.listener.Close()
+}
+
+// The address at which the server can be reached. Pass this into the `ForwardingReporter`.
+func (server *RPCServer) Address() string {
+ return server.listener.Addr().String()
+}
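+
+// A minimal lifecycle sketch (hypothetical caller; rep is any reporters.Reporter):
+//
+//	srv, err := newRPCServer(parallelTotal, rep)
+//	if err != nil { /* handle */ }
+//	srv.Start()           // returns immediately; serves in a goroutine
+//	addr := srv.Address() // hand this to the forwarding clients
+//	<-srv.GetSuiteDone()  // closed once all procs have posted SpecSuiteDidEnd
+//	srv.Close()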
+
+func (server *RPCServer) GetSuiteDone() chan interface{} {
+ return server.handler.done
+}
+
+func (server *RPCServer) GetOutputDestination() io.Writer {
+ return server.handler.outputDestination
+}
+
+func (server *RPCServer) SetOutputDestination(w io.Writer) {
+ server.handler.outputDestination = w
+}
+
+func (server *RPCServer) RegisterAlive(node int, alive func() bool) {
+ server.handler.registerAlive(node, alive)
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go
new file mode 100644
index 0000000..a6d9879
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go
@@ -0,0 +1,234 @@
+package parallel_support
+
+import (
+ "io"
+ "os"
+ "sync"
+
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type Void struct{}
+
+var voidReceiver *Void = &Void{}
+var voidSender Void
+
+// ServerHandler is an RPC-compatible handler that is shared between the http server and the rpc server.
+// It handles all the business logic to avoid duplication between the two servers.
+type ServerHandler struct {
+ done chan interface{}
+ outputDestination io.Writer
+ reporter reporters.Reporter
+ alives []func() bool
+ lock *sync.Mutex
+ beforeSuiteState BeforeSuiteState
+ reportBeforeSuiteState types.SpecState
+ parallelTotal int
+ counter int
+ counterLock *sync.Mutex
+ shouldAbort bool
+
+ numSuiteDidBegins int
+ numSuiteDidEnds int
+ aggregatedReport types.Report
+ reportHoldingArea []types.SpecReport
+}
+
+func newServerHandler(parallelTotal int, reporter reporters.Reporter) *ServerHandler {
+ return &ServerHandler{
+ reporter: reporter,
+ lock: &sync.Mutex{},
+ counterLock: &sync.Mutex{},
+ alives: make([]func() bool, parallelTotal),
+ beforeSuiteState: BeforeSuiteState{Data: nil, State: types.SpecStateInvalid},
+
+ parallelTotal: parallelTotal,
+ outputDestination: os.Stdout,
+ done: make(chan interface{}),
+ }
+}
+
+func (handler *ServerHandler) SpecSuiteWillBegin(report types.Report, _ *Void) error {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+
+ handler.numSuiteDidBegins += 1
+
+ // all summaries are identical, so it's fine to simply emit the last one of these
+ if handler.numSuiteDidBegins == handler.parallelTotal {
+ handler.reporter.SuiteWillBegin(report)
+
+ for _, summary := range handler.reportHoldingArea {
+ handler.reporter.WillRun(summary)
+ handler.reporter.DidRun(summary)
+ }
+
+ handler.reportHoldingArea = nil
+ }
+
+ return nil
+}
+
+func (handler *ServerHandler) DidRun(report types.SpecReport, _ *Void) error {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+
+ if handler.numSuiteDidBegins == handler.parallelTotal {
+ handler.reporter.WillRun(report)
+ handler.reporter.DidRun(report)
+ } else {
+ handler.reportHoldingArea = append(handler.reportHoldingArea, report)
+ }
+
+ return nil
+}
+
+func (handler *ServerHandler) SpecSuiteDidEnd(report types.Report, _ *Void) error {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+
+ handler.numSuiteDidEnds += 1
+ if handler.numSuiteDidEnds == 1 {
+ handler.aggregatedReport = report
+ } else {
+ handler.aggregatedReport = handler.aggregatedReport.Add(report)
+ }
+
+ if handler.numSuiteDidEnds == handler.parallelTotal {
+ handler.reporter.SuiteDidEnd(handler.aggregatedReport)
+ close(handler.done)
+ }
+
+ return nil
+}
+
+func (handler *ServerHandler) EmitOutput(output []byte, n *int) error {
+ var err error
+ *n, err = handler.outputDestination.Write(output)
+ return err
+}
+
+func (handler *ServerHandler) EmitProgressReport(report types.ProgressReport, _ *Void) error {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ handler.reporter.EmitProgressReport(report)
+ return nil
+}
+
+func (handler *ServerHandler) registerAlive(proc int, alive func() bool) {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ handler.alives[proc-1] = alive
+}
+
+func (handler *ServerHandler) procIsAlive(proc int) bool {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ alive := handler.alives[proc-1]
+ if alive == nil {
+ return true
+ }
+ return alive()
+}
+
+func (handler *ServerHandler) haveNonprimaryProcsFinished() bool {
+ for i := 2; i <= handler.parallelTotal; i++ {
+ if handler.procIsAlive(i) {
+ return false
+ }
+ }
+ return true
+}
+
+func (handler *ServerHandler) ReportBeforeSuiteCompleted(reportBeforeSuiteState types.SpecState, _ *Void) error {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ handler.reportBeforeSuiteState = reportBeforeSuiteState
+
+ return nil
+}
+
+func (handler *ServerHandler) ReportBeforeSuiteState(_ Void, reportBeforeSuiteState *types.SpecState) error {
+ proc1IsAlive := handler.procIsAlive(1)
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ if handler.reportBeforeSuiteState == types.SpecStateInvalid {
+ if proc1IsAlive {
+ return ErrorEarly
+ } else {
+ return ErrorGone
+ }
+ }
+ *reportBeforeSuiteState = handler.reportBeforeSuiteState
+ return nil
+}
+
+func (handler *ServerHandler) BeforeSuiteCompleted(beforeSuiteState BeforeSuiteState, _ *Void) error {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ handler.beforeSuiteState = beforeSuiteState
+
+ return nil
+}
+
+func (handler *ServerHandler) BeforeSuiteState(_ Void, beforeSuiteState *BeforeSuiteState) error {
+ proc1IsAlive := handler.procIsAlive(1)
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ if handler.beforeSuiteState.State == types.SpecStateInvalid {
+ if proc1IsAlive {
+ return ErrorEarly
+ } else {
+ return ErrorGone
+ }
+ }
+ *beforeSuiteState = handler.beforeSuiteState
+ return nil
+}
+
+func (handler *ServerHandler) HaveNonprimaryProcsFinished(_ Void, _ *Void) error {
+ if handler.haveNonprimaryProcsFinished() {
+ return nil
+ } else {
+ return ErrorEarly
+ }
+}
+
+func (handler *ServerHandler) AggregatedNonprimaryProcsReport(_ Void, report *types.Report) error {
+ if handler.haveNonprimaryProcsFinished() {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ if handler.numSuiteDidEnds == handler.parallelTotal-1 {
+ *report = handler.aggregatedReport
+ return nil
+ } else {
+ return ErrorGone
+ }
+ } else {
+ return ErrorEarly
+ }
+}
+
+func (handler *ServerHandler) Counter(_ Void, counter *int) error {
+ handler.counterLock.Lock()
+ defer handler.counterLock.Unlock()
+ *counter = handler.counter
+ handler.counter++
+ return nil
+}
+
+func (handler *ServerHandler) Abort(_ Void, _ *Void) error {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ handler.shouldAbort = true
+ return nil
+}
+
+func (handler *ServerHandler) ShouldAbort(_ Void, shouldAbort *bool) error {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ *shouldAbort = handler.shouldAbort
+ return nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go
new file mode 100644
index 0000000..11269cf
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go
@@ -0,0 +1,287 @@
+package internal
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "os/signal"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+var _SOURCE_CACHE = map[string][]string{}
+
+type ProgressSignalRegistrar func(func()) context.CancelFunc
+
+func RegisterForProgressSignal(handler func()) context.CancelFunc {
+ signalChannel := make(chan os.Signal, 1)
+ if len(PROGRESS_SIGNALS) > 0 {
+ signal.Notify(signalChannel, PROGRESS_SIGNALS...)
+ }
+ ctx, cancel := context.WithCancel(context.Background())
+ go func() {
+ for {
+ select {
+ case <-signalChannel:
+ handler()
+ case <-ctx.Done():
+ signal.Stop(signalChannel)
+ return
+ }
+ }
+ }()
+
+ return cancel
+}
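+
+// A usage sketch matching ProgressSignalRegistrar's shape (the handler body is
+// illustrative):
+//
+//	cancel := RegisterForProgressSignal(func() { /* emit a progress report */ })
+//	defer cancel()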
+
+type ProgressStepCursor struct {
+ Text string
+ CodeLocation types.CodeLocation
+ StartTime time.Time
+}
+
+func NewProgressReport(isRunningInParallel bool, report types.SpecReport, currentNode Node, currentNodeStartTime time.Time, currentStep types.SpecEvent, gwOutput string, timelineLocation types.TimelineLocation, additionalReports []string, sourceRoots []string, includeAll bool) (types.ProgressReport, error) {
+ pr := types.ProgressReport{
+ ParallelProcess: report.ParallelProcess,
+ RunningInParallel: isRunningInParallel,
+ ContainerHierarchyTexts: report.ContainerHierarchyTexts,
+ LeafNodeText: report.LeafNodeText,
+ LeafNodeLocation: report.LeafNodeLocation,
+ SpecStartTime: report.StartTime,
+
+ CurrentNodeType: currentNode.NodeType,
+ CurrentNodeText: currentNode.Text,
+ CurrentNodeLocation: currentNode.CodeLocation,
+ CurrentNodeStartTime: currentNodeStartTime,
+
+ CurrentStepText: currentStep.Message,
+ CurrentStepLocation: currentStep.CodeLocation,
+ CurrentStepStartTime: currentStep.TimelineLocation.Time,
+
+ AdditionalReports: additionalReports,
+
+ CapturedGinkgoWriterOutput: gwOutput,
+ TimelineLocation: timelineLocation,
+ }
+
+ goroutines, err := extractRunningGoroutines()
+ if err != nil {
+ return pr, err
+ }
+ pr.Goroutines = goroutines
+
+ // now we want to try to find goroutines of interest. these will be goroutines that have any function calls with code in packagesOfInterest:
+ packagesOfInterest := map[string]bool{}
+ packageFromFilename := func(filename string) string {
+ return filepath.Dir(filename)
+ }
+ addPackageFor := func(filename string) {
+ if filename != "" {
+ packagesOfInterest[packageFromFilename(filename)] = true
+ }
+ }
+ isPackageOfInterest := func(filename string) bool {
+ stackPackage := packageFromFilename(filename)
+ for packageOfInterest := range packagesOfInterest {
+ if strings.HasPrefix(stackPackage, packageOfInterest) {
+ return true
+ }
+ }
+ return false
+ }
+ for _, location := range report.ContainerHierarchyLocations {
+ addPackageFor(location.FileName)
+ }
+ addPackageFor(report.LeafNodeLocation.FileName)
+ addPackageFor(currentNode.CodeLocation.FileName)
+ addPackageFor(currentStep.CodeLocation.FileName)
+
+ //First, we find the SpecGoroutine - this will be the goroutine that includes `runNode`
+ specGoRoutineIdx := -1
+ runNodeFunctionCallIdx := -1
+OUTER:
+ for goroutineIdx, goroutine := range pr.Goroutines {
+ for functionCallIdx, functionCall := range goroutine.Stack {
+ if strings.Contains(functionCall.Function, "ginkgo/v2/internal.(*Suite).runNode.func") {
+ specGoRoutineIdx = goroutineIdx
+ runNodeFunctionCallIdx = functionCallIdx
+ break OUTER
+ }
+ }
+ }
+
+ //Now, we find the first non-Ginkgo function call
+ if specGoRoutineIdx > -1 {
+ for runNodeFunctionCallIdx >= 0 {
+ fn := goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Function
+ file := goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Filename
+ // these are all things that could potentially happen from within ginkgo
+ if strings.Contains(fn, "ginkgo/v2/internal") || strings.Contains(fn, "reflect.Value") || strings.Contains(file, "ginkgo/table_dsl") || strings.Contains(file, "ginkgo/core_dsl") {
+ runNodeFunctionCallIdx--
+ continue
+ }
+ // found it! let's add its package of interest
+ addPackageFor(goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Filename)
+ break
+ }
+ }
+
+ ginkgoEntryPointIdx := -1
+OUTER_GINKGO_ENTRY_POINT:
+ for goroutineIdx, goroutine := range pr.Goroutines {
+ for _, functionCall := range goroutine.Stack {
+ if strings.Contains(functionCall.Function, "ginkgo/v2.RunSpecs") {
+ ginkgoEntryPointIdx = goroutineIdx
+ break OUTER_GINKGO_ENTRY_POINT
+ }
+ }
+ }
+
+ // Now we go through all goroutines and highlight any lines with packages in `packagesOfInterest`
+ // Any goroutines with highlighted lines end up in the HighlightGoRoutines
+ for goroutineIdx, goroutine := range pr.Goroutines {
+ if goroutineIdx == ginkgoEntryPointIdx {
+ continue
+ }
+ if goroutineIdx == specGoRoutineIdx {
+ pr.Goroutines[goroutineIdx].IsSpecGoroutine = true
+ }
+ for functionCallIdx, functionCall := range goroutine.Stack {
+ if isPackageOfInterest(functionCall.Filename) {
+ goroutine.Stack[functionCallIdx].Highlight = true
+ goroutine.Stack[functionCallIdx].Source, goroutine.Stack[functionCallIdx].SourceHighlight = fetchSource(functionCall.Filename, functionCall.Line, 2, sourceRoots)
+ }
+ }
+ }
+
+ if !includeAll {
+ goroutines := []types.Goroutine{pr.SpecGoroutine()}
+ goroutines = append(goroutines, pr.HighlightedGoroutines()...)
+ pr.Goroutines = goroutines
+ }
+
+ return pr, nil
+}
+
+func extractRunningGoroutines() ([]types.Goroutine, error) {
+ var stack []byte
+ for size := 64 * 1024; ; size *= 2 {
+ stack = make([]byte, size)
+ if n := runtime.Stack(stack, true); n < size {
+ stack = stack[:n]
+ break
+ }
+ }
+ r := bufio.NewReader(bytes.NewReader(stack))
+ out := []types.Goroutine{}
+ idx := -1
+ for {
+ line, err := r.ReadString('\n')
+ if err == io.EOF {
+ break
+ }
+
+ line = strings.TrimSuffix(line, "\n")
+
+ //skip blank lines
+ if line == "" {
+ continue
+ }
+
+ //parse headers for new goroutine frames
+ if strings.HasPrefix(line, "goroutine") {
+ out = append(out, types.Goroutine{})
+ idx = len(out) - 1
+
+ line = strings.TrimPrefix(line, "goroutine ")
+ line = strings.TrimSuffix(line, ":")
+ fields := strings.SplitN(line, " ", 2)
+ if len(fields) != 2 {
+ return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid goroutine frame header: %s", line))
+ }
+ out[idx].ID, err = strconv.ParseUint(fields[0], 10, 64)
+ if err != nil {
+ return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid goroutine ID: %s", fields[1]))
+ }
+
+ out[idx].State = strings.TrimSuffix(strings.TrimPrefix(fields[1], "["), "]")
+ continue
+ }
+
+ //if we are here we must be at a function call entry in the stack
+ functionCall := types.FunctionCall{
+ Function: strings.TrimPrefix(line, "created by "), // no need to track 'created by'
+ }
+
+ line, err = r.ReadString('\n')
+ line = strings.TrimSuffix(line, "\n")
+ if err == io.EOF {
+ return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call: %s -- missing file name and line number", functionCall.Function))
+ }
+ line = strings.TrimLeft(line, " \t")
+ delimiterIdx := strings.LastIndex(line, ":")
+ if delimiterIdx == -1 {
+ return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid filename and line number: %s", line))
+ }
+ functionCall.Filename = line[:delimiterIdx]
+ line = strings.Split(line[delimiterIdx+1:], " ")[0]
+ lineNumber, err := strconv.ParseInt(line, 10, 64)
+ if err != nil {
+ return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call line number: %s\n%s", line, err.Error()))
+ }
+ functionCall.Line = int(lineNumber)
+ out[idx].Stack = append(out[idx].Stack, functionCall)
+ }
+
+ return out, nil
+}
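+
+// For reference, the parser above consumes the format emitted by runtime.Stack,
+// e.g. (illustrative dump):
+//
+//	goroutine 7 [chan receive]:
+//	main.worker(0x1)
+//		/tmp/example/main.go:12 +0x25
+//	created by main.main
+//		/tmp/example/main.go:8 +0x5c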
+
+func fetchSource(filename string, lineNumber int, span int, configuredSourceRoots []string) ([]string, int) {
+ if filename == "" {
+ return []string{}, 0
+ }
+
+ var lines []string
+ var ok bool
+ if lines, ok = _SOURCE_CACHE[filename]; !ok {
+ sourceRoots := []string{""}
+ sourceRoots = append(sourceRoots, configuredSourceRoots...)
+ var data []byte
+ var err error
+ var found bool
+ for _, root := range sourceRoots {
+ data, err = os.ReadFile(filepath.Join(root, filename))
+ if err == nil {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return []string{}, 0
+ }
+ lines = strings.Split(string(data), "\n")
+ _SOURCE_CACHE[filename] = lines
+ }
+
+ startIndex := lineNumber - span - 1
+ endIndex := startIndex + span + span + 1
+ if startIndex < 0 {
+ startIndex = 0
+ }
+ if endIndex > len(lines) {
+ endIndex = len(lines)
+ }
+ highlightIndex := lineNumber - 1 - startIndex
+ return lines[startIndex:endIndex], highlightIndex
+}
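+
+// Worked example: fetchSource("a.go", 10, 2, nil) returns lines[7:12] - i.e.
+// source lines 8 through 12 - with highlightIndex 2 pointing at line 10.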
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_bsd.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_bsd.go
new file mode 100644
index 0000000..61e0ed3
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_bsd.go
@@ -0,0 +1,11 @@
+//go:build freebsd || openbsd || netbsd || darwin || dragonfly
+// +build freebsd openbsd netbsd darwin dragonfly
+
+package internal
+
+import (
+ "os"
+ "syscall"
+)
+
+var PROGRESS_SIGNALS = []os.Signal{syscall.SIGINFO, syscall.SIGUSR1}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_unix.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_unix.go
new file mode 100644
index 0000000..ad30de4
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_unix.go
@@ -0,0 +1,11 @@
+//go:build linux || solaris
+// +build linux solaris
+
+package internal
+
+import (
+ "os"
+ "syscall"
+)
+
+var PROGRESS_SIGNALS = []os.Signal{syscall.SIGUSR1}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_wasm.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_wasm.go
new file mode 100644
index 0000000..8c53fe0
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_wasm.go
@@ -0,0 +1,10 @@
+//go:build wasm
+
+package internal
+
+import (
+ "os"
+ "syscall"
+)
+
+var PROGRESS_SIGNALS = []os.Signal{syscall.SIGUSR1}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_win.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_win.go
new file mode 100644
index 0000000..0eca251
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_win.go
@@ -0,0 +1,8 @@
+//go:build windows
+// +build windows
+
+package internal
+
+import "os"
+
+var PROGRESS_SIGNALS = []os.Signal{}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_reporter_manager.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_reporter_manager.go
new file mode 100644
index 0000000..2c6e260
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_reporter_manager.go
@@ -0,0 +1,79 @@
+package internal
+
+import (
+ "context"
+ "sort"
+ "strings"
+ "sync"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type ProgressReporterManager struct {
+ lock *sync.Mutex
+ progressReporters map[int]func() string
+ prCounter int
+}
+
+func NewProgressReporterManager() *ProgressReporterManager {
+ return &ProgressReporterManager{
+ progressReporters: map[int]func() string{},
+ lock: &sync.Mutex{},
+ }
+}
+
+func (prm *ProgressReporterManager) AttachProgressReporter(reporter func() string) func() {
+ prm.lock.Lock()
+ defer prm.lock.Unlock()
+ prm.prCounter += 1
+ prCounter := prm.prCounter
+ prm.progressReporters[prCounter] = reporter
+
+ return func() {
+ prm.lock.Lock()
+ defer prm.lock.Unlock()
+ delete(prm.progressReporters, prCounter)
+ }
+}
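+
+// A minimal usage sketch (the reporter closure is illustrative):
+//
+//	detach := prm.AttachProgressReporter(func() string { return "still compacting" })
+//	defer detach()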
+
+func (prm *ProgressReporterManager) QueryProgressReporters(ctx context.Context, failer *Failer) []string {
+ prm.lock.Lock()
+ keys := []int{}
+ for key := range prm.progressReporters {
+ keys = append(keys, key)
+ }
+ sort.Ints(keys)
+ reporters := []func() string{}
+ for _, key := range keys {
+ reporters = append(reporters, prm.progressReporters[key])
+ }
+ prm.lock.Unlock()
+
+ if len(reporters) == 0 {
+ return nil
+ }
+ out := []string{}
+ for _, reporter := range reporters {
+ reportC := make(chan string, 1)
+ go func() {
+ defer func() {
+ e := recover()
+ if e != nil {
+ failer.Panic(types.NewCodeLocationWithStackTrace(1), e)
+ reportC <- "failed to query attached progress reporter"
+ }
+ }()
+ reportC <- reporter()
+ }()
+ var report string
+ select {
+ case report = <-reportC:
+ case <-ctx.Done():
+ return out
+ }
+ if strings.TrimSpace(report) != "" {
+ out = append(out, report)
+ }
+ }
+ return out
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go b/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go
new file mode 100644
index 0000000..cc351a3
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go
@@ -0,0 +1,39 @@
+package internal
+
+import (
+ "time"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type ReportEntry = types.ReportEntry
+
+func NewReportEntry(name string, cl types.CodeLocation, args ...interface{}) (ReportEntry, error) {
+ out := ReportEntry{
+ Visibility: types.ReportEntryVisibilityAlways,
+ Name: name,
+ Location: cl,
+ Time: time.Now(),
+ }
+ var didSetValue = false
+ for _, arg := range args {
+ switch x := arg.(type) {
+ case types.ReportEntryVisibility:
+ out.Visibility = x
+ case types.CodeLocation:
+ out.Location = x
+ case Offset:
+ out.Location = types.NewCodeLocation(2 + int(x))
+ case time.Time:
+ out.Time = x
+ default:
+ if didSetValue {
+ return ReportEntry{}, types.GinkgoErrors.TooManyReportEntryValues(out.Location, arg)
+ }
+ out.Value = types.WrapEntryValue(arg)
+ didSetValue = true
+ }
+ }
+
+ return out, nil
+}
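+
+// A hedged example of the variadic dispatch above (values are illustrative):
+//
+//	entry, err := NewReportEntry("db state", cl,
+//		types.ReportEntryVisibilityFailureOrVerbose, // overrides the default visibility
+//		map[string]int{"rows": 42},                  // wrapped into entry.Value
+//	)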
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/spec.go b/vendor/github.com/onsi/ginkgo/v2/internal/spec.go
new file mode 100644
index 0000000..7c4ee5b
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/spec.go
@@ -0,0 +1,87 @@
+package internal
+
+import (
+ "strings"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type Spec struct {
+ Nodes Nodes
+ Skip bool
+}
+
+func (s Spec) SubjectID() uint {
+ return s.Nodes.FirstNodeWithType(types.NodeTypeIt).ID
+}
+
+func (s Spec) Text() string {
+ texts := []string{}
+ for i := range s.Nodes {
+ if s.Nodes[i].Text != "" {
+ texts = append(texts, s.Nodes[i].Text)
+ }
+ }
+ return strings.Join(texts, " ")
+}
+
+func (s Spec) FirstNodeWithType(nodeTypes types.NodeType) Node {
+ return s.Nodes.FirstNodeWithType(nodeTypes)
+}
+
+func (s Spec) FlakeAttempts() int {
+ flakeAttempts := 0
+ for i := range s.Nodes {
+ if s.Nodes[i].FlakeAttempts > 0 {
+ flakeAttempts = s.Nodes[i].FlakeAttempts
+ }
+ }
+
+ return flakeAttempts
+}
+
+func (s Spec) MustPassRepeatedly() int {
+ mustPassRepeatedly := 0
+ for i := range s.Nodes {
+ if s.Nodes[i].MustPassRepeatedly > 0 {
+ mustPassRepeatedly = s.Nodes[i].MustPassRepeatedly
+ }
+ }
+
+ return mustPassRepeatedly
+}
+
+func (s Spec) SpecTimeout() time.Duration {
+ return s.FirstNodeWithType(types.NodeTypeIt).SpecTimeout
+}
+
+type Specs []Spec
+
+func (s Specs) HasAnySpecsMarkedPending() bool {
+ for i := range s {
+ if s[i].Nodes.HasNodeMarkedPending() {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (s Specs) CountWithoutSkip() int {
+ n := 0
+ for i := range s {
+ if !s[i].Skip {
+ n += 1
+ }
+ }
+ return n
+}
+
+func (s Specs) AtIndices(indices SpecIndices) Specs {
+ out := make(Specs, len(indices))
+ for i, idx := range indices {
+ out[i] = s[idx]
+ }
+ return out
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go b/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go
new file mode 100644
index 0000000..2d2ea2f
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go
@@ -0,0 +1,47 @@
+package internal
+
+import (
+ "context"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type SpecContext interface {
+ context.Context
+
+ SpecReport() types.SpecReport
+ AttachProgressReporter(func() string) func()
+}
+
+type specContext struct {
+ context.Context
+ *ProgressReporterManager
+
+ cancel context.CancelCauseFunc
+
+ suite *Suite
+}
+
+/*
+SpecContext includes a reference to `suite` and stores itself in the context it wraps as the "GINKGO_SPEC_CONTEXT" value. This allows users to create child Contexts without having downstream consumers (e.g. Gomega) lose access to the SpecContext and its methods. This allows us to build extensions on top of Ginkgo that simply take an all-encompassing context.
+
+Note that while SpecContext is used to enforce deadlines by Ginkgo it is not configured as a context.WithDeadline. Instead, Ginkgo owns responsibility for cancelling the context when the deadline elapses.
+
+This is because Ginkgo needs finer control over when the context is canceled. Specifically, Ginkgo needs to generate a ProgressReport before it cancels the context to ensure progress is captured where the spec is currently running. The only way to avoid a race here is to manually control the cancellation.
+*/
+func NewSpecContext(suite *Suite) *specContext {
+ ctx, cancel := context.WithCancelCause(context.Background())
+ sc := &specContext{
+ cancel: cancel,
+ suite: suite,
+ ProgressReporterManager: NewProgressReporterManager(),
+ }
+ ctx = context.WithValue(ctx, "GINKGO_SPEC_CONTEXT", sc) //yes, yes, the go docs say don't use a string for a key... but we'd rather avoid a circular dependency between Gomega and Ginkgo
+ sc.Context = ctx //thank goodness for garbage collectors that can handle circular dependencies
+
+ return sc
+}
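+
+// A sketch of one way a downstream consumer might recover the SpecContext from
+// any derived context via the same key (the surrounding code is illustrative):
+//
+//	if v := ctx.Value("GINKGO_SPEC_CONTEXT"); v != nil {
+//		if sc, ok := v.(SpecContext); ok {
+//			_ = sc.SpecReport()
+//		}
+//	}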
+
+func (sc *specContext) SpecReport() types.SpecReport {
+ return sc.suite.CurrentSpecReport()
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/spec_patch.go b/vendor/github.com/onsi/ginkgo/v2/internal/spec_patch.go
new file mode 100644
index 0000000..2d0bcc9
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/spec_patch.go
@@ -0,0 +1,22 @@
+package internal
+
+import (
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+func (s Spec) CodeLocations() []types.CodeLocation {
+ return s.Nodes.CodeLocations()
+}
+
+func (s Spec) AppendText(text string) {
+ s.Nodes[len(s.Nodes)-1].Text += text
+}
+
+func (s Spec) Labels() []string {
+ var labels []string
+ for _, n := range s.Nodes {
+ labels = append(labels, n.Labels...)
+ }
+
+ return labels
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go
new file mode 100644
index 0000000..12e50b8
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go
@@ -0,0 +1,1053 @@
+package internal
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/internal/interrupt_handler"
+ "github.com/onsi/ginkgo/v2/internal/parallel_support"
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+ "golang.org/x/net/context"
+)
+
+type Phase uint
+
+const (
+ PhaseBuildTopLevel Phase = iota
+ PhaseBuildTree
+ PhaseRun
+)
+
+var PROGRESS_REPORTER_DEADLINE = 5 * time.Second
+
+type Suite struct {
+ tree *TreeNode
+ topLevelContainers Nodes
+
+ *ProgressReporterManager
+
+ phase Phase
+
+ suiteNodes Nodes
+ cleanupNodes Nodes
+
+ failer *Failer
+ reporter reporters.Reporter
+ writer WriterInterface
+ outputInterceptor OutputInterceptor
+ interruptHandler interrupt_handler.InterruptHandlerInterface
+ config types.SuiteConfig
+ deadline time.Time
+
+ skipAll bool
+ report types.Report
+ currentSpecReport types.SpecReport
+ currentNode Node
+ currentNodeStartTime time.Time
+
+ currentSpecContext *specContext
+
+ currentByStep types.SpecEvent
+ timelineOrder int
+
+ /*
+ We don't need to lock around all operations. Just those that *could* happen concurrently.
+
+ Suite, generally, only runs one node at a time - and so the possibility for races is small. In fact, the presence of a race usually indicates the user has launched a goroutine that has leaked past the node it was launched in.
+
+ However, there are some operations that can happen concurrently:
+
+ - AddReportEntry and CurrentSpecReport can be accessed at any point by the user - including in goroutines that outlive the node intentionally (see, e.g. #1020). They both form a self-contained read-write pair and so a lock in them is sufficient.
+ - generateProgressReport can be invoked at any point in time by an interrupt or a progress poll. Moreover, it requires access to currentSpecReport, currentNode, currentNodeStartTime, and progressStepCursor. To make it threadsafe we need to lock around generateProgressReport when we read those variables _and_ everywhere those variables are *written*. In general we don't need to worry about all possible field writes to these variables as what `generateProgressReport` does with these variables is fairly selective (hence the name of the lock). Specifically, we don't need to lock around state and failure message changes on `currentSpecReport` - just the setting of the variable itself.
+ */
+ selectiveLock *sync.Mutex
+
+ client parallel_support.Client
+
+ annotateFn AnnotateFunc
+}
+
+func NewSuite() *Suite {
+ return &Suite{
+ tree: &TreeNode{},
+ phase: PhaseBuildTopLevel,
+ ProgressReporterManager: NewProgressReporterManager(),
+
+ selectiveLock: &sync.Mutex{},
+ }
+}
+
+func (suite *Suite) Clone() (*Suite, error) {
+ if suite.phase != PhaseBuildTopLevel {
+ return nil, fmt.Errorf("cannot clone suite after tree has been built")
+ }
+ return &Suite{
+ tree: &TreeNode{},
+ phase: PhaseBuildTopLevel,
+ ProgressReporterManager: NewProgressReporterManager(),
+ topLevelContainers: suite.topLevelContainers.Clone(),
+ suiteNodes: suite.suiteNodes.Clone(),
+ selectiveLock: &sync.Mutex{},
+ }, nil
+}
+
+func (suite *Suite) BuildTree() error {
+ // During PhaseBuildTopLevel, the top level containers are stored in suite.topLevelContainers but not entered.
+ // We now enter PhaseBuildTree, where these top level containers are entered and added to the spec tree.
+ suite.phase = PhaseBuildTree
+ for _, topLevelContainer := range suite.topLevelContainers {
+ err := suite.PushNode(topLevelContainer)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, progressSignalRegistrar ProgressSignalRegistrar, suiteConfig types.SuiteConfig) (bool, bool) {
+ if suite.phase != PhaseBuildTree {
+ panic("cannot run before building the tree = call suite.BuildTree() first")
+ }
+ ApplyNestedFocusPolicyToTree(suite.tree)
+ specs := GenerateSpecsFromTreeRoot(suite.tree)
+ if suite.annotateFn != nil {
+ for _, spec := range specs {
+ suite.annotateFn(spec.Text(), spec)
+ }
+ }
+ specs, hasProgrammaticFocus := ApplyFocusToSpecs(specs, description, suiteLabels, suiteConfig)
+
+ suite.phase = PhaseRun
+ suite.client = client
+ suite.failer = failer
+ suite.reporter = reporter
+ suite.writer = writer
+ suite.outputInterceptor = outputInterceptor
+ suite.interruptHandler = interruptHandler
+ suite.config = suiteConfig
+
+ if suite.config.Timeout > 0 {
+ suite.deadline = time.Now().Add(suite.config.Timeout)
+ }
+
+ cancelProgressHandler := progressSignalRegistrar(suite.handleProgressSignal)
+
+ success := suite.runSpecs(description, suiteLabels, suitePath, hasProgrammaticFocus, specs)
+
+ cancelProgressHandler()
+
+ return success, hasProgrammaticFocus
+}
+
+func (suite *Suite) InRunPhase() bool {
+ return suite.phase == PhaseRun
+}
+
+/*
+ Tree Construction methods
+
+ PushNode is used during PhaseBuildTopLevel and PhaseBuildTree
+*/
+
+func (suite *Suite) PushNode(node Node) error {
+ if node.NodeType.Is(types.NodeTypeCleanupInvalid | types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) {
+ return suite.pushCleanupNode(node)
+ }
+
+ if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeBeforeSuite | types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) {
+ return suite.pushSuiteNode(node)
+ }
+
+ if suite.phase == PhaseRun {
+ return types.GinkgoErrors.PushingNodeInRunPhase(node.NodeType, node.CodeLocation)
+ }
+
+ if node.MarkedSerial {
+ firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
+ if !firstOrderedNode.IsZero() && !firstOrderedNode.MarkedSerial {
+ return types.GinkgoErrors.InvalidSerialNodeInNonSerialOrderedContainer(node.CodeLocation, node.NodeType)
+ }
+ }
+
+ if node.NodeType.Is(types.NodeTypeBeforeAll | types.NodeTypeAfterAll) {
+ firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
+ if firstOrderedNode.IsZero() {
+ return types.GinkgoErrors.SetupNodeNotInOrderedContainer(node.CodeLocation, node.NodeType)
+ }
+ }
+
+ if node.MarkedContinueOnFailure {
+ firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
+ if !firstOrderedNode.IsZero() {
+ return types.GinkgoErrors.InvalidContinueOnFailureDecoration(node.CodeLocation)
+ }
+ }
+
+ if node.NodeType == types.NodeTypeContainer {
+ // During PhaseBuildTopLevel we only track the top level containers without entering them
+ // We only enter the top level container nodes during PhaseBuildTree
+ //
+ // This ensures the tree is only constructed after `go test` has called `flag.Parse()` and gives
+ // the user an opportunity to load suiteConfiguration information in the `TestX` test hook just before `RunSpecs`
+ // is invoked. This makes the lifecycle easier to reason about and solves issues like #693.
+ if suite.phase == PhaseBuildTopLevel {
+ suite.topLevelContainers = append(suite.topLevelContainers, node)
+ return nil
+ }
+ if suite.phase == PhaseBuildTree {
+ parentTree := suite.tree
+ suite.tree = &TreeNode{Node: node}
+ parentTree.AppendChild(suite.tree)
+ err := func() (err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ err = types.GinkgoErrors.CaughtPanicDuringABuildPhase(e, node.CodeLocation)
+ }
+ }()
+ node.Body(nil)
+ return err
+ }()
+ suite.tree = parentTree
+ return err
+ }
+ } else {
+ suite.tree.AppendChild(&TreeNode{Node: node})
+ return nil
+ }
+
+ return nil
+}
+
+func (suite *Suite) pushSuiteNode(node Node) error {
+ if suite.phase == PhaseBuildTree {
+ return types.GinkgoErrors.SuiteNodeInNestedContext(node.NodeType, node.CodeLocation)
+ }
+
+ if suite.phase == PhaseRun {
+ return types.GinkgoErrors.SuiteNodeDuringRunPhase(node.NodeType, node.CodeLocation)
+ }
+
+ switch node.NodeType {
+ case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite:
+ existingBefores := suite.suiteNodes.WithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite)
+ if len(existingBefores) > 0 {
+ return types.GinkgoErrors.MultipleBeforeSuiteNodes(node.NodeType, node.CodeLocation, existingBefores[0].NodeType, existingBefores[0].CodeLocation)
+ }
+ case types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite:
+ existingAfters := suite.suiteNodes.WithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite)
+ if len(existingAfters) > 0 {
+ return types.GinkgoErrors.MultipleAfterSuiteNodes(node.NodeType, node.CodeLocation, existingAfters[0].NodeType, existingAfters[0].CodeLocation)
+ }
+ }
+
+ suite.suiteNodes = append(suite.suiteNodes, node)
+ return nil
+}
+
+func (suite *Suite) pushCleanupNode(node Node) error {
+ if suite.phase != PhaseRun || suite.currentNode.IsZero() {
+ return types.GinkgoErrors.PushingCleanupNodeDuringTreeConstruction(node.CodeLocation)
+ }
+
+ switch suite.currentNode.NodeType {
+ case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite, types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite:
+ node.NodeType = types.NodeTypeCleanupAfterSuite
+ case types.NodeTypeBeforeAll, types.NodeTypeAfterAll:
+ node.NodeType = types.NodeTypeCleanupAfterAll
+ case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportBeforeSuite, types.NodeTypeReportAfterSuite:
+ return types.GinkgoErrors.PushingCleanupInReportingNode(node.CodeLocation, suite.currentNode.NodeType)
+ case types.NodeTypeCleanupInvalid, types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll, types.NodeTypeCleanupAfterSuite:
+ return types.GinkgoErrors.PushingCleanupInCleanupNode(node.CodeLocation)
+ default:
+ node.NodeType = types.NodeTypeCleanupAfterEach
+ }
+
+ node.NodeIDWhereCleanupWasGenerated = suite.currentNode.ID
+ node.NestingLevel = suite.currentNode.NestingLevel
+ suite.selectiveLock.Lock()
+ suite.cleanupNodes = append(suite.cleanupNodes, node)
+ suite.selectiveLock.Unlock()
+
+ return nil
+}
+
+func (suite *Suite) generateTimelineLocation() types.TimelineLocation {
+ suite.selectiveLock.Lock()
+ defer suite.selectiveLock.Unlock()
+
+ suite.timelineOrder += 1
+ return types.TimelineLocation{
+ Offset: len(suite.currentSpecReport.CapturedGinkgoWriterOutput) + suite.writer.Len(),
+ Order: suite.timelineOrder,
+ Time: time.Now(),
+ }
+}
+
+func (suite *Suite) handleSpecEvent(event types.SpecEvent) types.SpecEvent {
+ event.TimelineLocation = suite.generateTimelineLocation()
+ suite.selectiveLock.Lock()
+ suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event)
+ suite.selectiveLock.Unlock()
+ suite.reporter.EmitSpecEvent(event)
+ return event
+}
+
+func (suite *Suite) handleSpecEventEnd(eventType types.SpecEventType, startEvent types.SpecEvent) {
+ event := startEvent
+ event.SpecEventType = eventType
+ event.TimelineLocation = suite.generateTimelineLocation()
+ event.Duration = event.TimelineLocation.Time.Sub(startEvent.TimelineLocation.Time)
+ suite.selectiveLock.Lock()
+ suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event)
+ suite.selectiveLock.Unlock()
+ suite.reporter.EmitSpecEvent(event)
+}
+
+func (suite *Suite) By(text string, callback ...func()) error {
+ cl := types.NewCodeLocation(2)
+ if suite.phase != PhaseRun {
+ return types.GinkgoErrors.ByNotDuringRunPhase(cl)
+ }
+
+ event := suite.handleSpecEvent(types.SpecEvent{
+ SpecEventType: types.SpecEventByStart,
+ CodeLocation: cl,
+ Message: text,
+ })
+ suite.selectiveLock.Lock()
+ suite.currentByStep = event
+ suite.selectiveLock.Unlock()
+
+ if len(callback) == 1 {
+ defer func() {
+ suite.selectiveLock.Lock()
+ suite.currentByStep = types.SpecEvent{}
+ suite.selectiveLock.Unlock()
+ suite.handleSpecEventEnd(types.SpecEventByEnd, event)
+ }()
+ callback[0]()
+ } else if len(callback) > 1 {
+ panic("just one callback per By, please")
+ }
+ return nil
+}
+
+/*
+Spec Running methods - used during PhaseRun
+*/
+func (suite *Suite) CurrentSpecReport() types.SpecReport {
+ suite.selectiveLock.Lock()
+ defer suite.selectiveLock.Unlock()
+ report := suite.currentSpecReport
+ if suite.writer != nil {
+ report.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
+ }
+ report.ReportEntries = make([]ReportEntry, len(report.ReportEntries))
+ copy(report.ReportEntries, suite.currentSpecReport.ReportEntries)
+ return report
+}
+
+// Only valid in the preview context. In general suite.report only includes
+// the specs run by _this_ node - it is only at the end of the suite that
+// the parallel reports are aggregated. However, in the preview context we run
+// in series, so suite.report already contains the full set of specs.
+func (suite *Suite) GetPreviewReport() types.Report {
+ suite.selectiveLock.Lock()
+ defer suite.selectiveLock.Unlock()
+ return suite.report
+}
+
+func (suite *Suite) AddReportEntry(entry ReportEntry) error {
+ if suite.phase != PhaseRun {
+ return types.GinkgoErrors.AddReportEntryNotDuringRunPhase(entry.Location)
+ }
+ entry.TimelineLocation = suite.generateTimelineLocation()
+ entry.Time = entry.TimelineLocation.Time
+ suite.selectiveLock.Lock()
+ suite.currentSpecReport.ReportEntries = append(suite.currentSpecReport.ReportEntries, entry)
+ suite.selectiveLock.Unlock()
+ suite.reporter.EmitReportEntry(entry)
+ return nil
+}
+
+func (suite *Suite) generateProgressReport(fullReport bool) types.ProgressReport {
+ timelineLocation := suite.generateTimelineLocation()
+ suite.selectiveLock.Lock()
+ defer suite.selectiveLock.Unlock()
+
+ deadline, cancel := context.WithTimeout(context.Background(), PROGRESS_REPORTER_DEADLINE)
+ defer cancel()
+ var additionalReports []string
+ if suite.currentSpecContext != nil {
+ additionalReports = append(additionalReports, suite.currentSpecContext.QueryProgressReporters(deadline, suite.failer)...)
+ }
+ additionalReports = append(additionalReports, suite.QueryProgressReporters(deadline, suite.failer)...)
+ gwOutput := suite.currentSpecReport.CapturedGinkgoWriterOutput + string(suite.writer.Bytes())
+ pr, err := NewProgressReport(suite.isRunningInParallel(), suite.currentSpecReport, suite.currentNode, suite.currentNodeStartTime, suite.currentByStep, gwOutput, timelineLocation, additionalReports, suite.config.SourceRoots, fullReport)
+
+ if err != nil {
+ fmt.Printf("{{red}}Failed to generate progress report:{{/}}\n%s\n", err.Error())
+ }
+ return pr
+}
+
+func (suite *Suite) handleProgressSignal() {
+ report := suite.generateProgressReport(false)
+ report.Message = "{{bold}}You've requested a progress report:{{/}}"
+ suite.emitProgressReport(report)
+}
+
+func (suite *Suite) emitProgressReport(report types.ProgressReport) {
+ suite.selectiveLock.Lock()
+ suite.currentSpecReport.ProgressReports = append(suite.currentSpecReport.ProgressReports, report.WithoutCapturedGinkgoWriterOutput())
+ suite.selectiveLock.Unlock()
+
+ suite.reporter.EmitProgressReport(report)
+ if suite.isRunningInParallel() {
+ err := suite.client.PostEmitProgressReport(report)
+ if err != nil {
+ fmt.Println(err.Error())
+ }
+ }
+}
+
+func (suite *Suite) isRunningInParallel() bool {
+ return suite.config.ParallelTotal > 1
+}
+
+func (suite *Suite) processCurrentSpecReport() {
+ suite.reporter.DidRun(suite.currentSpecReport)
+ if suite.isRunningInParallel() {
+ suite.client.PostDidRun(suite.currentSpecReport)
+ }
+ suite.report.SpecReports = append(suite.report.SpecReports, suite.currentSpecReport)
+
+ if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
+ suite.report.SuiteSucceeded = false
+ if suite.config.FailFast || suite.currentSpecReport.State.Is(types.SpecStateAborted) {
+ suite.skipAll = true
+ if suite.isRunningInParallel() {
+ suite.client.PostAbort()
+ }
+ }
+ }
+}
+
+func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath string, hasProgrammaticFocus bool, specs Specs) bool {
+ numSpecsThatWillBeRun := specs.CountWithoutSkip()
+
+ suite.report = types.Report{
+ SuitePath: suitePath,
+ SuiteDescription: description,
+ SuiteLabels: suiteLabels,
+ SuiteConfig: suite.config,
+ SuiteHasProgrammaticFocus: hasProgrammaticFocus,
+ PreRunStats: types.PreRunStats{
+ TotalSpecs: len(specs),
+ SpecsThatWillRun: numSpecsThatWillBeRun,
+ },
+ StartTime: time.Now(),
+ }
+
+ suite.reporter.SuiteWillBegin(suite.report)
+ if suite.isRunningInParallel() {
+ suite.client.PostSuiteWillBegin(suite.report)
+ }
+
+ suite.report.SuiteSucceeded = true
+
+ suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportBeforeSuite)
+
+ ranBeforeSuite := suite.report.SuiteSucceeded
+ if suite.report.SuiteSucceeded {
+ suite.runBeforeSuite(numSpecsThatWillBeRun)
+ }
+
+ if suite.report.SuiteSucceeded {
+ groupedSpecIndices, serialGroupedSpecIndices := OrderSpecs(specs, suite.config)
+ nextIndex := MakeIncrementingIndexCounter()
+ if suite.isRunningInParallel() {
+ nextIndex = suite.client.FetchNextCounter
+ }
+
+ for {
+ groupedSpecIdx, err := nextIndex()
+ if err != nil {
+ suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, fmt.Sprintf("Failed to iterate over specs:\n%s", err.Error()))
+ suite.report.SuiteSucceeded = false
+ break
+ }
+
+ if groupedSpecIdx >= len(groupedSpecIndices) {
+ if suite.config.ParallelProcess == 1 && len(serialGroupedSpecIndices) > 0 {
+ groupedSpecIndices, serialGroupedSpecIndices, nextIndex = serialGroupedSpecIndices, GroupedSpecIndices{}, MakeIncrementingIndexCounter()
+ suite.client.BlockUntilNonprimaryProcsHaveFinished()
+ continue
+ }
+ break
+ }
+
+ // the complexity for running groups of specs is very high because of Ordered containers and FlakeAttempts
+ // we encapsulate that complexity in the notion of a Group that can run
+ // Group is really just an extension of suite so it gets passed a suite and has access to all its internals
+ // Note that group is stateful and intended for single use!
+ newGroup(suite).run(specs.AtIndices(groupedSpecIndices[groupedSpecIdx]))
+ }
+
+ if suite.config.FailOnPending && specs.HasAnySpecsMarkedPending() {
+ suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected pending specs and --fail-on-pending is set")
+ suite.report.SuiteSucceeded = false
+ }
+
+ if suite.config.FailOnEmpty && specs.CountWithoutSkip() == 0 {
+ suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected no specs ran and --fail-on-empty is set")
+ suite.report.SuiteSucceeded = false
+ }
+ }
+
+ if ranBeforeSuite {
+ suite.runAfterSuiteCleanup(numSpecsThatWillBeRun)
+ }
+
+ interruptStatus := suite.interruptHandler.Status()
+ if interruptStatus.Interrupted() {
+ suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, interruptStatus.Cause.String())
+ suite.report.SuiteSucceeded = false
+ }
+ suite.report.EndTime = time.Now()
+ suite.report.RunTime = suite.report.EndTime.Sub(suite.report.StartTime)
+ if !suite.deadline.IsZero() && suite.report.EndTime.After(suite.deadline) {
+ suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Suite Timeout Elapsed")
+ suite.report.SuiteSucceeded = false
+ }
+
+ suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportAfterSuite)
+ suite.reporter.SuiteDidEnd(suite.report)
+ if suite.isRunningInParallel() {
+ suite.client.PostSuiteDidEnd(suite.report)
+ }
+
+ return suite.report.SuiteSucceeded
+}
+
+func (suite *Suite) runBeforeSuite(numSpecsThatWillBeRun int) {
+ beforeSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite)
+ if !beforeSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 {
+ suite.selectiveLock.Lock()
+ suite.currentSpecReport = types.SpecReport{
+ LeafNodeType: beforeSuiteNode.NodeType,
+ LeafNodeLocation: beforeSuiteNode.CodeLocation,
+ ParallelProcess: suite.config.ParallelProcess,
+ RunningInParallel: suite.isRunningInParallel(),
+ }
+ suite.selectiveLock.Unlock()
+
+ suite.reporter.WillRun(suite.currentSpecReport)
+ suite.runSuiteNode(beforeSuiteNode)
+ if suite.currentSpecReport.State.Is(types.SpecStateSkipped) {
+ suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Suite skipped in BeforeSuite")
+ suite.skipAll = true
+ }
+ suite.processCurrentSpecReport()
+ }
+}
+
+func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) {
+ afterSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite)
+ if !afterSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 {
+ suite.selectiveLock.Lock()
+ suite.currentSpecReport = types.SpecReport{
+ LeafNodeType: afterSuiteNode.NodeType,
+ LeafNodeLocation: afterSuiteNode.CodeLocation,
+ ParallelProcess: suite.config.ParallelProcess,
+ RunningInParallel: suite.isRunningInParallel(),
+ }
+ suite.selectiveLock.Unlock()
+
+ suite.reporter.WillRun(suite.currentSpecReport)
+ suite.runSuiteNode(afterSuiteNode)
+ suite.processCurrentSpecReport()
+ }
+
+ afterSuiteCleanup := suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterSuite).Reverse()
+ if len(afterSuiteCleanup) > 0 {
+ for _, cleanupNode := range afterSuiteCleanup {
+ suite.selectiveLock.Lock()
+ suite.currentSpecReport = types.SpecReport{
+ LeafNodeType: cleanupNode.NodeType,
+ LeafNodeLocation: cleanupNode.CodeLocation,
+ ParallelProcess: suite.config.ParallelProcess,
+ RunningInParallel: suite.isRunningInParallel(),
+ }
+ suite.selectiveLock.Unlock()
+
+ suite.reporter.WillRun(suite.currentSpecReport)
+ suite.runSuiteNode(cleanupNode)
+ suite.processCurrentSpecReport()
+ }
+ }
+}
+
+func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) {
+ nodes := spec.Nodes.WithType(nodeType)
+ if nodeType == types.NodeTypeReportAfterEach {
+ nodes = nodes.SortedByDescendingNestingLevel()
+ }
+ if nodeType == types.NodeTypeReportBeforeEach {
+ nodes = nodes.SortedByAscendingNestingLevel()
+ }
+ if len(nodes) == 0 {
+ return
+ }
+
+ for i := range nodes {
+ suite.writer.Truncate()
+ suite.outputInterceptor.StartInterceptingOutput()
+ report := suite.currentSpecReport
+ nodes[i].Body = func(ctx SpecContext) {
+ nodes[i].ReportEachBody(ctx, report)
+ }
+ state, failure := suite.runNode(nodes[i], time.Time{}, spec.Nodes.BestTextFor(nodes[i]))
+
+ // If the spec is not in a failure state (i.e. it's Passed/Skipped/Pending) and the reporter has failed, override the state.
+ // Also, if the reporter is ever aborted - always override the state to propagate the abort
+ if (!suite.currentSpecReport.State.Is(types.SpecStateFailureStates) && state.Is(types.SpecStateFailureStates)) || state.Is(types.SpecStateAborted) {
+ suite.currentSpecReport.State = state
+ suite.currentSpecReport.Failure = failure
+ }
+ suite.currentSpecReport.CapturedGinkgoWriterOutput += string(suite.writer.Bytes())
+ suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
+ }
+}
+
+func (suite *Suite) runSuiteNode(node Node) {
+ if suite.config.DryRun {
+ suite.currentSpecReport.State = types.SpecStatePassed
+ return
+ }
+
+ suite.writer.Truncate()
+ suite.outputInterceptor.StartInterceptingOutput()
+ suite.currentSpecReport.StartTime = time.Now()
+
+ var err error
+ switch node.NodeType {
+ case types.NodeTypeBeforeSuite, types.NodeTypeAfterSuite:
+ suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
+ case types.NodeTypeCleanupAfterSuite:
+ if suite.config.ParallelTotal > 1 && suite.config.ParallelProcess == 1 {
+ err = suite.client.BlockUntilNonprimaryProcsHaveFinished()
+ }
+ if err == nil {
+ suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
+ }
+ case types.NodeTypeSynchronizedBeforeSuite:
+ var data []byte
+ var runAllProcs bool
+ if suite.config.ParallelProcess == 1 {
+ if suite.config.ParallelTotal > 1 {
+ suite.outputInterceptor.StopInterceptingAndReturnOutput()
+ suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client)
+ }
+ node.Body = func(c SpecContext) { data = node.SynchronizedBeforeSuiteProc1Body(c) }
+ node.HasContext = node.SynchronizedBeforeSuiteProc1BodyHasContext
+ suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
+ if suite.config.ParallelTotal > 1 {
+ suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
+ suite.outputInterceptor.StartInterceptingOutput()
+ if suite.currentSpecReport.State.Is(types.SpecStatePassed) {
+ err = suite.client.PostSynchronizedBeforeSuiteCompleted(types.SpecStatePassed, data)
+ } else {
+ err = suite.client.PostSynchronizedBeforeSuiteCompleted(suite.currentSpecReport.State, nil)
+ }
+ }
+ runAllProcs = suite.currentSpecReport.State.Is(types.SpecStatePassed) && err == nil
+ } else {
+ var proc1State types.SpecState
+ proc1State, data, err = suite.client.BlockUntilSynchronizedBeforeSuiteData()
+ switch proc1State {
+ case types.SpecStatePassed:
+ runAllProcs = true
+ case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateTimedout:
+ err = types.GinkgoErrors.SynchronizedBeforeSuiteFailedOnProc1()
+ case types.SpecStateInterrupted, types.SpecStateAborted, types.SpecStateSkipped:
+ suite.currentSpecReport.State = proc1State
+ }
+ }
+ if runAllProcs {
+ node.Body = func(c SpecContext) { node.SynchronizedBeforeSuiteAllProcsBody(c, data) }
+ node.HasContext = node.SynchronizedBeforeSuiteAllProcsBodyHasContext
+ suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
+ }
+ case types.NodeTypeSynchronizedAfterSuite:
+ node.Body = node.SynchronizedAfterSuiteAllProcsBody
+ node.HasContext = node.SynchronizedAfterSuiteAllProcsBodyHasContext
+ suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
+ if suite.config.ParallelProcess == 1 {
+ if suite.config.ParallelTotal > 1 {
+ err = suite.client.BlockUntilNonprimaryProcsHaveFinished()
+ }
+ if err == nil {
+ if suite.config.ParallelTotal > 1 {
+ suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
+ suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client)
+ }
+
+ node.Body = node.SynchronizedAfterSuiteProc1Body
+ node.HasContext = node.SynchronizedAfterSuiteProc1BodyHasContext
+ state, failure := suite.runNode(node, time.Time{}, "")
+ if suite.currentSpecReport.State.Is(types.SpecStatePassed) {
+ suite.currentSpecReport.State, suite.currentSpecReport.Failure = state, failure
+ }
+ }
+ }
+ }
+
+ if err != nil && !suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
+ suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
+ suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure)
+ }
+
+ suite.currentSpecReport.EndTime = time.Now()
+ suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
+ suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
+ suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
+}
+
+func (suite *Suite) runReportSuiteNodesIfNeedBe(nodeType types.NodeType) {
+ nodes := suite.suiteNodes.WithType(nodeType)
+ // only run ReportAfterSuite on proc 1
+ if nodeType.Is(types.NodeTypeReportAfterSuite) && suite.config.ParallelProcess != 1 {
+ return
+ }
+ // if we're running ReportBeforeSuite on proc > 1 - we should wait until proc 1 has completed
+ if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.config.ParallelProcess != 1 && len(nodes) > 0 {
+ state, err := suite.client.BlockUntilReportBeforeSuiteCompleted()
+ if err != nil || state.Is(types.SpecStateFailed) {
+ suite.report.SuiteSucceeded = false
+ }
+ return
+ }
+
+ for _, node := range nodes {
+ suite.selectiveLock.Lock()
+ suite.currentSpecReport = types.SpecReport{
+ LeafNodeType: node.NodeType,
+ LeafNodeLocation: node.CodeLocation,
+ LeafNodeText: node.Text,
+ ParallelProcess: suite.config.ParallelProcess,
+ RunningInParallel: suite.isRunningInParallel(),
+ }
+ suite.selectiveLock.Unlock()
+
+ suite.reporter.WillRun(suite.currentSpecReport)
+ suite.runReportSuiteNode(node, suite.report)
+ suite.processCurrentSpecReport()
+ }
+
+ // if we're running ReportBeforeSuite and we're running in parallel - we should tell the other procs that we're done
+ if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.isRunningInParallel() && len(nodes) > 0 {
+ if suite.report.SuiteSucceeded {
+ suite.client.PostReportBeforeSuiteCompleted(types.SpecStatePassed)
+ } else {
+ suite.client.PostReportBeforeSuiteCompleted(types.SpecStateFailed)
+ }
+ }
+}
+
+func (suite *Suite) runReportSuiteNode(node Node, report types.Report) {
+ suite.writer.Truncate()
+ suite.outputInterceptor.StartInterceptingOutput()
+ suite.currentSpecReport.StartTime = time.Now()
+
+ // if we're running a ReportAfterSuite in parallel (on proc 1) we (a) wait until other procs have exited and
+ // (b) always fetch the latest report as prior ReportAfterSuites will contribute to it
+ if node.NodeType.Is(types.NodeTypeReportAfterSuite) && suite.isRunningInParallel() {
+ aggregatedReport, err := suite.client.BlockUntilAggregatedNonprimaryProcsReport()
+ if err != nil {
+ suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
+ suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure)
+ return
+ }
+ report = report.Add(aggregatedReport)
+ }
+
+ node.Body = func(ctx SpecContext) { node.ReportSuiteBody(ctx, report) }
+ suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
+
+ suite.currentSpecReport.EndTime = time.Now()
+ suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
+ suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
+ suite.currentSpecReport.CapturedStdOutErr = suite.outputInterceptor.StopInterceptingAndReturnOutput()
+}
+
+func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (types.SpecState, types.Failure) {
+ if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) {
+ suite.cleanupNodes = suite.cleanupNodes.WithoutNode(node)
+ }
+
+ interruptStatus := suite.interruptHandler.Status()
+ if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut {
+ return types.SpecStateSkipped, types.Failure{}
+ }
+ if interruptStatus.Level == interrupt_handler.InterruptLevelReportOnly && !node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt) {
+ return types.SpecStateSkipped, types.Failure{}
+ }
+ if interruptStatus.Level == interrupt_handler.InterruptLevelCleanupAndReport && !node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt|types.NodeTypesAllowedDuringCleanupInterrupt) {
+ return types.SpecStateSkipped, types.Failure{}
+ }
+
+ suite.selectiveLock.Lock()
+ suite.currentNode = node
+ suite.currentNodeStartTime = time.Now()
+ suite.currentByStep = types.SpecEvent{}
+ suite.selectiveLock.Unlock()
+ defer func() {
+ suite.selectiveLock.Lock()
+ suite.currentNode = Node{}
+ suite.currentNodeStartTime = time.Time{}
+ suite.selectiveLock.Unlock()
+ }()
+
+ if text == "" {
+ text = "TOP-LEVEL"
+ }
+ event := suite.handleSpecEvent(types.SpecEvent{
+ SpecEventType: types.SpecEventNodeStart,
+ NodeType: node.NodeType,
+ Message: text,
+ CodeLocation: node.CodeLocation,
+ })
+ defer func() {
+ suite.handleSpecEventEnd(types.SpecEventNodeEnd, event)
+ }()
+
+ var failure types.Failure
+ failure.FailureNodeType, failure.FailureNodeLocation = node.NodeType, node.CodeLocation
+ if node.NodeType.Is(types.NodeTypeIt) || node.NodeType.Is(types.NodeTypesForSuiteLevelNodes) {
+ failure.FailureNodeContext = types.FailureNodeIsLeafNode
+ } else if node.NestingLevel <= 0 {
+ failure.FailureNodeContext = types.FailureNodeAtTopLevel
+ } else {
+ failure.FailureNodeContext, failure.FailureNodeContainerIndex = types.FailureNodeInContainer, node.NestingLevel-1
+ }
+ var outcome types.SpecState
+
+ gracePeriod := suite.config.GracePeriod
+ if node.GracePeriod >= 0 {
+ gracePeriod = node.GracePeriod
+ }
+
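+ // pick the tightest applicable deadline - the suite deadline, the spec's own
+ // deadline, or this node's NodeTimeout - and record which one won so failure
+ // messages can name it (timeoutInPlay)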
+ now := time.Now()
+ deadline := suite.deadline
+ timeoutInPlay := "suite"
+ if deadline.IsZero() || (!specDeadline.IsZero() && specDeadline.Before(deadline)) {
+ deadline = specDeadline
+ timeoutInPlay = "spec"
+ }
+ if node.NodeTimeout > 0 && (deadline.IsZero() || deadline.Sub(now) > node.NodeTimeout) {
+ deadline = now.Add(node.NodeTimeout)
+ timeoutInPlay = "node"
+ }
+ if (!deadline.IsZero() && deadline.Before(now)) || interruptStatus.Interrupted() {
+ // we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't
+ if node.NodeTimeout > 0 {
+ deadline = now.Add(node.NodeTimeout)
+ timeoutInPlay = "node"
+ } else {
+ deadline = now.Add(gracePeriod)
+ timeoutInPlay = "grace period"
+ }
+ }
+
+ if !node.HasContext {
+ // this maps onto the pre-context behavior:
+ // - an interrupted node exits immediately. with this, context-less nodes that are in a spec with a SpecTimeout and/or are interrupted by other means will simply exit immediately after the timeout/interrupt
+ // - clean up nodes have up to GracePeriod (formerly hard-coded at 30s) to complete before they are interrupted
+ gracePeriod = 0
+ }
+
+ sc := NewSpecContext(suite)
+ defer sc.cancel(fmt.Errorf("spec has finished"))
+
+ suite.selectiveLock.Lock()
+ suite.currentSpecContext = sc
+ suite.selectiveLock.Unlock()
+
+ var deadlineChannel <-chan time.Time
+ if !deadline.IsZero() {
+ deadlineChannel = time.After(deadline.Sub(now))
+ }
+ var gracePeriodChannel <-chan time.Time
+
+ outcomeC := make(chan types.SpecState)
+ failureC := make(chan types.Failure)
+
+ go func() {
+ finished := false
+ defer func() {
+ if e := recover(); e != nil || !finished {
+ suite.failer.Panic(types.NewCodeLocationWithStackTrace(2), e)
+ }
+
+ outcomeFromRun, failureFromRun := suite.failer.Drain()
+ failureFromRun.TimelineLocation = suite.generateTimelineLocation()
+ outcomeC <- outcomeFromRun
+ failureC <- failureFromRun
+ }()
+
+ node.Body(sc)
+ finished = true
+ }()
+
+ // progress polling timer and channel
+ var emitProgressNow <-chan time.Time
+ var progressPoller *time.Timer
+ var pollProgressAfter, pollProgressInterval = suite.config.PollProgressAfter, suite.config.PollProgressInterval
+ if node.PollProgressAfter >= 0 {
+ pollProgressAfter = node.PollProgressAfter
+ }
+ if node.PollProgressInterval >= 0 {
+ pollProgressInterval = node.PollProgressInterval
+ }
+ if pollProgressAfter > 0 {
+ progressPoller = time.NewTimer(pollProgressAfter)
+ emitProgressNow = progressPoller.C
+ defer progressPoller.Stop()
+ }
+
+ // now we wait for an outcome, an interrupt, a timeout, or a progress poll
+ for {
+ select {
+ case outcomeFromRun := <-outcomeC:
+ failureFromRun := <-failureC
+ if outcome.Is(types.SpecStateInterrupted | types.SpecStateTimedout) {
+ // we've already been interrupted/timed out. we just managed to actually exit
+ // before the grace period elapsed
+ // if we have a failure message we attach it as an additional failure
+ if outcomeFromRun != types.SpecStatePassed {
+ additionalFailure := types.AdditionalFailure{
+ State: outcomeFromRun,
+ Failure: failure, // we make a copy - this will include all the configuration set up above...
+ }
+ // ...and then we update the failure with the details from failureFromRun
+ additionalFailure.Failure.Location, additionalFailure.Failure.ForwardedPanic, additionalFailure.Failure.TimelineLocation = failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation
+ additionalFailure.Failure.ProgressReport = types.ProgressReport{}
+ if outcome == types.SpecStateTimedout {
+ additionalFailure.Failure.Message = fmt.Sprintf("A %s timeout occurred and then the following failure was recorded in the timedout node before it exited:\n%s", timeoutInPlay, failureFromRun.Message)
+ } else {
+ additionalFailure.Failure.Message = fmt.Sprintf("An interrupt occurred and then the following failure was recorded in the interrupted node before it exited:\n%s", failureFromRun.Message)
+ }
+ suite.reporter.EmitFailure(additionalFailure.State, additionalFailure.Failure)
+ failure.AdditionalFailure = &additionalFailure
+ }
+ return outcome, failure
+ }
+ if outcomeFromRun.Is(types.SpecStatePassed) {
+ return outcomeFromRun, types.Failure{}
+ } else {
+ failure.Message, failure.Location, failure.ForwardedPanic, failure.TimelineLocation = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation
+ suite.reporter.EmitFailure(outcomeFromRun, failure)
+ return outcomeFromRun, failure
+ }
+ case <-gracePeriodChannel:
+ if node.HasContext && outcome.Is(types.SpecStateTimedout) {
+ report := suite.generateProgressReport(false)
+ report.Message = "{{bold}}{{orange}}A running node failed to exit in time{{/}}\nGinkgo is moving on but a node has timed out and failed to exit before its grace period elapsed. The node has now leaked and is running in the background.\nHere's a current progress report:"
+ suite.emitProgressReport(report)
+ }
+ return outcome, failure
+ case <-deadlineChannel:
+ // we're out of time - the outcome is a timeout and we capture the failure and progress report
+ outcome = types.SpecStateTimedout
+ failure.Message, failure.Location, failure.TimelineLocation = fmt.Sprintf("A %s timeout occurred", timeoutInPlay), node.CodeLocation, suite.generateTimelineLocation()
+ failure.ProgressReport = suite.generateProgressReport(false).WithoutCapturedGinkgoWriterOutput()
+ failure.ProgressReport.Message = fmt.Sprintf("{{bold}}This is the Progress Report generated when the %s timeout occurred:{{/}}", timeoutInPlay)
+ deadlineChannel = nil
+ suite.reporter.EmitFailure(outcome, failure)
+
+ // tell the spec to stop. it's important we generate the progress report first to make sure we capture where
+ // the spec is actually stuck
+ sc.cancel(fmt.Errorf("%s timeout occurred", timeoutInPlay))
+ // and now we wait for the grace period
+ gracePeriodChannel = time.After(gracePeriod)
+ case <-interruptStatus.Channel:
+ interruptStatus = suite.interruptHandler.Status()
+ // ignore interruption from other process if we are cleaning up or reporting
+ if interruptStatus.Cause == interrupt_handler.InterruptCauseAbortByOtherProcess &&
+ node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt|types.NodeTypesAllowedDuringCleanupInterrupt) {
+ continue
+ }
+
+ deadlineChannel = nil // don't worry about deadlines, time's up now
+
+ failureTimelineLocation := suite.generateTimelineLocation()
+ progressReport := suite.generateProgressReport(true)
+
+ if outcome == types.SpecStateInvalid {
+ outcome = types.SpecStateInterrupted
+ failure.Message, failure.Location, failure.TimelineLocation = interruptStatus.Message(), node.CodeLocation, failureTimelineLocation
+ if interruptStatus.ShouldIncludeProgressReport() {
+ failure.ProgressReport = progressReport.WithoutCapturedGinkgoWriterOutput()
+ failure.ProgressReport.Message = "{{bold}}This is the Progress Report generated when the interrupt was received:{{/}}"
+ }
+ suite.reporter.EmitFailure(outcome, failure)
+ }
+
+ progressReport = progressReport.WithoutOtherGoroutines()
+ sc.cancel(fmt.Errorf(interruptStatus.Message()))
+
+ if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut {
+ if interruptStatus.ShouldIncludeProgressReport() {
+ progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\n{{bold}}{{red}}Final interrupt received{{/}}; Ginkgo will not run any cleanup or reporting nodes and will terminate as soon as possible.\nHere's a current progress report:", interruptStatus.Message())
+ suite.emitProgressReport(progressReport)
+ }
+ return outcome, failure
+ }
+ if interruptStatus.ShouldIncludeProgressReport() {
+ if interruptStatus.Level == interrupt_handler.InterruptLevelCleanupAndReport {
+ progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nFirst interrupt received; Ginkgo will run any cleanup and reporting nodes but will skip all remaining specs. {{bold}}Interrupt again to skip cleanup{{/}}.\nHere's a current progress report:", interruptStatus.Message())
+ } else if interruptStatus.Level == interrupt_handler.InterruptLevelReportOnly {
+ progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nSecond interrupt received; Ginkgo will run any reporting nodes but will skip all remaining specs and cleanup nodes. {{bold}}Interrupt again to bail immediately{{/}}.\nHere's a current progress report:", interruptStatus.Message())
+ }
+ suite.emitProgressReport(progressReport)
+ }
+
+ if gracePeriodChannel == nil {
+ // we haven't given grace yet... so let's
+ gracePeriodChannel = time.After(gracePeriod)
+ } else {
+ // we've already given grace. time's up. now.
+ return outcome, failure
+ }
+ case <-emitProgressNow:
+ report := suite.generateProgressReport(false)
+ report.Message = "{{bold}}Automatically polling progress:{{/}}"
+ suite.emitProgressReport(report)
+ if pollProgressInterval > 0 {
+ progressPoller.Reset(pollProgressInterval)
+ }
+ }
+ }
+}
+
+// TODO: search for usages and consider if reporter.EmitFailure() is necessary
+func (suite *Suite) failureForLeafNodeWithMessage(node Node, message string) types.Failure {
+ return types.Failure{
+ Message: message,
+ Location: node.CodeLocation,
+ TimelineLocation: suite.generateTimelineLocation(),
+ FailureNodeContext: types.FailureNodeIsLeafNode,
+ FailureNodeType: node.NodeType,
+ FailureNodeLocation: node.CodeLocation,
+ }
+}
+
+func max(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/suite_patch.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite_patch.go
new file mode 100644
index 0000000..29eae02
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/suite_patch.go
@@ -0,0 +1,71 @@
+package internal
+
+import (
+ "time"
+
+ "github.com/onsi/ginkgo/v2/internal/interrupt_handler"
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type AnnotateFunc func(testName string, test types.TestSpec)
+
+func (suite *Suite) SetAnnotateFn(fn AnnotateFunc) {
+ suite.annotateFn = fn
+}
+
+func (suite *Suite) GetReport() types.Report {
+ return suite.report
+}
+
+func (suite *Suite) WalkTests(fn AnnotateFunc) {
+ if suite.phase != PhaseBuildTree {
+ panic("cannot run before building the tree = call suite.BuildTree() first")
+ }
+ ApplyNestedFocusPolicyToTree(suite.tree)
+ specs := GenerateSpecsFromTreeRoot(suite.tree)
+ for _, spec := range specs {
+ fn(spec.Text(), spec)
+ }
+}
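+
+// A minimal usage sketch (hypothetical caller; the annotate function here just
+// prints each discovered spec name):
+//
+//	if err := suite.BuildTree(); err != nil {
+//		panic(err)
+//	}
+//	suite.WalkTests(func(name string, spec types.TestSpec) {
+//		fmt.Printf("discovered: %s\n", name)
+//	})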
+
+func (suite *Suite) InPhaseBuildTree() bool {
+ return suite.phase == PhaseBuildTree
+}
+
+func (suite *Suite) ClearBeforeAndAfterSuiteNodes() {
+ // Don't build the tree multiple times - doing so re-initializes the tests
+ if !suite.InPhaseBuildTree() {
+ suite.BuildTree()
+ }
+ newNodes := Nodes{}
+ for _, node := range suite.suiteNodes {
+ if node.NodeType == types.NodeTypeBeforeSuite || node.NodeType == types.NodeTypeAfterSuite || node.NodeType == types.NodeTypeSynchronizedBeforeSuite || node.NodeType == types.NodeTypeSynchronizedAfterSuite {
+ continue
+ }
+ newNodes = append(newNodes, node)
+ }
+ suite.suiteNodes = newNodes
+}
+
+func (suite *Suite) RunSpec(spec types.TestSpec, suiteLabels Labels, suiteDescription, suitePath string, failer *Failer, writer WriterInterface, suiteConfig types.SuiteConfig, reporterConfig types.ReporterConfig) (bool, bool) {
+ if suite.phase != PhaseBuildTree {
+ panic("cannot run before building the tree = call suite.BuildTree() first")
+ }
+
+ suite.phase = PhaseRun
+ suite.client = nil
+ suite.failer = failer
+ suite.reporter = reporters.NewDefaultReporter(reporterConfig, writer)
+ suite.writer = writer
+ suite.outputInterceptor = NoopOutputInterceptor{}
+ if suiteConfig.Timeout > 0 {
+ suite.deadline = time.Now().Add(suiteConfig.Timeout)
+ }
+ suite.interruptHandler = interrupt_handler.NewInterruptHandler(nil)
+ suite.config = suiteConfig
+
+ success := suite.runSpecs(suiteDescription, suiteLabels, suitePath, false, []Spec{spec.(Spec)})
+
+ return success, false
+}
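+
+// A minimal calling sketch (assumes `spec` came from WalkTests, the default
+// configs are acceptable, and the suite path is illustrative only):
+//
+//	suiteConfig, reporterConfig := types.NewDefaultSuiteConfig(), types.NewDefaultReporterConfig()
+//	writer := NewWriter(os.Stdout)
+//	passed, _ := suite.RunSpec(spec, Labels{}, "example suite", "/path/to/suite", NewFailer(), writer, suiteConfig, reporterConfig)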
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go
new file mode 100644
index 0000000..73e2655
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go
@@ -0,0 +1,210 @@
+package testingtproxy
+
+import (
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/internal"
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type failFunc func(message string, callerSkip ...int)
+type skipFunc func(message string, callerSkip ...int)
+type cleanupFunc func(args ...any)
+type reportFunc func() types.SpecReport
+type addReportEntryFunc func(names string, args ...any)
+type ginkgoWriterInterface interface {
+ io.Writer
+
+ Print(a ...interface{})
+ Printf(format string, a ...interface{})
+ Println(a ...interface{})
+}
+type ginkgoRecoverFunc func()
+type attachProgressReporterFunc func(func() string) func()
+
+func New(writer ginkgoWriterInterface, fail failFunc, skip skipFunc, cleanup cleanupFunc, report reportFunc, addReportEntry addReportEntryFunc, ginkgoRecover ginkgoRecoverFunc, attachProgressReporter attachProgressReporterFunc, randomSeed int64, parallelProcess int, parallelTotal int, noColor bool, offset int) *ginkgoTestingTProxy {
+ return &ginkgoTestingTProxy{
+ fail: fail,
+ offset: offset,
+ writer: writer,
+ skip: skip,
+ cleanup: cleanup,
+ report: report,
+ addReportEntry: addReportEntry,
+ ginkgoRecover: ginkgoRecover,
+ attachProgressReporter: attachProgressReporter,
+ randomSeed: randomSeed,
+ parallelProcess: parallelProcess,
+ parallelTotal: parallelTotal,
+ f: formatter.NewWithNoColorBool(noColor),
+ }
+}
+
+type ginkgoTestingTProxy struct {
+ fail failFunc
+ skip skipFunc
+ cleanup cleanupFunc
+ report reportFunc
+ offset int
+ writer ginkgoWriterInterface
+ addReportEntry addReportEntryFunc
+ ginkgoRecover ginkgoRecoverFunc
+ attachProgressReporter attachProgressReporterFunc
+ randomSeed int64
+ parallelProcess int
+ parallelTotal int
+ f formatter.Formatter
+}
+
+// basic testing.T support
+
+func (t *ginkgoTestingTProxy) Cleanup(f func()) {
+ t.cleanup(f, internal.Offset(1))
+}
+
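+// Setenv mirrors testing.T.Setenv: it sets the variable for the duration of
+// the spec and registers a cleanup that restores (or unsets) the original value.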
+func (t *ginkgoTestingTProxy) Setenv(key, value string) {
+ originalValue, exists := os.LookupEnv(key)
+ if exists {
+ t.cleanup(os.Setenv, key, originalValue, internal.Offset(1))
+ } else {
+ t.cleanup(os.Unsetenv, key, internal.Offset(1))
+ }
+
+ err := os.Setenv(key, value)
+ if err != nil {
+ t.fail(fmt.Sprintf("Failed to set environment variable: %v", err), 1)
+ }
+}
+
+func (t *ginkgoTestingTProxy) Error(args ...interface{}) {
+ t.fail(fmt.Sprintln(args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Errorf(format string, args ...interface{}) {
+ t.fail(fmt.Sprintf(format, args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Fail() {
+ t.fail("failed", t.offset)
+}
+
+func (t *ginkgoTestingTProxy) FailNow() {
+ t.fail("failed", t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Failed() bool {
+ return t.report().Failed()
+}
+
+func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) {
+ t.fail(fmt.Sprintln(args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) {
+ t.fail(fmt.Sprintf(format, args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Helper() {
+ types.MarkAsHelper(1)
+}
+
+func (t *ginkgoTestingTProxy) Log(args ...interface{}) {
+ fmt.Fprintln(t.writer, args...)
+}
+
+func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) {
+ t.Log(fmt.Sprintf(format, args...))
+}
+
+func (t *ginkgoTestingTProxy) Name() string {
+ return t.report().FullText()
+}
+
+func (t *ginkgoTestingTProxy) Parallel() {
+ // No-op
+}
+
+func (t *ginkgoTestingTProxy) Skip(args ...interface{}) {
+ t.skip(fmt.Sprintln(args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) SkipNow() {
+ t.skip("skip", t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) {
+ t.skip(fmt.Sprintf(format, args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Skipped() bool {
+ return t.report().State.Is(types.SpecStateSkipped)
+}
+
+func (t *ginkgoTestingTProxy) TempDir() string {
+ tmpDir, err := os.MkdirTemp("", "ginkgo")
+ if err != nil {
+ t.fail(fmt.Sprintf("Failed to create temporary directory: %v", err), 1)
+ return ""
+ }
+ t.cleanup(os.RemoveAll, tmpDir)
+
+ return tmpDir
+}
+
+// FullGinkgoTInterface
+func (t *ginkgoTestingTProxy) AddReportEntryVisibilityAlways(name string, args ...any) {
+ finalArgs := []any{internal.Offset(1), types.ReportEntryVisibilityAlways}
+ t.addReportEntry(name, append(finalArgs, args...)...)
+}
+func (t *ginkgoTestingTProxy) AddReportEntryVisibilityFailureOrVerbose(name string, args ...any) {
+ finalArgs := []any{internal.Offset(1), types.ReportEntryVisibilityFailureOrVerbose}
+ t.addReportEntry(name, append(finalArgs, args...)...)
+}
+func (t *ginkgoTestingTProxy) AddReportEntryVisibilityNever(name string, args ...any) {
+ finalArgs := []any{internal.Offset(1), types.ReportEntryVisibilityNever}
+ t.addReportEntry(name, append(finalArgs, args...)...)
+}
+func (t *ginkgoTestingTProxy) Print(a ...any) {
+ t.writer.Print(a...)
+}
+func (t *ginkgoTestingTProxy) Printf(format string, a ...any) {
+ t.writer.Printf(format, a...)
+}
+func (t *ginkgoTestingTProxy) Println(a ...any) {
+ t.writer.Println(a...)
+}
+func (t *ginkgoTestingTProxy) F(format string, args ...any) string {
+ return t.f.F(format, args...)
+}
+func (t *ginkgoTestingTProxy) Fi(indentation uint, format string, args ...any) string {
+ return t.f.Fi(indentation, format, args...)
+}
+func (t *ginkgoTestingTProxy) Fiw(indentation uint, maxWidth uint, format string, args ...any) string {
+ return t.f.Fiw(indentation, maxWidth, format, args...)
+}
+func (t *ginkgoTestingTProxy) RenderTimeline() string {
+ return reporters.RenderTimeline(t.report(), false)
+}
+func (t *ginkgoTestingTProxy) GinkgoRecover() {
+ t.ginkgoRecover()
+}
+func (t *ginkgoTestingTProxy) DeferCleanup(args ...any) {
+ finalArgs := []any{internal.Offset(1)}
+ t.cleanup(append(finalArgs, args...)...)
+}
+func (t *ginkgoTestingTProxy) RandomSeed() int64 {
+ return t.randomSeed
+}
+func (t *ginkgoTestingTProxy) ParallelProcess() int {
+ return t.parallelProcess
+}
+func (t *ginkgoTestingTProxy) ParallelTotal() int {
+ return t.parallelTotal
+}
+func (t *ginkgoTestingTProxy) AttachProgressReporter(f func() string) func() {
+ return t.attachProgressReporter(f)
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/tree.go b/vendor/github.com/onsi/ginkgo/v2/internal/tree.go
new file mode 100644
index 0000000..f9d1eeb
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/tree.go
@@ -0,0 +1,77 @@
+package internal
+
+import "github.com/onsi/ginkgo/v2/types"
+
+type TreeNode struct {
+ Node Node
+ Parent *TreeNode
+ Children TreeNodes
+}
+
+func (tn *TreeNode) AppendChild(child *TreeNode) {
+ tn.Children = append(tn.Children, child)
+ child.Parent = tn
+}
+
+func (tn *TreeNode) AncestorNodeChain() Nodes {
+ if tn.Parent == nil || tn.Parent.Node.IsZero() {
+ return Nodes{tn.Node}
+ }
+ return append(tn.Parent.AncestorNodeChain(), tn.Node)
+}
+
+type TreeNodes []*TreeNode
+
+func (tn TreeNodes) Nodes() Nodes {
+ out := make(Nodes, len(tn))
+ for i := range tn {
+ out[i] = tn[i].Node
+ }
+ return out
+}
+
+func (tn TreeNodes) WithID(id uint) *TreeNode {
+ for i := range tn {
+ if tn[i].Node.ID == id {
+ return tn[i]
+ }
+ }
+
+ return nil
+}
+
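+// GenerateSpecsFromTreeRoot flattens the container tree into Specs. The walk is
+// depth-first: setup nodes to the left of each container/It are accumulated in
+// lNodes and teardown nodes to the right in rNodes, so every generated Spec
+// carries the full, ordered chain of nodes that must run for its It.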
+func GenerateSpecsFromTreeRoot(tree *TreeNode) Specs {
+ var walkTree func(nestingLevel int, lNodes Nodes, rNodes Nodes, trees TreeNodes) Specs
+ walkTree = func(nestingLevel int, lNodes Nodes, rNodes Nodes, trees TreeNodes) Specs {
+ tests := Specs{}
+
+ nodes := make(Nodes, len(trees))
+ for i := range trees {
+ nodes[i] = trees[i].Node
+ nodes[i].NestingLevel = nestingLevel
+ }
+
+ for i := range nodes {
+ if !nodes[i].NodeType.Is(types.NodeTypesForContainerAndIt) {
+ continue
+ }
+ leftNodes, rightNodes := nodes.SplitAround(nodes[i])
+ leftNodes = leftNodes.WithoutType(types.NodeTypesForContainerAndIt)
+ rightNodes = rightNodes.WithoutType(types.NodeTypesForContainerAndIt)
+
+ leftNodes = lNodes.CopyAppend(leftNodes...)
+ rightNodes = rightNodes.CopyAppend(rNodes...)
+
+ if nodes[i].NodeType.Is(types.NodeTypeIt) {
+ tests = append(tests, Spec{Nodes: leftNodes.CopyAppend(nodes[i]).CopyAppend(rightNodes...)})
+ } else {
+ treeNode := trees.WithID(nodes[i].ID)
+ tests = append(tests, walkTree(nestingLevel+1, leftNodes.CopyAppend(nodes[i]), rightNodes, treeNode.Children)...)
+ }
+ }
+
+ return tests
+ }
+
+ return walkTree(0, Nodes{}, Nodes{}, tree.Children)
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/writer.go b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go
new file mode 100644
index 0000000..aab42d5
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go
@@ -0,0 +1,144 @@
+package internal
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "sync"
+
+ "github.com/go-logr/logr"
+ "github.com/go-logr/logr/funcr"
+)
+
+type WriterMode uint
+
+const (
+ WriterModeStreamAndBuffer WriterMode = iota
+ WriterModeBufferOnly
+)
+
+type WriterInterface interface {
+ io.Writer
+
+ Truncate()
+ Bytes() []byte
+ Len() int
+}
+
+// Writer implements WriterInterface and GinkgoWriterInterface
+type Writer struct {
+ buffer *bytes.Buffer
+ outWriter io.Writer
+ lock *sync.Mutex
+ mode WriterMode
+
+ streamIndent []byte
+ indentNext bool
+
+ teeWriters []io.Writer
+}
+
+func NewWriter(outWriter io.Writer) *Writer {
+ return &Writer{
+ buffer: &bytes.Buffer{},
+ lock: &sync.Mutex{},
+ outWriter: outWriter,
+ mode: WriterModeStreamAndBuffer,
+ streamIndent: []byte(" "),
+ indentNext: true,
+ }
+}
+
+func (w *Writer) SetMode(mode WriterMode) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ w.mode = mode
+}
+
+func (w *Writer) Len() int {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ return w.buffer.Len()
+}
+
+var newline = []byte("\n")
+
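+// Write always appends to the internal buffer and copies to any tee writers.
+// In WriterModeStreamAndBuffer it additionally streams to outWriter line by
+// line, prefixing each fresh line with streamIndent so live output is nested.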
+func (w *Writer) Write(b []byte) (n int, err error) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ for _, teeWriter := range w.teeWriters {
+ teeWriter.Write(b)
+ }
+
+ if w.mode == WriterModeStreamAndBuffer {
+ line, remaining, found := []byte{}, b, false
+ for len(remaining) > 0 {
+ line, remaining, found = bytes.Cut(remaining, newline)
+ if len(line) > 0 {
+ if w.indentNext {
+ w.outWriter.Write(w.streamIndent)
+ w.indentNext = false
+ }
+ w.outWriter.Write(line)
+ }
+ if found {
+ w.outWriter.Write(newline)
+ w.indentNext = true
+ }
+ }
+ }
+ return w.buffer.Write(b)
+}
+
+func (w *Writer) Truncate() {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ w.buffer.Reset()
+}
+
+func (w *Writer) Bytes() []byte {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ b := w.buffer.Bytes()
+ copied := make([]byte, len(b))
+ copy(copied, b)
+ return copied
+}
+
+// GinkgoWriterInterface
+func (w *Writer) TeeTo(writer io.Writer) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ w.teeWriters = append(w.teeWriters, writer)
+}
+
+func (w *Writer) ClearTeeWriters() {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ w.teeWriters = []io.Writer{}
+}
+
+func (w *Writer) Print(a ...interface{}) {
+ fmt.Fprint(w, a...)
+}
+
+func (w *Writer) Printf(format string, a ...interface{}) {
+ fmt.Fprintf(w, format, a...)
+}
+
+func (w *Writer) Println(a ...interface{}) {
+ fmt.Fprintln(w, a...)
+}
+
+func GinkgoLogrFunc(writer *Writer) logr.Logger {
+ return funcr.New(func(prefix, args string) {
+ if prefix == "" {
+ writer.Printf("%s\n", args)
+ } else {
+ writer.Printf("%s %s\n", prefix, args)
+ }
+ }, funcr.Options{})
+}
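+
+// A usage sketch (assuming `writer` is the suite's *Writer, e.g. the one backing
+// GinkgoWriter; shown for illustration only):
+//
+//	logger := GinkgoLogrFunc(writer)
+//	logger.Info("reconciling", "object", "default/my-pod")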
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go
new file mode 100644
index 0000000..4807304
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go
@@ -0,0 +1,788 @@
+/*
+Ginkgo's Default Reporter
+
+A number of command line flags are available to tweak Ginkgo's default output.
+
+These are documented [here](http://onsi.github.io/ginkgo/#running_tests)
+*/
+package reporters
+
+import (
+ "fmt"
+ "io"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type DefaultReporter struct {
+ conf types.ReporterConfig
+ writer io.Writer
+
+ // managing the emission stream
+ lastCharWasNewline bool
+ lastEmissionWasDelimiter bool
+
+ // rendering
+ specDenoter string
+ retryDenoter string
+ formatter formatter.Formatter
+
+ runningInParallel bool
+ lock *sync.Mutex
+}
+
+func NewDefaultReporterUnderTest(conf types.ReporterConfig, writer io.Writer) *DefaultReporter {
+ reporter := NewDefaultReporter(conf, writer)
+ reporter.formatter = formatter.New(formatter.ColorModePassthrough)
+
+ return reporter
+}
+
+func NewDefaultReporter(conf types.ReporterConfig, writer io.Writer) *DefaultReporter {
+ reporter := &DefaultReporter{
+ conf: conf,
+ writer: writer,
+
+ lastCharWasNewline: true,
+ lastEmissionWasDelimiter: false,
+
+ specDenoter: "•",
+ retryDenoter: "↺",
+ formatter: formatter.NewWithNoColorBool(conf.NoColor),
+ lock: &sync.Mutex{},
+ }
+ if runtime.GOOS == "windows" {
+ reporter.specDenoter = "+"
+ reporter.retryDenoter = "R"
+ }
+
+ return reporter
+}
+
+/* The Reporter Interface */
+
+func (r *DefaultReporter) SuiteWillBegin(report types.Report) {
+ if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) {
+ r.emit(r.f("[%d] {{bold}}%s{{/}} ", report.SuiteConfig.RandomSeed, report.SuiteDescription))
+ if len(report.SuiteLabels) > 0 {
+ r.emit(r.f("{{coral}}[%s]{{/}} ", strings.Join(report.SuiteLabels, ", ")))
+ }
+ r.emit(r.f("- %d/%d specs ", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs))
+ if report.SuiteConfig.ParallelTotal > 1 {
+ r.emit(r.f("- %d procs ", report.SuiteConfig.ParallelTotal))
+ }
+ } else {
+ banner := r.f("Running Suite: %s - %s", report.SuiteDescription, report.SuitePath)
+ r.emitBlock(banner)
+ bannerWidth := len(banner)
+ if len(report.SuiteLabels) > 0 {
+ labels := strings.Join(report.SuiteLabels, ", ")
+ r.emitBlock(r.f("{{coral}}[%s]{{/}} ", labels))
+ if len(labels)+2 > bannerWidth {
+ bannerWidth = len(labels) + 2
+ }
+ }
+ r.emitBlock(strings.Repeat("=", bannerWidth))
+
+ out := r.f("Random Seed: {{bold}}%d{{/}}", report.SuiteConfig.RandomSeed)
+ if report.SuiteConfig.RandomizeAllSpecs {
+ out += r.f(" - will randomize all specs")
+ }
+ r.emitBlock(out)
+ r.emit("\n")
+ r.emitBlock(r.f("Will run {{bold}}%d{{/}} of {{bold}}%d{{/}} specs", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs))
+ if report.SuiteConfig.ParallelTotal > 1 {
+ r.emitBlock(r.f("Running in parallel across {{bold}}%d{{/}} processes", report.SuiteConfig.ParallelTotal))
+ }
+ }
+}
+
+func (r *DefaultReporter) SuiteDidEnd(report types.Report) {
+ failures := report.SpecReports.WithState(types.SpecStateFailureStates)
+ if len(failures) > 0 {
+ r.emitBlock("\n")
+ if len(failures) > 1 {
+ r.emitBlock(r.f("{{red}}{{bold}}Summarizing %d Failures:{{/}}", len(failures)))
+ } else {
+ r.emitBlock(r.f("{{red}}{{bold}}Summarizing 1 Failure:{{/}}"))
+ }
+ for _, specReport := range failures {
+ highlightColor, heading := "{{red}}", "[FAIL]"
+ switch specReport.State {
+ case types.SpecStatePanicked:
+ highlightColor, heading = "{{magenta}}", "[PANICKED!]"
+ case types.SpecStateAborted:
+ highlightColor, heading = "{{coral}}", "[ABORTED]"
+ case types.SpecStateTimedout:
+ highlightColor, heading = "{{orange}}", "[TIMEDOUT]"
+ case types.SpecStateInterrupted:
+ highlightColor, heading = "{{orange}}", "[INTERRUPTED]"
+ }
+ locationBlock := r.codeLocationBlock(specReport, highlightColor, false, true)
+ r.emitBlock(r.fi(1, highlightColor+"%s{{/}} %s", heading, locationBlock))
+ }
+ }
+
+ //summarize the suite
+ if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) && report.SuiteSucceeded {
+ r.emit(r.f(" {{green}}SUCCESS!{{/}} %s ", report.RunTime))
+ return
+ }
+
+ r.emitBlock("\n")
+ color, status := "{{green}}{{bold}}", "SUCCESS!"
+ if !report.SuiteSucceeded {
+ color, status = "{{red}}{{bold}}", "FAIL!"
+ }
+
+ specs := report.SpecReports.WithLeafNodeType(types.NodeTypeIt) //exclude any suite setup nodes
+ r.emitBlock(r.f(color+"Ran %d of %d Specs in %.3f seconds{{/}}",
+ specs.CountWithState(types.SpecStatePassed)+specs.CountWithState(types.SpecStateFailureStates),
+ report.PreRunStats.TotalSpecs,
+ report.RunTime.Seconds()),
+ )
+
+ switch len(report.SpecialSuiteFailureReasons) {
+ case 0:
+ r.emit(r.f(color+"%s{{/}} -- ", status))
+ case 1:
+ r.emit(r.f(color+"%s - %s{{/}} -- ", status, report.SpecialSuiteFailureReasons[0]))
+ default:
+ r.emitBlock(r.f(color+"%s - %s{{/}}\n", status, strings.Join(report.SpecialSuiteFailureReasons, ", ")))
+ }
+
+ if len(specs) == 0 && report.SpecReports.WithLeafNodeType(types.NodeTypeBeforeSuite|types.NodeTypeSynchronizedBeforeSuite).CountWithState(types.SpecStateFailureStates) > 0 {
+ r.emit(r.f("{{cyan}}{{bold}}A BeforeSuite node failed so all tests were skipped.{{/}}\n"))
+ } else {
+ r.emit(r.f("{{green}}{{bold}}%d Passed{{/}} | ", specs.CountWithState(types.SpecStatePassed)))
+ r.emit(r.f("{{red}}{{bold}}%d Failed{{/}} | ", specs.CountWithState(types.SpecStateFailureStates)))
+ if specs.CountOfFlakedSpecs() > 0 {
+ r.emit(r.f("{{light-yellow}}{{bold}}%d Flaked{{/}} | ", specs.CountOfFlakedSpecs()))
+ }
+ if specs.CountOfRepeatedSpecs() > 0 {
+ r.emit(r.f("{{light-yellow}}{{bold}}%d Repeated{{/}} | ", specs.CountOfRepeatedSpecs()))
+ }
+ r.emit(r.f("{{yellow}}{{bold}}%d Pending{{/}} | ", specs.CountWithState(types.SpecStatePending)))
+ r.emit(r.f("{{cyan}}{{bold}}%d Skipped{{/}}\n", specs.CountWithState(types.SpecStateSkipped)))
+ }
+}
+
+func (r *DefaultReporter) WillRun(report types.SpecReport) {
+ v := r.conf.Verbosity()
+ if v.LT(types.VerbosityLevelVerbose) || report.State.Is(types.SpecStatePending|types.SpecStateSkipped) || report.RunningInParallel {
+ return
+ }
+
+ r.emitDelimiter(0)
+ r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false)))
+}
+
+func (r *DefaultReporter) wrapTextBlock(sectionName string, fn func()) {
+ r.emitBlock("\n")
+ if r.conf.GithubOutput {
+ r.emitBlock(r.fi(1, "::group::%s", sectionName))
+ } else {
+ r.emitBlock(r.fi(1, "{{gray}}%s >>{{/}}", sectionName))
+ }
+ fn()
+ if r.conf.GithubOutput {
+ r.emitBlock(r.fi(1, "::endgroup::"))
+ } else {
+ r.emitBlock(r.fi(1, "{{gray}}<< %s{{/}}", sectionName))
+ }
+}
+
+func (r *DefaultReporter) DidRun(report types.SpecReport) {
+ v := r.conf.Verbosity()
+ inParallel := report.RunningInParallel
+
+ //should we completely omit this spec?
+ if report.State.Is(types.SpecStateSkipped) && r.conf.SilenceSkips {
+ return
+ }
+
+ header := r.specDenoter
+ if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
+ header = fmt.Sprintf("[%s]", report.LeafNodeType)
+ }
+ highlightColor := r.highlightColorForState(report.State)
+
+ // have we already been streaming the timeline?
+ timelineHasBeenStreaming := v.GTE(types.VerbosityLevelVerbose) && !inParallel
+
+ // should we show the timeline?
+ var timeline types.Timeline
+ showTimeline := !timelineHasBeenStreaming && (v.GTE(types.VerbosityLevelVerbose) || report.Failed())
+ if showTimeline {
+ timeline = report.Timeline().WithoutHiddenReportEntries()
+ keepVeryVerboseSpecEvents := v.Is(types.VerbosityLevelVeryVerbose) ||
+ (v.Is(types.VerbosityLevelVerbose) && r.conf.ShowNodeEvents) ||
+ (report.Failed() && r.conf.ShowNodeEvents)
+ if !keepVeryVerboseSpecEvents {
+ timeline = timeline.WithoutVeryVerboseSpecEvents()
+ }
+ if len(timeline) == 0 && report.CapturedGinkgoWriterOutput == "" {
+ // the timeline is completely empty - don't show it
+ showTimeline = false
+ }
+ if v.LT(types.VerbosityLevelVeryVerbose) && report.CapturedGinkgoWriterOutput == "" && len(timeline) > 0 {
+ //if we aren't -vv and the timeline only has a single failure, don't show it as it will appear at the end of the report
+ failure, isFailure := timeline[0].(types.Failure)
+ if isFailure && (len(timeline) == 1 || (len(timeline) == 2 && failure.AdditionalFailure != nil)) {
+ showTimeline = false
+ }
+ }
+ }
+
+ // should we have a separate section for always-visible reports?
+ showSeparateVisibilityAlwaysReportsSection := !timelineHasBeenStreaming && !showTimeline && report.ReportEntries.HasVisibility(types.ReportEntryVisibilityAlways)
+
+ // should we have a separate section for captured stdout/stderr
+ showSeparateStdSection := inParallel && (report.CapturedStdOutErr != "")
+
+ // given all that - do we have any actual content to show? or are we a single denoter in a stream?
+ reportHasContent := v.Is(types.VerbosityLevelVeryVerbose) || showTimeline || showSeparateVisibilityAlwaysReportsSection || showSeparateStdSection || report.Failed() || (v.Is(types.VerbosityLevelVerbose) && !report.State.Is(types.SpecStateSkipped))
+
+ // should we show a runtime?
+ includeRuntime := !report.State.Is(types.SpecStateSkipped|types.SpecStatePending) || (report.State.Is(types.SpecStateSkipped) && report.Failure.Message != "")
+
+ // should we show the codelocation block?
+ showCodeLocation := !timelineHasBeenStreaming || !report.State.Is(types.SpecStatePassed)
+
+ switch report.State {
+ case types.SpecStatePassed:
+ if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) && !reportHasContent {
+ return
+ }
+ if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
+ header = fmt.Sprintf("%s PASSED", header)
+ }
+ if report.NumAttempts > 1 && report.MaxFlakeAttempts > 1 {
+ header, reportHasContent = fmt.Sprintf("%s [FLAKEY TEST - TOOK %d ATTEMPTS TO PASS]", r.retryDenoter, report.NumAttempts), true
+ }
+ case types.SpecStatePending:
+ header = "P"
+ if v.GT(types.VerbosityLevelSuccinct) {
+ header, reportHasContent = "P [PENDING]", true
+ }
+ case types.SpecStateSkipped:
+ header = "S"
+ if v.Is(types.VerbosityLevelVeryVerbose) || (v.Is(types.VerbosityLevelVerbose) && report.Failure.Message != "") {
+ header, reportHasContent = "S [SKIPPED]", true
+ }
+ default:
+ header = fmt.Sprintf("%s [%s]", header, r.humanReadableState(report.State))
+ if report.MaxMustPassRepeatedly > 1 {
+ header = fmt.Sprintf("%s DURING REPETITION #%d", header, report.NumAttempts)
+ }
+ }
+
+ // If we have no content to show, just emit the header and return
+ if !reportHasContent {
+ r.emit(r.f(highlightColor + header + "{{/}}"))
+ if r.conf.ForceNewlines {
+ r.emit("\n")
+ }
+ return
+ }
+
+ if includeRuntime {
+ header = r.f("%s [%.3f seconds]", header, report.RunTime.Seconds())
+ }
+
+ // Emit header
+ if !timelineHasBeenStreaming {
+ r.emitDelimiter(0)
+ }
+ r.emitBlock(r.f(highlightColor + header + "{{/}}"))
+ if showCodeLocation {
+ r.emitBlock(r.codeLocationBlock(report, highlightColor, v.Is(types.VerbosityLevelVeryVerbose), false))
+ }
+
+ //Emit Stdout/Stderr Output
+ if showSeparateStdSection {
+ r.wrapTextBlock("Captured StdOut/StdErr Output", func() {
+ r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr))
+ })
+ }
+
+ if showSeparateVisibilityAlwaysReportsSection {
+ r.wrapTextBlock("Report Entries", func() {
+ for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) {
+ r.emitReportEntry(1, entry)
+ }
+ })
+ }
+
+ if showTimeline {
+ r.wrapTextBlock("Timeline", func() {
+ r.emitTimeline(1, report, timeline)
+ })
+ }
+
+ // Emit Failure Message
+ if !report.Failure.IsZero() && !v.Is(types.VerbosityLevelVeryVerbose) {
+ r.emitBlock("\n")
+ r.emitFailure(1, report.State, report.Failure, true)
+ if len(report.AdditionalFailures) > 0 {
+ r.emitBlock(r.fi(1, "\nThere were {{bold}}{{red}}additional failures{{/}} detected. To view them in detail run {{bold}}ginkgo -vv{{/}}"))
+ }
+ }
+
+ r.emitDelimiter(0)
+}
+
+func (r *DefaultReporter) highlightColorForState(state types.SpecState) string {
+ switch state {
+ case types.SpecStatePassed:
+ return "{{green}}"
+ case types.SpecStatePending:
+ return "{{yellow}}"
+ case types.SpecStateSkipped:
+ return "{{cyan}}"
+ case types.SpecStateFailed:
+ return "{{red}}"
+ case types.SpecStateTimedout:
+ return "{{orange}}"
+ case types.SpecStatePanicked:
+ return "{{magenta}}"
+ case types.SpecStateInterrupted:
+ return "{{orange}}"
+ case types.SpecStateAborted:
+ return "{{coral}}"
+ default:
+ return "{{gray}}"
+ }
+}
+
+func (r *DefaultReporter) humanReadableState(state types.SpecState) string {
+ return strings.ToUpper(state.String())
+}
+
+func (r *DefaultReporter) emitTimeline(indent uint, report types.SpecReport, timeline types.Timeline) {
+ isVeryVerbose := r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose)
+ gw := report.CapturedGinkgoWriterOutput
+ cursor := 0
+ for _, entry := range timeline {
+ tl := entry.GetTimelineLocation()
+ if tl.Offset < len(gw) {
+ r.emit(r.fi(indent, "%s", gw[cursor:tl.Offset]))
+ cursor = tl.Offset
+ } else if cursor < len(gw) {
+ r.emit(r.fi(indent, "%s", gw[cursor:]))
+ cursor = len(gw)
+ }
+ switch x := entry.(type) {
+ case types.Failure:
+ if isVeryVerbose {
+ r.emitFailure(indent, report.State, x, false)
+ } else {
+ r.emitShortFailure(indent, report.State, x)
+ }
+ case types.AdditionalFailure:
+ if isVeryVerbose {
+ r.emitFailure(indent, x.State, x.Failure, true)
+ } else {
+ r.emitShortFailure(indent, x.State, x.Failure)
+ }
+ case types.ReportEntry:
+ r.emitReportEntry(indent, x)
+ case types.ProgressReport:
+ r.emitProgressReport(indent, false, x)
+ case types.SpecEvent:
+ if isVeryVerbose || !x.IsOnlyVisibleAtVeryVerbose() || r.conf.ShowNodeEvents {
+ r.emitSpecEvent(indent, x, isVeryVerbose)
+ }
+ }
+ }
+ if cursor < len(gw) {
+ r.emit(r.fi(indent, "%s", gw[cursor:]))
+ }
+}
+
+func (r *DefaultReporter) EmitFailure(state types.SpecState, failure types.Failure) {
+ if r.conf.Verbosity().Is(types.VerbosityLevelVerbose) {
+ r.emitShortFailure(1, state, failure)
+ } else if r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose) {
+ r.emitFailure(1, state, failure, true)
+ }
+}
+
+func (r *DefaultReporter) emitShortFailure(indent uint, state types.SpecState, failure types.Failure) {
+ r.emitBlock(r.fi(indent, r.highlightColorForState(state)+"[%s]{{/}} in [%s] - %s {{gray}}@ %s{{/}}",
+ r.humanReadableState(state),
+ failure.FailureNodeType,
+ failure.Location,
+ failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT),
+ ))
+}
+
+func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) {
+ highlightColor := r.highlightColorForState(state)
+ r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message))
+ if r.conf.GithubOutput {
+ level := "error"
+ if state.Is(types.SpecStateSkipped) {
+ level = "notice"
+ }
+ r.emitBlock(r.fi(indent, "::%s file=%s,line=%d::%s %s", level, failure.Location.FileName, failure.Location.LineNumber, failure.FailureNodeType, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
+ } else {
+ r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
+ }
+ if failure.ForwardedPanic != "" {
+ r.emitBlock("\n")
+ r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic))
+ }
+
+ if r.conf.FullTrace || failure.ForwardedPanic != "" {
+ r.emitBlock("\n")
+ r.emitBlock(r.fi(indent, highlightColor+"Full Stack Trace{{/}}"))
+ r.emitBlock(r.fi(indent+1, "%s", failure.Location.FullStackTrace))
+ }
+
+ if !failure.ProgressReport.IsZero() {
+ r.emitBlock("\n")
+ r.emitProgressReport(indent, false, failure.ProgressReport)
+ }
+
+ if failure.AdditionalFailure != nil && includeAdditionalFailure {
+ r.emitBlock("\n")
+ r.emitFailure(indent, failure.AdditionalFailure.State, failure.AdditionalFailure.Failure, true)
+ }
+}
+
+func (r *DefaultReporter) EmitProgressReport(report types.ProgressReport) {
+ r.emitDelimiter(1)
+
+ if report.RunningInParallel {
+ r.emit(r.fi(1, "{{coral}}Progress Report for Ginkgo Process #{{bold}}%d{{/}}\n", report.ParallelProcess))
+ }
+ shouldEmitGW := report.RunningInParallel || r.conf.Verbosity().LT(types.VerbosityLevelVerbose)
+ r.emitProgressReport(1, shouldEmitGW, report)
+ r.emitDelimiter(1)
+}
+
+func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput bool, report types.ProgressReport) {
+ if report.Message != "" {
+ r.emitBlock(r.fi(indent, report.Message+"\n"))
+ indent += 1
+ }
+ if report.LeafNodeText != "" {
+ subjectIndent := indent
+ if len(report.ContainerHierarchyTexts) > 0 {
+ r.emit(r.fi(indent, r.cycleJoin(report.ContainerHierarchyTexts, " ")))
+ r.emit(" ")
+ subjectIndent = 0
+ }
+ r.emit(r.fi(subjectIndent, "{{bold}}{{orange}}%s{{/}} (Spec Runtime: %s)\n", report.LeafNodeText, report.Time().Sub(report.SpecStartTime).Round(time.Millisecond)))
+ r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.LeafNodeLocation))
+ indent += 1
+ }
+ if report.CurrentNodeType != types.NodeTypeInvalid {
+ r.emit(r.fi(indent, "In {{bold}}{{orange}}[%s]{{/}}", report.CurrentNodeType))
+ if report.CurrentNodeText != "" && !report.CurrentNodeType.Is(types.NodeTypeIt) {
+ r.emit(r.f(" {{bold}}{{orange}}%s{{/}}", report.CurrentNodeText))
+ }
+
+ r.emit(r.f(" (Node Runtime: %s)\n", report.Time().Sub(report.CurrentNodeStartTime).Round(time.Millisecond)))
+ r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentNodeLocation))
+ indent += 1
+ }
+ if report.CurrentStepText != "" {
+ r.emit(r.fi(indent, "At {{bold}}{{orange}}[By Step] %s{{/}} (Step Runtime: %s)\n", report.CurrentStepText, report.Time().Sub(report.CurrentStepStartTime).Round(time.Millisecond)))
+ r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentStepLocation))
+ indent += 1
+ }
+
+ if indent > 0 {
+ indent -= 1
+ }
+
+ if emitGinkgoWriterOutput && report.CapturedGinkgoWriterOutput != "" {
+ r.emit("\n")
+ r.emitBlock(r.fi(indent, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}"))
+ limit, lines := 10, strings.Split(report.CapturedGinkgoWriterOutput, "\n")
+ if len(lines) <= limit {
+ r.emitBlock(r.fi(indent+1, "%s", report.CapturedGinkgoWriterOutput))
+ } else {
+ r.emitBlock(r.fi(indent+1, "{{gray}}...{{/}}"))
+ for _, line := range lines[len(lines)-limit-1:] {
+ r.emitBlock(r.fi(indent+1, "%s", line))
+ }
+ }
+ r.emitBlock(r.fi(indent, "{{gray}}<< End Captured GinkgoWriter Output{{/}}"))
+ }
+
+ if !report.SpecGoroutine().IsZero() {
+ r.emit("\n")
+ r.emit(r.fi(indent, "{{bold}}{{underline}}Spec Goroutine{{/}}\n"))
+ r.emitGoroutines(indent, report.SpecGoroutine())
+ }
+
+ if len(report.AdditionalReports) > 0 {
+ r.emit("\n")
+ r.emitBlock(r.fi(indent, "{{gray}}Begin Additional Progress Reports >>{{/}}"))
+ for i, additionalReport := range report.AdditionalReports {
+ r.emit(r.fi(indent+1, additionalReport))
+ if i < len(report.AdditionalReports)-1 {
+ r.emitBlock(r.fi(indent+1, "{{gray}}%s{{/}}", strings.Repeat("-", 10)))
+ }
+ }
+ r.emitBlock(r.fi(indent, "{{gray}}<< End Additional Progress Reports{{/}}"))
+ }
+
+ highlightedGoroutines := report.HighlightedGoroutines()
+ if len(highlightedGoroutines) > 0 {
+ r.emit("\n")
+ r.emit(r.fi(indent, "{{bold}}{{underline}}Goroutines of Interest{{/}}\n"))
+ r.emitGoroutines(indent, highlightedGoroutines...)
+ }
+
+ otherGoroutines := report.OtherGoroutines()
+ if len(otherGoroutines) > 0 {
+ r.emit("\n")
+ r.emit(r.fi(indent, "{{gray}}{{bold}}{{underline}}Other Goroutines{{/}}\n"))
+ r.emitGoroutines(indent, otherGoroutines...)
+ }
+}
+
+func (r *DefaultReporter) EmitReportEntry(entry types.ReportEntry) {
+ if r.conf.Verbosity().LT(types.VerbosityLevelVerbose) || entry.Visibility == types.ReportEntryVisibilityNever {
+ return
+ }
+ r.emitReportEntry(1, entry)
+}
+
+func (r *DefaultReporter) emitReportEntry(indent uint, entry types.ReportEntry) {
+ r.emitBlock(r.fi(indent, "{{bold}}"+entry.Name+"{{gray}} "+fmt.Sprintf("- %s @ %s{{/}}", entry.Location, entry.Time.Format(types.GINKGO_TIME_FORMAT))))
+ if representation := entry.StringRepresentation(); representation != "" {
+ r.emitBlock(r.fi(indent+1, representation))
+ }
+}
+
+func (r *DefaultReporter) EmitSpecEvent(event types.SpecEvent) {
+ v := r.conf.Verbosity()
+ if v.Is(types.VerbosityLevelVeryVerbose) || (v.Is(types.VerbosityLevelVerbose) && (r.conf.ShowNodeEvents || !event.IsOnlyVisibleAtVeryVerbose())) {
+ r.emitSpecEvent(1, event, r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose))
+ }
+}
+
+func (r *DefaultReporter) emitSpecEvent(indent uint, event types.SpecEvent, includeLocation bool) {
+ location := ""
+ if includeLocation {
+ location = fmt.Sprintf("- %s ", event.CodeLocation.String())
+ }
+ switch event.SpecEventType {
+ case types.SpecEventInvalid:
+ return
+ case types.SpecEventByStart:
+ r.emitBlock(r.fi(indent, "{{bold}}STEP:{{/}} %s {{gray}}%s@ %s{{/}}", event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
+ case types.SpecEventByEnd:
+ r.emitBlock(r.fi(indent, "{{bold}}END STEP:{{/}} %s {{gray}}%s@ %s (%s){{/}}", event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), event.Duration.Round(time.Millisecond)))
+ case types.SpecEventNodeStart:
+ r.emitBlock(r.fi(indent, "> Enter {{bold}}[%s]{{/}} %s {{gray}}%s@ %s{{/}}", event.NodeType.String(), event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
+ case types.SpecEventNodeEnd:
+ r.emitBlock(r.fi(indent, "< Exit {{bold}}[%s]{{/}} %s {{gray}}%s@ %s (%s){{/}}", event.NodeType.String(), event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), event.Duration.Round(time.Millisecond)))
+ case types.SpecEventSpecRepeat:
+ r.emitBlock(r.fi(indent, "\n{{bold}}Attempt #%d {{green}}Passed{{/}}{{bold}}. Repeating %s{{/}} {{gray}}@ %s{{/}}\n\n", event.Attempt, r.retryDenoter, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
+ case types.SpecEventSpecRetry:
+ r.emitBlock(r.fi(indent, "\n{{bold}}Attempt #%d {{red}}Failed{{/}}{{bold}}. Retrying %s{{/}} {{gray}}@ %s{{/}}\n\n", event.Attempt, r.retryDenoter, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
+ }
+}
+
+func (r *DefaultReporter) emitGoroutines(indent uint, goroutines ...types.Goroutine) {
+ for idx, g := range goroutines {
+ color := "{{gray}}"
+ if g.HasHighlights() {
+ color = "{{orange}}"
+ }
+ r.emit(r.fi(indent, color+"goroutine %d [%s]{{/}}\n", g.ID, g.State))
+ for _, fc := range g.Stack {
+ if fc.Highlight {
+ r.emit(r.fi(indent, color+"{{bold}}> %s{{/}}\n", fc.Function))
+ r.emit(r.fi(indent+2, color+"{{bold}}%s:%d{{/}}\n", fc.Filename, fc.Line))
+ r.emitSource(indent+3, fc)
+ } else {
+ r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", fc.Function))
+ r.emit(r.fi(indent+2, "{{gray}}%s:%d{{/}}\n", fc.Filename, fc.Line))
+ }
+ }
+
+ if idx+1 < len(goroutines) {
+ r.emit("\n")
+ }
+ }
+}
+
+func (r *DefaultReporter) emitSource(indent uint, fc types.FunctionCall) {
+ lines := fc.Source
+ if len(lines) == 0 {
+ return
+ }
+
+ lTrim := 100000
+ for _, line := range lines {
+ lTrimLine := len(line) - len(strings.TrimLeft(line, " \t"))
+ if lTrimLine < lTrim && len(line) > 0 {
+ lTrim = lTrimLine
+ }
+ }
+ if lTrim == 100000 {
+ lTrim = 0
+ }
+
+ for idx, line := range lines {
+ if len(line) > lTrim {
+ line = line[lTrim:]
+ }
+ if idx == fc.SourceHighlight {
+ r.emit(r.fi(indent, "{{bold}}{{orange}}> %s{{/}}\n", line))
+ } else {
+ r.emit(r.fi(indent, "| %s\n", line))
+ }
+ }
+}
+
+/* Emitting to the writer */
+func (r *DefaultReporter) emit(s string) {
+ r._emit(s, false, false)
+}
+
+func (r *DefaultReporter) emitBlock(s string) {
+ r._emit(s, true, false)
+}
+
+func (r *DefaultReporter) emitDelimiter(indent uint) {
+ r._emit(r.fi(indent, "{{gray}}%s{{/}}", strings.Repeat("-", 30)), true, true)
+}
+
+// a bit ugly - but we're trying to minimize locking on this hot codepath
+func (r *DefaultReporter) _emit(s string, block bool, isDelimiter bool) {
+ if len(s) == 0 {
+ return
+ }
+ r.lock.Lock()
+ defer r.lock.Unlock()
+ if isDelimiter && r.lastEmissionWasDelimiter {
+ return
+ }
+ if block && !r.lastCharWasNewline {
+ r.writer.Write([]byte("\n"))
+ }
+ r.lastCharWasNewline = (s[len(s)-1:] == "\n")
+ r.writer.Write([]byte(s))
+ if block && !r.lastCharWasNewline {
+ r.writer.Write([]byte("\n"))
+ r.lastCharWasNewline = true
+ }
+ r.lastEmissionWasDelimiter = isDelimiter
+}
+
+/* Rendering text */
+func (r *DefaultReporter) f(format string, args ...interface{}) string {
+ return r.formatter.F(format, args...)
+}
+
+func (r *DefaultReporter) fi(indentation uint, format string, args ...interface{}) string {
+ return r.formatter.Fi(indentation, format, args...)
+}
+
+func (r *DefaultReporter) cycleJoin(elements []string, joiner string) string {
+ return r.formatter.CycleJoin(elements, joiner, []string{"{{/}}", "{{gray}}"})
+}
+
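+// codeLocationBlock renders a spec's container/leaf hierarchy. In very verbose
+// mode each level gets its own indented line with its code location; otherwise
+// the texts are joined on a single line and only the most relevant location
+// (the failure's, or the leaf node's) is printed beneath it.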
+func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, veryVerbose bool, usePreciseFailureLocation bool) string {
+ texts, locations, labels := []string{}, []types.CodeLocation{}, [][]string{}
+ texts, locations, labels = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...)
+
+ if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
+ texts = append(texts, r.f("[%s] %s", report.LeafNodeType, report.LeafNodeText))
+ } else {
+ texts = append(texts, r.f(report.LeafNodeText))
+ }
+ labels = append(labels, report.LeafNodeLabels)
+ locations = append(locations, report.LeafNodeLocation)
+
+ failureLocation := report.Failure.FailureNodeLocation
+ if usePreciseFailureLocation {
+ failureLocation = report.Failure.Location
+ }
+
+ highlightIndex := -1
+ switch report.Failure.FailureNodeContext {
+ case types.FailureNodeAtTopLevel:
+ texts = append([]string{fmt.Sprintf("TOP-LEVEL [%s]", report.Failure.FailureNodeType)}, texts...)
+ locations = append([]types.CodeLocation{failureLocation}, locations...)
+ labels = append([][]string{{}}, labels...)
+ highlightIndex = 0
+ case types.FailureNodeInContainer:
+ i := report.Failure.FailureNodeContainerIndex
+ texts[i] = fmt.Sprintf("%s [%s]", texts[i], report.Failure.FailureNodeType)
+ locations[i] = failureLocation
+ highlightIndex = i
+ case types.FailureNodeIsLeafNode:
+ i := len(texts) - 1
+ texts[i] = fmt.Sprintf("[%s] %s", report.LeafNodeType, report.LeafNodeText)
+ locations[i] = failureLocation
+ highlightIndex = i
+ default:
+ //there is no failure, so we highlight the leaf node
+ highlightIndex = len(texts) - 1
+ }
+
+ out := ""
+ if veryVerbose {
+ for i := range texts {
+ if i == highlightIndex {
+ out += r.fi(uint(i), highlightColor+"{{bold}}%s{{/}}", texts[i])
+ } else {
+ out += r.fi(uint(i), "%s", texts[i])
+ }
+ if len(labels[i]) > 0 {
+ out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", "))
+ }
+ out += "\n"
+ out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i])
+ }
+ } else {
+ for i := range texts {
+ style := "{{/}}"
+ if i%2 == 1 {
+ style = "{{gray}}"
+ }
+ if i == highlightIndex {
+ style = highlightColor + "{{bold}}"
+ }
+ out += r.f(style+"%s", texts[i])
+ if i < len(texts)-1 {
+ out += " "
+ } else {
+ out += r.f("{{/}}")
+ }
+ }
+ flattenedLabels := report.Labels()
+ if len(flattenedLabels) > 0 {
+ out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedLabels, ", "))
+ }
+ out += "\n"
+ if usePreciseFailureLocation {
+ out += r.f("{{gray}}%s{{/}}", failureLocation)
+ } else {
+ leafLocation := locations[len(locations)-1]
+ if (report.Failure.FailureNodeLocation != types.CodeLocation{}) && (report.Failure.FailureNodeLocation != leafLocation) {
+ out += r.fi(1, highlightColor+"[%s]{{/}} {{gray}}%s{{/}}\n", report.Failure.FailureNodeType, report.Failure.FailureNodeLocation)
+ out += r.fi(1, "{{gray}}[%s] %s{{/}}", report.LeafNodeType, leafLocation)
+ } else {
+ out += r.f("{{gray}}%s{{/}}", leafLocation)
+ }
+ }
+
+ }
+ return out
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go
new file mode 100644
index 0000000..613072e
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go
@@ -0,0 +1,149 @@
+package reporters
+
+import (
+ "github.com/onsi/ginkgo/v2/config"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+// Deprecated: DeprecatedReporter was how Ginkgo V1 provided support for CustomReporters
+// this has been removed in V2.
+// Please read the documentation at:
+// https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters
+// for Ginkgo's new behavior and for a migration path.
+type DeprecatedReporter interface {
+ SuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary)
+ BeforeSuiteDidRun(setupSummary *types.SetupSummary)
+ SpecWillRun(specSummary *types.SpecSummary)
+ SpecDidComplete(specSummary *types.SpecSummary)
+ AfterSuiteDidRun(setupSummary *types.SetupSummary)
+ SuiteDidEnd(summary *types.SuiteSummary)
+}
+
+// ReportViaDeprecatedReporter takes a V1 custom reporter and a V2 report and
+// calls the custom reporter's methods with appropriately transformed data from the V2 report.
+//
+// ReportViaDeprecatedReporter should be called in a `ReportAfterSuite()`
+//
+// Deprecated: ReportViaDeprecatedReporter exists to help developers bridge between deprecated V1 functionality and the new
+// reporting support in V2. It will be removed in a future minor version of Ginkgo.
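+//
+// For example (a sketch - myV1Reporter stands in for your V1 custom reporter):
+//
+//	ReportAfterSuite("bridge to V1 reporter", func(report types.Report) {
+//		ReportViaDeprecatedReporter(myV1Reporter, report)
+//	})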
+func ReportViaDeprecatedReporter(reporter DeprecatedReporter, report types.Report) {
+ conf := config.DeprecatedGinkgoConfigType{
+ RandomSeed: report.SuiteConfig.RandomSeed,
+ RandomizeAllSpecs: report.SuiteConfig.RandomizeAllSpecs,
+ FocusStrings: report.SuiteConfig.FocusStrings,
+ SkipStrings: report.SuiteConfig.SkipStrings,
+ FailOnPending: report.SuiteConfig.FailOnPending,
+ FailFast: report.SuiteConfig.FailFast,
+ FlakeAttempts: report.SuiteConfig.FlakeAttempts,
+ EmitSpecProgress: false,
+ DryRun: report.SuiteConfig.DryRun,
+ ParallelNode: report.SuiteConfig.ParallelProcess,
+ ParallelTotal: report.SuiteConfig.ParallelTotal,
+ SyncHost: report.SuiteConfig.ParallelHost,
+ StreamHost: report.SuiteConfig.ParallelHost,
+ }
+
+ summary := &types.DeprecatedSuiteSummary{
+ SuiteDescription: report.SuiteDescription,
+ SuiteID: report.SuitePath,
+
+ NumberOfSpecsBeforeParallelization: report.PreRunStats.TotalSpecs,
+ NumberOfTotalSpecs: report.PreRunStats.TotalSpecs,
+ NumberOfSpecsThatWillBeRun: report.PreRunStats.SpecsThatWillRun,
+ }
+
+ reporter.SuiteWillBegin(conf, summary)
+
+ for _, spec := range report.SpecReports {
+ switch spec.LeafNodeType {
+ case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite:
+ setupSummary := &types.DeprecatedSetupSummary{
+ ComponentType: spec.LeafNodeType,
+ CodeLocation: spec.LeafNodeLocation,
+ State: spec.State,
+ RunTime: spec.RunTime,
+ Failure: failureFor(spec),
+ CapturedOutput: spec.CombinedOutput(),
+ SuiteID: report.SuitePath,
+ }
+ reporter.BeforeSuiteDidRun(setupSummary)
+ case types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite:
+ setupSummary := &types.DeprecatedSetupSummary{
+ ComponentType: spec.LeafNodeType,
+ CodeLocation: spec.LeafNodeLocation,
+ State: spec.State,
+ RunTime: spec.RunTime,
+ Failure: failureFor(spec),
+ CapturedOutput: spec.CombinedOutput(),
+ SuiteID: report.SuitePath,
+ }
+ reporter.AfterSuiteDidRun(setupSummary)
+ case types.NodeTypeIt:
+ componentTexts, componentCodeLocations := []string{}, []types.CodeLocation{}
+ componentTexts = append(componentTexts, spec.ContainerHierarchyTexts...)
+ componentCodeLocations = append(componentCodeLocations, spec.ContainerHierarchyLocations...)
+ componentTexts = append(componentTexts, spec.LeafNodeText)
+ componentCodeLocations = append(componentCodeLocations, spec.LeafNodeLocation)
+
+ specSummary := &types.DeprecatedSpecSummary{
+ ComponentTexts: componentTexts,
+ ComponentCodeLocations: componentCodeLocations,
+ State: spec.State,
+ RunTime: spec.RunTime,
+ Failure: failureFor(spec),
+ NumberOfSamples: spec.NumAttempts,
+ CapturedOutput: spec.CombinedOutput(),
+ SuiteID: report.SuitePath,
+ }
+ reporter.SpecWillRun(specSummary)
+ reporter.SpecDidComplete(specSummary)
+
+ switch spec.State {
+ case types.SpecStatePending:
+ summary.NumberOfPendingSpecs += 1
+ case types.SpecStateSkipped:
+ summary.NumberOfSkippedSpecs += 1
+ case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateInterrupted:
+ summary.NumberOfFailedSpecs += 1
+ case types.SpecStatePassed:
+ summary.NumberOfPassedSpecs += 1
+ if spec.NumAttempts > 1 {
+ summary.NumberOfFlakedSpecs += 1
+ }
+ }
+ }
+ }
+
+ summary.SuiteSucceeded = report.SuiteSucceeded
+ summary.RunTime = report.RunTime
+
+ reporter.SuiteDidEnd(summary)
+}
+
+func failureFor(spec types.SpecReport) types.DeprecatedSpecFailure {
+ if spec.Failure.IsZero() {
+ return types.DeprecatedSpecFailure{}
+ }
+
+ index := 0
+ switch spec.Failure.FailureNodeContext {
+ case types.FailureNodeInContainer:
+ index = spec.Failure.FailureNodeContainerIndex
+ case types.FailureNodeAtTopLevel:
+ index = -1
+ case types.FailureNodeIsLeafNode:
+ index = len(spec.ContainerHierarchyTexts) - 1
+ if spec.LeafNodeText != "" {
+ index += 1
+ }
+ }
+
+ return types.DeprecatedSpecFailure{
+ Message: spec.Failure.Message,
+ Location: spec.Failure.Location,
+ ForwardedPanic: spec.Failure.ForwardedPanic,
+ ComponentIndex: index,
+ ComponentType: spec.Failure.FailureNodeType,
+ ComponentCodeLocation: spec.Failure.FailureNodeLocation,
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go
new file mode 100644
index 0000000..5d3e8db
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go
@@ -0,0 +1,69 @@
+package reporters
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+// GenerateJSONReport produces a JSON-formatted report at the passed-in destination.
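+//
+// A typical call site is a ReportAfterSuite node (a sketch; the destination path is illustrative):
+//
+//	ReportAfterSuite("json report", func(report Report) {
+//		if err := reporters.GenerateJSONReport(report, "reports/suite.json"); err != nil {
+//			Fail("failed to generate JSON report: " + err.Error())
+//		}
+//	})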
+func GenerateJSONReport(report types.Report, destination string) error {
+ if err := os.MkdirAll(path.Dir(destination), 0770); err != nil {
+ return err
+ }
+ f, err := os.Create(destination)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ enc := json.NewEncoder(f)
+ enc.SetIndent("", " ")
+ err = enc.Encode([]types.Report{
+ report,
+ })
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// MergeAndCleanupJSONReports produces a single JSON-formatted report at the passed-in destination by merging the JSON-formatted reports provided in sources.
+// It removes each source file after reading it and skips over reports that fail to decode, reporting on them via the returned messages []string.
+func MergeAndCleanupJSONReports(sources []string, destination string) ([]string, error) {
+ messages := []string{}
+ allReports := []types.Report{}
+ for _, source := range sources {
+ reports := []types.Report{}
+ data, err := os.ReadFile(source)
+ if err != nil {
+ messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error()))
+ continue
+ }
+ err = json.Unmarshal(data, &reports)
+ if err != nil {
+ messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error()))
+ continue
+ }
+ os.Remove(source)
+ allReports = append(allReports, reports...)
+ }
+
+ if err := os.MkdirAll(path.Dir(destination), 0770); err != nil {
+ return messages, err
+ }
+ f, err := os.Create(destination)
+ if err != nil {
+ return messages, err
+ }
+ defer f.Close()
+ enc := json.NewEncoder(f)
+ enc.SetIndent("", " ")
+ err = enc.Encode(allReports)
+ if err != nil {
+ return messages, err
+ }
+ return messages, nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
new file mode 100644
index 0000000..562e0f6
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
@@ -0,0 +1,390 @@
+/*
+
+JUnit XML Reporter for Ginkgo
+
+For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output
+
+The schema used for the generated JUnit xml file was adapted from https://llg.cubic.org/docs/junit/
+
+*/
+
+package reporters
+
+import (
+ "encoding/xml"
+ "fmt"
+ "os"
+ "path"
+ "regexp"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/config"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type JunitReportConfig struct {
+ // OmitTimelinesForSpecState selects the spec states for which no timeline is emitted to system-err.
+ // Set this to types.SpecStatePassed|types.SpecStateSkipped|types.SpecStatePending to emit timelines only for failing specs.
+ OmitTimelinesForSpecState types.SpecState
+
+ // Enable OmitFailureMessageAttr to prevent failure messages from appearing in the "message" attribute of the Failure and Error tags
+ OmitFailureMessageAttr bool
+
+ // Enable OmitCapturedStdOutErr to prevent captured stdout/stderr from appearing in system-out
+ OmitCapturedStdOutErr bool
+
+ // Enable OmitSpecLabels to prevent labels from appearing in the spec name
+ OmitSpecLabels bool
+
+ // Enable OmitLeafNodeType to prevent the spec leaf node type from appearing in the spec name
+ OmitLeafNodeType bool
+
+ // Enable OmitSuiteSetupNodes to prevent the creation of testcase entries for setup nodes
+ OmitSuiteSetupNodes bool
+}
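+
+// For example (a sketch; the destination path is illustrative), to emit timelines only for specs that did not pass:
+//
+//	ReportAfterSuite("junit report", func(report Report) {
+//		conf := reporters.JunitReportConfig{
+//			OmitTimelinesForSpecState: types.SpecStatePassed | types.SpecStateSkipped | types.SpecStatePending,
+//		}
+//		_ = reporters.GenerateJUnitReportWithConfig(report, "reports/junit.xml", conf)
+//	})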
+
+type JUnitTestSuites struct {
+ XMLName xml.Name `xml:"testsuites"`
+ // Tests maps onto the total number of specs in all test suites (this includes any suite nodes such as BeforeSuite)
+ Tests int `xml:"tests,attr"`
+ // Disabled maps onto specs that are pending and/or skipped
+ Disabled int `xml:"disabled,attr"`
+ // Errors maps onto specs that panicked or were interrupted
+ Errors int `xml:"errors,attr"`
+ // Failures maps onto specs that failed
+ Failures int `xml:"failures,attr"`
+ // Time is the time in seconds to execute all test suites
+ Time float64 `xml:"time,attr"`
+
+ //The set of all test suites
+ TestSuites []JUnitTestSuite `xml:"testsuite"`
+}
+
+type JUnitTestSuite struct {
+ // Name maps onto the description of the test suite - maps onto Report.SuiteDescription
+ Name string `xml:"name,attr"`
+ // Package maps onto the absolute path to the test suite - maps onto Report.SuitePath
+ Package string `xml:"package,attr"`
+ // Tests maps onto the total number of specs in the test suite (this includes any suite nodes such as BeforeSuite)
+ Tests int `xml:"tests,attr"`
+ // Disabled maps onto specs that are pending
+ Disabled int `xml:"disabled,attr"`
+ // Skipped maps onto specs that are skipped
+ Skipped int `xml:"skipped,attr"`
+ // Errors maps onto specs that panicked or were interrupted
+ Errors int `xml:"errors,attr"`
+ // Failures maps onto specs that failed
+ Failures int `xml:"failures,attr"`
+ // Time is the time in seconds to execute all the test suite - maps onto Report.RunTime
+ Time float64 `xml:"time,attr"`
+ // Timestamp is the ISO 8601 formatted start-time of the suite - maps onto Report.StartTime
+ Timestamp string `xml:"timestamp,attr"`
+
+ //Properties captures the information stored in the rest of the Report type (including SuiteConfig) as key-value pairs
+ Properties JUnitProperties `xml:"properties"`
+
+ //TestCases capture the individual specs
+ TestCases []JUnitTestCase `xml:"testcase"`
+}
+
+type JUnitProperties struct {
+ Properties []JUnitProperty `xml:"property"`
+}
+
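+// WithName returns the value of the first property with the given name, or the empty string if no property matches.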
+func (jup JUnitProperties) WithName(name string) string {
+ for _, property := range jup.Properties {
+ if property.Name == name {
+ return property.Value
+ }
+ }
+ return ""
+}
+
+type JUnitProperty struct {
+ Name string `xml:"name,attr"`
+ Value string `xml:"value,attr"`
+}
+
+var ownerRE = regexp.MustCompile(`(?i)^owner:(.*)$`)
+
+type JUnitTestCase struct {
+ // Name maps onto the full text of the spec - equivalent to "[SpecReport.LeafNodeType] SpecReport.FullText()"
+ Name string `xml:"name,attr"`
+ // Classname maps onto the name of the test suite - equivalent to Report.SuiteDescription
+ Classname string `xml:"classname,attr"`
+ // Status maps onto the string representation of SpecReport.State
+ Status string `xml:"status,attr"`
+ // Time is the time in seconds to execute the spec - maps onto SpecReport.RunTime
+ Time float64 `xml:"time,attr"`
+ // Owner is the owner of the spec - set if a label matching Label("owner:X") is provided. The last matching label is used as the owner, thereby allowing specs to override owners specified in container nodes.
+ Owner string `xml:"owner,attr,omitempty"`
+ //Skipped is populated with a message if the test was skipped or pending
+ Skipped *JUnitSkipped `xml:"skipped,omitempty"`
+ //Error is populated if the test panicked or was interrupted
+ Error *JUnitError `xml:"error,omitempty"`
+ //Failure is populated if the test failed
+ Failure *JUnitFailure `xml:"failure,omitempty"`
+ //SystemOut maps onto any captured stdout/stderr output - maps onto SpecReport.CapturedStdOutErr
+ SystemOut string `xml:"system-out,omitempty"`
+ //SystemErr maps onto any captured GinkgoWriter output - maps onto SpecReport.CapturedGinkgoWriterOutput
+ SystemErr string `xml:"system-err,omitempty"`
+}
+
+type JUnitSkipped struct {
+ // Message maps onto "pending" if the test was marked pending, "skipped" if the test was marked skipped, and "skipped - REASON" if the user called Skip(REASON)
+ Message string `xml:"message,attr"`
+}
+
+type JUnitError struct {
+ //Message maps onto the panic/exception thrown - equivalent to SpecReport.Failure.ForwardedPanic - or to "interrupted"
+ Message string `xml:"message,attr"`
+ //Type is one of "panicked" or "interrupted"
+ Type string `xml:"type,attr"`
+ //Description maps onto the captured stack trace for a panic, or the failure message for an interrupt, which will include the dump of running goroutines
+ Description string `xml:",chardata"`
+}
+
+type JUnitFailure struct {
+ //Message maps onto the failure message - equivalent to SpecReport.Failure.Message
+ Message string `xml:"message,attr"`
+ //Type is "failed"
+ Type string `xml:"type,attr"`
+ //Description maps onto the location and stack trace of the failure
+ Description string `xml:",chardata"`
+}
+
+func GenerateJUnitReport(report types.Report, dst string) error {
+ return GenerateJUnitReportWithConfig(report, dst, JunitReportConfig{})
+}
+
+func GenerateJUnitReportWithConfig(report types.Report, dst string, config JunitReportConfig) error {
+ suite := JUnitTestSuite{
+ Name: report.SuiteDescription,
+ Package: report.SuitePath,
+ Time: report.RunTime.Seconds(),
+ Timestamp: report.StartTime.Format("2006-01-02T15:04:05"),
+ Properties: JUnitProperties{
+ Properties: []JUnitProperty{
+ {"SuiteSucceeded", fmt.Sprintf("%t", report.SuiteSucceeded)},
+ {"SuiteHasProgrammaticFocus", fmt.Sprintf("%t", report.SuiteHasProgrammaticFocus)},
+ {"SpecialSuiteFailureReason", strings.Join(report.SpecialSuiteFailureReasons, ",")},
+ {"SuiteLabels", fmt.Sprintf("[%s]", strings.Join(report.SuiteLabels, ","))},
+ {"RandomSeed", fmt.Sprintf("%d", report.SuiteConfig.RandomSeed)},
+ {"RandomizeAllSpecs", fmt.Sprintf("%t", report.SuiteConfig.RandomizeAllSpecs)},
+ {"LabelFilter", report.SuiteConfig.LabelFilter},
+ {"FocusStrings", strings.Join(report.SuiteConfig.FocusStrings, ",")},
+ {"SkipStrings", strings.Join(report.SuiteConfig.SkipStrings, ",")},
+ {"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")},
+ {"SkipFiles", strings.Join(report.SuiteConfig.SkipFiles, ";")},
+ {"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)},
+ {"FailOnEmpty", fmt.Sprintf("%t", report.SuiteConfig.FailOnEmpty)},
+ {"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)},
+ {"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)},
+ {"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)},
+ {"ParallelTotal", fmt.Sprintf("%d", report.SuiteConfig.ParallelTotal)},
+ {"OutputInterceptorMode", report.SuiteConfig.OutputInterceptorMode},
+ },
+ },
+ }
+ for _, spec := range report.SpecReports {
+ if config.OmitSuiteSetupNodes && spec.LeafNodeType != types.NodeTypeIt {
+ continue
+ }
+ name := fmt.Sprintf("[%s]", spec.LeafNodeType)
+ if config.OmitLeafNodeType {
+ name = ""
+ }
+ if spec.FullText() != "" {
+ name = name + " " + spec.FullText()
+ }
+ labels := spec.Labels()
+ if len(labels) > 0 && !config.OmitSpecLabels {
+ name = name + " [" + strings.Join(labels, ", ") + "]"
+ }
+ owner := ""
+ for _, label := range labels {
+ if matches := ownerRE.FindStringSubmatch(label); len(matches) == 2 {
+ owner = matches[1]
+ }
+ }
+ name = strings.TrimSpace(name)
+
+ test := JUnitTestCase{
+ Name: name,
+ Classname: report.SuiteDescription,
+ Status: spec.State.String(),
+ Time: spec.RunTime.Seconds(),
+ Owner: owner,
+ }
+ if !spec.State.Is(config.OmitTimelinesForSpecState) {
+ test.SystemErr = systemErrForUnstructuredReporters(spec)
+ }
+ if !config.OmitCapturedStdOutErr {
+ test.SystemOut = systemOutForUnstructuredReporters(spec)
+ }
+ suite.Tests += 1
+
+ switch spec.State {
+ case types.SpecStateSkipped:
+ message := "skipped"
+ if spec.Failure.Message != "" {
+ message += " - " + spec.Failure.Message
+ }
+ test.Skipped = &JUnitSkipped{Message: message}
+ suite.Skipped += 1
+ case types.SpecStatePending:
+ test.Skipped = &JUnitSkipped{Message: "pending"}
+ suite.Disabled += 1
+ case types.SpecStateFailed:
+ test.Failure = &JUnitFailure{
+ Message: spec.Failure.Message,
+ Type: "failed",
+ Description: failureDescriptionForUnstructuredReporters(spec),
+ }
+ if config.OmitFailureMessageAttr {
+ test.Failure.Message = ""
+ }
+ suite.Failures += 1
+ case types.SpecStateTimedout:
+ test.Failure = &JUnitFailure{
+ Message: spec.Failure.Message,
+ Type: "timedout",
+ Description: failureDescriptionForUnstructuredReporters(spec),
+ }
+ if config.OmitFailureMessageAttr {
+ test.Failure.Message = ""
+ }
+ suite.Failures += 1
+ case types.SpecStateInterrupted:
+ test.Error = &JUnitError{
+ Message: spec.Failure.Message,
+ Type: "interrupted",
+ Description: failureDescriptionForUnstructuredReporters(spec),
+ }
+ if config.OmitFailureMessageAttr {
+ test.Error.Message = ""
+ }
+ suite.Errors += 1
+ case types.SpecStateAborted:
+ test.Failure = &JUnitFailure{
+ Message: spec.Failure.Message,
+ Type: "aborted",
+ Description: failureDescriptionForUnstructuredReporters(spec),
+ }
+ if config.OmitFailureMessageAttr {
+ test.Failure.Message = ""
+ }
+ suite.Errors += 1
+ case types.SpecStatePanicked:
+ test.Error = &JUnitError{
+ Message: spec.Failure.ForwardedPanic,
+ Type: "panicked",
+ Description: failureDescriptionForUnstructuredReporters(spec),
+ }
+ if config.OmitFailureMessageAttr {
+ test.Error.Message = ""
+ }
+ suite.Errors += 1
+ }
+
+ suite.TestCases = append(suite.TestCases, test)
+ }
+
+ junitReport := JUnitTestSuites{
+ Tests: suite.Tests,
+ Disabled: suite.Disabled + suite.Skipped,
+ Errors: suite.Errors,
+ Failures: suite.Failures,
+ Time: suite.Time,
+ TestSuites: []JUnitTestSuite{suite},
+ }
+
+ if err := os.MkdirAll(path.Dir(dst), 0770); err != nil {
+ return err
+ }
+ f, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+ f.WriteString(xml.Header)
+ encoder := xml.NewEncoder(f)
+ encoder.Indent(" ", " ")
+ encoder.Encode(junitReport)
+
+ return f.Close()
+}
+
+func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error) {
+ messages := []string{}
+ mergedReport := JUnitTestSuites{}
+ for _, source := range sources {
+ report := JUnitTestSuites{}
+ f, err := os.Open(source)
+ if err != nil {
+ messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error()))
+ continue
+ }
+ err = xml.NewDecoder(f).Decode(&report)
+ _ = f.Close()
+ if err != nil {
+ messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error()))
+ continue
+ }
+ os.Remove(source)
+
+ mergedReport.Tests += report.Tests
+ mergedReport.Disabled += report.Disabled
+ mergedReport.Errors += report.Errors
+ mergedReport.Failures += report.Failures
+ mergedReport.Time += report.Time
+ mergedReport.TestSuites = append(mergedReport.TestSuites, report.TestSuites...)
+ }
+
+ if err := os.MkdirAll(path.Dir(dst), 0770); err != nil {
+ return messages, err
+ }
+ f, err := os.Create(dst)
+ if err != nil {
+ return messages, err
+ }
+ f.WriteString(xml.Header)
+ encoder := xml.NewEncoder(f)
+ encoder.Indent(" ", " ")
+ encoder.Encode(mergedReport)
+
+ return messages, f.Close()
+}
+
+func failureDescriptionForUnstructuredReporters(spec types.SpecReport) string {
+ out := &strings.Builder{}
+ NewDefaultReporter(types.ReporterConfig{NoColor: true, VeryVerbose: true}, out).emitFailure(0, spec.State, spec.Failure, true)
+ if len(spec.AdditionalFailures) > 0 {
+ out.WriteString("\nThere were additional failures detected after the initial failure. These are visible in the timeline\n")
+ }
+ return out.String()
+}
+
+func systemErrForUnstructuredReporters(spec types.SpecReport) string {
+ return RenderTimeline(spec, true)
+}
+
+func RenderTimeline(spec types.SpecReport, noColor bool) string {
+ out := &strings.Builder{}
+ NewDefaultReporter(types.ReporterConfig{NoColor: noColor, VeryVerbose: true}, out).emitTimeline(0, spec, spec.Timeline())
+ return out.String()
+}
+
+func systemOutForUnstructuredReporters(spec types.SpecReport) string {
+ return spec.CapturedStdOutErr
+}
+
+// Deprecated JUnitReporter (so folks can still compile their suites)
+type JUnitReporter struct{}
+
+func NewJUnitReporter(_ string) *JUnitReporter { return &JUnitReporter{} }
+func (reporter *JUnitReporter) SuiteWillBegin(_ config.GinkgoConfigType, _ *types.SuiteSummary) {}
+func (reporter *JUnitReporter) BeforeSuiteDidRun(_ *types.SetupSummary) {}
+func (reporter *JUnitReporter) SpecWillRun(_ *types.SpecSummary) {}
+func (reporter *JUnitReporter) SpecDidComplete(_ *types.SpecSummary) {}
+func (reporter *JUnitReporter) AfterSuiteDidRun(_ *types.SetupSummary) {}
+func (reporter *JUnitReporter) SuiteDidEnd(_ *types.SuiteSummary) {}
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go
new file mode 100644
index 0000000..5e726c4
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go
@@ -0,0 +1,29 @@
+package reporters
+
+import (
+ "github.com/onsi/ginkgo/v2/types"
+)
+
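+// Reporter is the interface implemented by Ginkgo's reporters: SuiteWillBegin and SuiteDidEnd
+// bracket the suite, WillRun and DidRun bracket each spec, and the Emit* methods stream
+// timeline events (failures, progress reports, report entries, and spec events) as they occur.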
+type Reporter interface {
+ SuiteWillBegin(report types.Report)
+ WillRun(report types.SpecReport)
+ DidRun(report types.SpecReport)
+ SuiteDidEnd(report types.Report)
+
+ //Timeline emission
+ EmitFailure(state types.SpecState, failure types.Failure)
+ EmitProgressReport(progressReport types.ProgressReport)
+ EmitReportEntry(entry types.ReportEntry)
+ EmitSpecEvent(event types.SpecEvent)
+}
+
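+// NoopReporter implements Reporter with no-op methods. A common pattern (a sketch, not
+// required by Ginkgo): embed NoopReporter in a custom type and override only the methods you need.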
+type NoopReporter struct{}
+
+func (n NoopReporter) SuiteWillBegin(report types.Report) {}
+func (n NoopReporter) WillRun(report types.SpecReport) {}
+func (n NoopReporter) DidRun(report types.SpecReport) {}
+func (n NoopReporter) SuiteDidEnd(report types.Report) {}
+func (n NoopReporter) EmitFailure(state types.SpecState, failure types.Failure) {}
+func (n NoopReporter) EmitProgressReport(progressReport types.ProgressReport) {}
+func (n NoopReporter) EmitReportEntry(entry types.ReportEntry) {}
+func (n NoopReporter) EmitSpecEvent(event types.SpecEvent) {}
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go
new file mode 100644
index 0000000..e990ad8
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go
@@ -0,0 +1,105 @@
+/*
+
+TeamCity Reporter for Ginkgo
+
+Makes use of TeamCity's support for Service Messages
+http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-ReportingTests
+*/
+
+package reporters
+
+import (
+ "fmt"
+ "os"
+ "path"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
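+// tcEscape escapes a value for inclusion in a TeamCity service message.
+// For example, tcEscape("a|b\n[ok]") returns "a||b|n|[ok|]".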
+func tcEscape(s string) string {
+ s = strings.ReplaceAll(s, "|", "||")
+ s = strings.ReplaceAll(s, "'", "|'")
+ s = strings.ReplaceAll(s, "\n", "|n")
+ s = strings.ReplaceAll(s, "\r", "|r")
+ s = strings.ReplaceAll(s, "[", "|[")
+ s = strings.ReplaceAll(s, "]", "|]")
+ return s
+}
+
+func GenerateTeamcityReport(report types.Report, dst string) error {
+ if err := os.MkdirAll(path.Dir(dst), 0770); err != nil {
+ return err
+ }
+ f, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+
+ name := report.SuiteDescription
+ labels := report.SuiteLabels
+ if len(labels) > 0 {
+ name = name + " [" + strings.Join(labels, ", ") + "]"
+ }
+ fmt.Fprintf(f, "##teamcity[testSuiteStarted name='%s']\n", tcEscape(name))
+ for _, spec := range report.SpecReports {
+ name := fmt.Sprintf("[%s]", spec.LeafNodeType)
+ if spec.FullText() != "" {
+ name = name + " " + spec.FullText()
+ }
+ labels := spec.Labels()
+ if len(labels) > 0 {
+ name = name + " [" + strings.Join(labels, ", ") + "]"
+ }
+
+ name = tcEscape(name)
+ fmt.Fprintf(f, "##teamcity[testStarted name='%s']\n", name)
+ switch spec.State {
+ case types.SpecStatePending:
+ fmt.Fprintf(f, "##teamcity[testIgnored name='%s' message='pending']\n", name)
+ case types.SpecStateSkipped:
+ message := "skipped"
+ if spec.Failure.Message != "" {
+ message += " - " + spec.Failure.Message
+ }
+ fmt.Fprintf(f, "##teamcity[testIgnored name='%s' message='%s']\n", name, tcEscape(message))
+ case types.SpecStateFailed:
+ details := failureDescriptionForUnstructuredReporters(spec)
+ fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='failed - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details))
+ case types.SpecStatePanicked:
+ details := failureDescriptionForUnstructuredReporters(spec)
+ fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='panicked - %s' details='%s']\n", name, tcEscape(spec.Failure.ForwardedPanic), tcEscape(details))
+ case types.SpecStateTimedout:
+ details := failureDescriptionForUnstructuredReporters(spec)
+ fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='timedout - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details))
+ case types.SpecStateInterrupted:
+ details := failureDescriptionForUnstructuredReporters(spec)
+ fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='interrupted - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details))
+ case types.SpecStateAborted:
+ details := failureDescriptionForUnstructuredReporters(spec)
+ fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='aborted - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details))
+ }
+
+ fmt.Fprintf(f, "##teamcity[testStdOut name='%s' out='%s']\n", name, tcEscape(systemOutForUnstructuredReporters(spec)))
+ fmt.Fprintf(f, "##teamcity[testStdErr name='%s' out='%s']\n", name, tcEscape(systemErrForUnstructuredReporters(spec)))
+ fmt.Fprintf(f, "##teamcity[testFinished name='%s' duration='%d']\n", name, int(spec.RunTime.Seconds()*1000.0))
+ }
+ fmt.Fprintf(f, "##teamcity[testSuiteFinished name='%s']\n", tcEscape(report.SuiteDescription))
+
+ return f.Close()
+}
+
+func MergeAndCleanupTeamcityReports(sources []string, dst string) ([]string, error) {
+ messages := []string{}
+ merged := []byte{}
+ for _, source := range sources {
+ data, err := os.ReadFile(source)
+ if err != nil {
+ messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error()))
+ continue
+ }
+ os.Remove(source)
+ merged = append(merged, data...)
+ }
+ return messages, os.WriteFile(dst, merged, 0666)
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go
new file mode 100644
index 0000000..aa1a351
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go
@@ -0,0 +1,221 @@
+package ginkgo
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/internal"
+ "github.com/onsi/ginkgo/v2/internal/global"
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+/*
+Report represents the report for a Suite.
+It is documented here: https://pkg.go.dev/github.com/onsi/ginkgo/v2/types#Report
+*/
+type Report = types.Report
+
+/*
+Report represents the report for a Spec.
+It is documented here: https://pkg.go.dev/github.com/onsi/ginkgo/v2/types#SpecReport
+*/
+type SpecReport = types.SpecReport
+
+/*
+CurrentSpecReport returns information about the current running spec.
+The returned object is a types.SpecReport which includes helper methods
+to make extracting information about the spec easier.
+
+You can learn more about SpecReport here: https://pkg.go.dev/github.com/onsi/ginkgo/types#SpecReport
+You can learn more about CurrentSpecReport() here: https://onsi.github.io/ginkgo/#getting-a-report-for-the-current-spec
+*/
+func CurrentSpecReport() SpecReport {
+ return global.Suite.CurrentSpecReport()
+}
+
+/*
+ ReportEntryVisibility governs the visibility of ReportEntries in Ginkgo's console reporter
+
+- ReportEntryVisibilityAlways: the default behavior - the ReportEntry is always emitted.
+- ReportEntryVisibilityFailureOrVerbose: the ReportEntry is only emitted if the spec fails or if the tests are run with -v (similar to GinkgoWriter's behavior).
+- ReportEntryVisibilityNever: the ReportEntry is never emitted though it appears in any generated machine-readable reports (e.g. by setting `--json-report`).
+
+You can learn more about Report Entries here: https://onsi.github.io/ginkgo/#attaching-data-to-reports
+*/
+type ReportEntryVisibility = types.ReportEntryVisibility
+
+const ReportEntryVisibilityAlways, ReportEntryVisibilityFailureOrVerbose, ReportEntryVisibilityNever = types.ReportEntryVisibilityAlways, types.ReportEntryVisibilityFailureOrVerbose, types.ReportEntryVisibilityNever
+
+/*
+AddReportEntry generates and adds a new ReportEntry to the current spec's SpecReport.
+It can take any of the following arguments:
+ - A single arbitrary object to attach as the Value of the ReportEntry. This object will be included in any generated reports and will be emitted to the console when the report is emitted.
+ - A ReportEntryVisibility enum to control the visibility of the ReportEntry
+ - An Offset or CodeLocation decoration to control the reported location of the ReportEntry
+
+If the Value object implements `fmt.Stringer`, its `String()` representation is used when emitting to the console.
+
+AddReportEntry() must be called within a Subject or Setup node - not in a Container node.
+
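+For example (a sketch; clusterState is whatever value you want attached to the report):
+
+ AddReportEntry("cluster state", clusterState, ReportEntryVisibilityFailureOrVerbose)
+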
+You can learn more about Report Entries here: https://onsi.github.io/ginkgo/#attaching-data-to-reports
+*/
+func AddReportEntry(name string, args ...interface{}) {
+ cl := types.NewCodeLocation(1)
+ reportEntry, err := internal.NewReportEntry(name, cl, args...)
+ if err != nil {
+ Fail(fmt.Sprintf("Failed to generate Report Entry:\n%s", err.Error()), 1)
+ }
+ err = global.Suite.AddReportEntry(reportEntry)
+ if err != nil {
+ Fail(fmt.Sprintf("Failed to add Report Entry:\n%s", err.Error()), 1)
+ }
+}
+
+/*
+ReportBeforeEach nodes are run for each spec, even if the spec is skipped or pending. ReportBeforeEach nodes take a function that
+receives a SpecReport, or both a SpecContext and a SpecReport for interruptible behavior. They are called before the spec starts.
+
+Example:
+
+ ReportBeforeEach(func(report SpecReport) { /* process report */ })
+ ReportBeforeEach(func(ctx SpecContext, report SpecReport) {
+ // process report
+ }, NodeTimeout(1 * time.Minute))
+
+You cannot nest any other Ginkgo nodes within a ReportBeforeEach node's closure.
+You can learn more about ReportBeforeEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
+
+You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
+*/
+func ReportBeforeEach(body any, args ...any) bool {
+ combinedArgs := []interface{}{body}
+ combinedArgs = append(combinedArgs, args...)
+
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeEach, "", combinedArgs...))
+}
+
+/*
+ReportAfterEach nodes are run for each spec, even if the spec is skipped or pending.
+ReportAfterEach nodes take a function that receives a SpecReport, or both a SpecContext and a SpecReport for interruptible behavior.
+They are called after the spec has completed and receive the final report for the spec.
+
+Example:
+
+ ReportAfterEach(func(report SpecReport) { /* process report */ })
+ ReportAfterEach(func(ctx SpecContext, report SpecReport) {
+ // process report
+ }, NodeTimeout(1 * time.Minute))
+
+You cannot nest any other Ginkgo nodes within a ReportAfterEach node's closure.
+You can learn more about ReportAfterEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
+
+You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
+*/
+func ReportAfterEach(body any, args ...any) bool {
+ combinedArgs := []interface{}{body}
+ combinedArgs = append(combinedArgs, args...)
+
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterEach, "", combinedArgs...))
+}
+
+/*
+ReportBeforeSuite nodes are run at the beginning of the suite. ReportBeforeSuite nodes take a function
+that can either receive Report or both SpecContext and Report for interruptible behavior.
+
+Example Usage:
+
+ ReportBeforeSuite(func(r Report) { /* process report */ })
+ ReportBeforeSuite(func(ctx SpecContext, r Report) {
+ // process report
+ }, NodeTimeout(1 * time.Minute))
+
+They are called at the beginning of the suite, before any specs have run and any BeforeSuite or SynchronizedBeforeSuite nodes, and are passed in the initial report for the suite.
+ReportBeforeSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node)
+
+When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportBeforeSuite.
+
+You cannot nest any other Ginkgo nodes within a ReportBeforeSuite node's closure.
+You can learn more about ReportBeforeSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
+
+You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports
+
+You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
+*/
+func ReportBeforeSuite(body any, args ...any) bool {
+ combinedArgs := []interface{}{body}
+ combinedArgs = append(combinedArgs, args...)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...))
+}
+
+/*
+ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes execute at the suite's conclusion,
+and accept a function that can either receive Report or both SpecContext and Report for interruptible behavior.
+
+Example Usage:
+
+ ReportAfterSuite("Non-interruptible ReportAfterSuite", func(r Report) { // process report })
+ ReportAfterSuite("Interruptible ReportAfterSuite", func(ctx SpecContext, r Report) {
+ // process report
+ }, NodeTimeout(1 * time.Minute))
+
+They are called at the end of the suite, after all specs have run and any AfterSuite or SynchronizedAfterSuite nodes, and are passed in the final report for the suite.
+ReportAfterSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node)
+
+When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportAfterSuite and that it is passed a report that is aggregated across
+all parallel nodes
+
+In addition to using ReportAfterSuite to programmatically generate suite reports, you can also generate JSON, JUnit, and Teamcity formatted reports using the --json-report, --junit-report, and --teamcity-report ginkgo CLI flags.
+
+You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure.
+You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
+
+You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports
+
+You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
+*/
+func ReportAfterSuite(text string, body any, args ...interface{}) bool {
+ combinedArgs := []interface{}{body}
+ combinedArgs = append(combinedArgs, args...)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterSuite, text, combinedArgs...))
+}
+
+func registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig types.ReporterConfig) {
+ body := func(report Report) {
+ if reporterConfig.JSONReport != "" {
+ err := reporters.GenerateJSONReport(report, reporterConfig.JSONReport)
+ if err != nil {
+ Fail(fmt.Sprintf("Failed to generate JSON report:\n%s", err.Error()))
+ }
+ }
+ if reporterConfig.JUnitReport != "" {
+ err := reporters.GenerateJUnitReport(report, reporterConfig.JUnitReport)
+ if err != nil {
+ Fail(fmt.Sprintf("Failed to generate JUnit report:\n%s", err.Error()))
+ }
+ }
+ if reporterConfig.TeamcityReport != "" {
+ err := reporters.GenerateTeamcityReport(report, reporterConfig.TeamcityReport)
+ if err != nil {
+ Fail(fmt.Sprintf("Failed to generate Teamcity report:\n%s", err.Error()))
+ }
+ }
+ }
+
+ flags := []string{}
+ if reporterConfig.JSONReport != "" {
+ flags = append(flags, "--json-report")
+ }
+ if reporterConfig.JUnitReport != "" {
+ flags = append(flags, "--junit-report")
+ }
+ if reporterConfig.TeamcityReport != "" {
+ flags = append(flags, "--teamcity-report")
+ }
+ pushNode(internal.NewNode(
+ deprecationTracker, types.NodeTypeReportAfterSuite,
+ fmt.Sprintf("Autogenerated ReportAfterSuite for %s", strings.Join(flags, " ")),
+ body,
+ types.NewCustomCodeLocation("autogenerated by Ginkgo"),
+ ))
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go
new file mode 100644
index 0000000..c7de7a8
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go
@@ -0,0 +1,386 @@
+package ginkgo
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/internal"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+/*
+The EntryDescription decorator allows you to pass a format string to DescribeTable() and Entry(). This format string is used to generate entry names via:
+
+ fmt.Sprintf(formatString, parameters...)
+
+where parameters are the parameters passed into the entry.
+
+When passed into an Entry the EntryDescription is used to generate the name of that entry. When passed to DescribeTable, the EntryDescription is used to generate the names for any entries that have `nil` descriptions.
+
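+For example (a sketch):
+
+ DescribeTable("comparing ints",
+ func(x int, y int, expected bool) {
+ Expect(x > y).To(Equal(expected))
+ },
+ EntryDescription("x=%d, y=%d, expected=%t"),
+ Entry(nil, 1, 0, true), // this entry is named "x=1, y=0, expected=true"
+ )
+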
+You can learn more about generating EntryDescriptions here: https://onsi.github.io/ginkgo/#generating-entry-descriptions
+*/
+type EntryDescription string
+
+func (ed EntryDescription) render(args ...interface{}) string {
+ return fmt.Sprintf(string(ed), args...)
+}
+
+/*
+DescribeTable describes a table-driven spec.
+
+For example:
+
+ DescribeTable("a simple table",
+ func(x int, y int, expected bool) {
+ Ω(x > y).Should(Equal(expected))
+ },
+ Entry("x > y", 1, 0, true),
+ Entry("x == y", 0, 0, false),
+ Entry("x < y", 0, 1, false),
+ )
+
+You can learn more about DescribeTable here: https://onsi.github.io/ginkgo/#table-specs
+And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-specs-patterns
+*/
+func DescribeTable(description string, args ...interface{}) bool {
+ GinkgoHelper()
+ generateTable(description, false, args...)
+ return true
+}
+
+/*
+You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`.
+*/
+func FDescribeTable(description string, args ...interface{}) bool {
+ GinkgoHelper()
+ args = append(args, internal.Focus)
+ generateTable(description, false, args...)
+ return true
+}
+
+/*
+You can mark a table as pending with `PDescribeTable`. This is equivalent to `PDescribe`.
+*/
+func PDescribeTable(description string, args ...interface{}) bool {
+ GinkgoHelper()
+ args = append(args, internal.Pending)
+ generateTable(description, false, args...)
+ return true
+}
+
+/*
+You can mark a table as pending with `XDescribeTable`. This is equivalent to `XDescribe`.
+*/
+var XDescribeTable = PDescribeTable
+
+/*
+DescribeTableSubtree describes a table-driven spec that generates a set of tests for each entry.
+
+For example:
+
+ DescribeTableSubtree("a subtree table",
+ func(url string, code int, message string) {
+ var resp *http.Response
+ BeforeEach(func() {
+ var err error
+ resp, err = http.Get(url)
+ Expect(err).NotTo(HaveOccurred())
+ DeferCleanup(resp.Body.Close)
+ })
+
+ It("should return the expected status code", func() {
+ Expect(resp.StatusCode).To(Equal(code))
+ })
+
+ It("should return the expected message", func() {
+ body, err := ioutil.ReadAll(resp.Body)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(string(body)).To(Equal(message))
+ })
+ },
+ Entry("default response", "example.com/response", http.StatusOK, "hello world"),
+ Entry("missing response", "example.com/missing", http.StatusNotFound, "wat?"),
+ )
+
+Note that you **must** define at least one It inside the body function.
+
+You can learn more about DescribeTableSubtree here: https://onsi.github.io/ginkgo/#table-specs
+And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-specs-patterns
+*/
+func DescribeTableSubtree(description string, args ...interface{}) bool {
+ GinkgoHelper()
+ generateTable(description, true, args...)
+ return true
+}
+
+/*
+You can focus a table with `FDescribeTableSubtree`. This is equivalent to `FDescribe`.
+*/
+func FDescribeTableSubtree(description string, args ...interface{}) bool {
+ GinkgoHelper()
+ args = append(args, internal.Focus)
+ generateTable(description, true, args...)
+ return true
+}
+
+/*
+You can mark a table as pending with `PDescribeTableSubtree`. This is equivalent to `PDescribe`.
+*/
+func PDescribeTableSubtree(description string, args ...interface{}) bool {
+ GinkgoHelper()
+ args = append(args, internal.Pending)
+ generateTable(description, true, args...)
+ return true
+}
+
+/*
+You can mark a table as pending with `XDescribeTableSubtree`. This is equivalent to `XDescribe`.
+*/
+var XDescribeTableSubtree = PDescribeTableSubtree
+
+/*
+TableEntry represents an entry in a table test. You generally use the `Entry` constructor.
+*/
+type TableEntry struct {
+ description interface{}
+ decorations []interface{}
+ parameters []interface{}
+ codeLocation types.CodeLocation
+}
+
+/*
+Entry constructs a TableEntry.
+
+The first argument is a description. This can be a string, a function that accepts the parameters passed to the TableEntry and returns a string, an EntryDescription format string, or nil. If nil is provided then the name of the Entry is derived using the table-level entry description.
+Subsequent arguments accept any Ginkgo decorators. These are filtered out and the remaining arguments are passed into the Spec function associated with the table.
+
+Each Entry ends up generating an individual Ginkgo It. The body of the It is the Table Body function with the Entry parameters passed in.
+
+If you want to generate interruptible specs, simply write a Table Body function that accepts a SpecContext as its first argument. You can then decorate individual Entrys with the NodeTimeout and SpecTimeout decorators.
+
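+For example (a sketch of an interruptible table):
+
+ DescribeTable("lookups", func(ctx SpecContext, key string) {
+ // ctx is cancelled when the NodeTimeout expires
+ }, Entry("existing key", NodeTimeout(time.Second), "some-key"))
+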
+You can learn more about Entry here: https://onsi.github.io/ginkgo/#table-specs
+*/
+func Entry(description interface{}, args ...interface{}) TableEntry {
+ GinkgoHelper()
+ decorations, parameters := internal.PartitionDecorations(args...)
+ return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)}
+}
+
+/*
+You can focus a particular entry with FEntry. This is equivalent to FIt.
+*/
+func FEntry(description interface{}, args ...interface{}) TableEntry {
+ GinkgoHelper()
+ decorations, parameters := internal.PartitionDecorations(args...)
+ decorations = append(decorations, internal.Focus)
+ return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)}
+}
+
+/*
+You can mark a particular entry as pending with PEntry. This is equivalent to PIt.
+*/
+func PEntry(description interface{}, args ...interface{}) TableEntry {
+ GinkgoHelper()
+ decorations, parameters := internal.PartitionDecorations(args...)
+ decorations = append(decorations, internal.Pending)
+ return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)}
+}
+
+/*
+You can mark a particular entry as pending with XEntry. This is equivalent to XIt.
+*/
+var XEntry = PEntry
+
+var contextType = reflect.TypeOf(new(context.Context)).Elem()
+var specContextType = reflect.TypeOf(new(SpecContext)).Elem()
+
+func generateTable(description string, isSubtree bool, args ...interface{}) {
+ GinkgoHelper()
+ cl := types.NewCodeLocation(0)
+ containerNodeArgs := []interface{}{cl}
+
+ entries := []TableEntry{}
+ var internalBody interface{}
+ var internalBodyType reflect.Type
+
+ var tableLevelEntryDescription interface{}
+ tableLevelEntryDescription = func(args ...interface{}) string {
+ out := []string{}
+ for _, arg := range args {
+ out = append(out, fmt.Sprint(arg))
+ }
+ return "Entry: " + strings.Join(out, ", ")
+ }
+
+ if len(args) == 1 {
+ exitIfErr(types.GinkgoErrors.MissingParametersForTableFunction(cl))
+ }
+
+ for i, arg := range args {
+ switch t := reflect.TypeOf(arg); {
+ case t == nil:
+ exitIfErr(types.GinkgoErrors.IncorrectParameterTypeForTable(i, "nil", cl))
+ case t == reflect.TypeOf(TableEntry{}):
+ entries = append(entries, arg.(TableEntry))
+ case t == reflect.TypeOf([]TableEntry{}):
+ entries = append(entries, arg.([]TableEntry)...)
+ case t == reflect.TypeOf(EntryDescription("")):
+ tableLevelEntryDescription = arg.(EntryDescription).render
+ case t.Kind() == reflect.Func && t.NumOut() == 1 && t.Out(0) == reflect.TypeOf(""):
+ tableLevelEntryDescription = arg
+ case t.Kind() == reflect.Func:
+ if internalBody != nil {
+ exitIfErr(types.GinkgoErrors.MultipleEntryBodyFunctionsForTable(cl))
+ }
+ internalBody = arg
+ internalBodyType = reflect.TypeOf(internalBody)
+ default:
+ containerNodeArgs = append(containerNodeArgs, arg)
+ }
+ }
+
+ containerNodeArgs = append(containerNodeArgs, func() {
+ for _, entry := range entries {
+ var err error
+ entry := entry
+ var description string
+ switch t := reflect.TypeOf(entry.description); {
+ case t == nil:
+ err = validateParameters(tableLevelEntryDescription, entry.parameters, "Entry Description function", entry.codeLocation, false)
+ if err == nil {
+ description = invokeFunction(tableLevelEntryDescription, entry.parameters)[0].String()
+ }
+ case t == reflect.TypeOf(EntryDescription("")):
+ description = entry.description.(EntryDescription).render(entry.parameters...)
+ case t == reflect.TypeOf(""):
+ description = entry.description.(string)
+ case t.Kind() == reflect.Func && t.NumOut() == 1 && t.Out(0) == reflect.TypeOf(""):
+ err = validateParameters(entry.description, entry.parameters, "Entry Description function", entry.codeLocation, false)
+ if err == nil {
+ description = invokeFunction(entry.description, entry.parameters)[0].String()
+ }
+ default:
+ err = types.GinkgoErrors.InvalidEntryDescription(entry.codeLocation)
+ }
+
+ internalNodeArgs := []interface{}{entry.codeLocation}
+ internalNodeArgs = append(internalNodeArgs, entry.decorations...)
+
+ hasContext := false
+ if internalBodyType.NumIn() > 0 {
+ if internalBodyType.In(0).Implements(specContextType) {
+ hasContext = true
+ } else if internalBodyType.In(0).Implements(contextType) {
+ hasContext = true
+ if len(entry.parameters) > 0 && reflect.TypeOf(entry.parameters[0]) != nil && reflect.TypeOf(entry.parameters[0]).Implements(contextType) {
+ // we allow you to pass in a non-nil context
+ hasContext = false
+ }
+ }
+ }
+
+ if err == nil {
+ err = validateParameters(internalBody, entry.parameters, "Table Body function", entry.codeLocation, hasContext)
+ }
+
+ if hasContext {
+ internalNodeArgs = append(internalNodeArgs, func(c SpecContext) {
+ if err != nil {
+ panic(err)
+ }
+ invokeFunction(internalBody, append([]interface{}{c}, entry.parameters...))
+ })
+ if isSubtree {
+ exitIfErr(types.GinkgoErrors.ContextsCannotBeUsedInSubtreeTables(cl))
+ }
+ } else {
+ internalNodeArgs = append(internalNodeArgs, func() {
+ if err != nil {
+ panic(err)
+ }
+ invokeFunction(internalBody, entry.parameters)
+ })
+ }
+
+ internalNodeType := types.NodeTypeIt
+ if isSubtree {
+ internalNodeType = types.NodeTypeContainer
+ }
+
+ pushNode(internal.NewNode(deprecationTracker, internalNodeType, description, internalNodeArgs...))
+ }
+ })
+
+ pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, description, containerNodeArgs...))
+}
+
+func invokeFunction(function interface{}, parameters []interface{}) []reflect.Value {
+ inValues := make([]reflect.Value, len(parameters))
+
+ funcType := reflect.TypeOf(function)
+ limit := funcType.NumIn()
+ if funcType.IsVariadic() {
+ limit = limit - 1
+ }
+
+ for i := 0; i < limit && i < len(parameters); i++ {
+ inValues[i] = computeValue(parameters[i], funcType.In(i))
+ }
+
+ if funcType.IsVariadic() {
+ variadicType := funcType.In(limit).Elem()
+ for i := limit; i < len(parameters); i++ {
+ inValues[i] = computeValue(parameters[i], variadicType)
+ }
+ }
+
+ return reflect.ValueOf(function).Call(inValues)
+}
+
+func validateParameters(function interface{}, parameters []interface{}, kind string, cl types.CodeLocation, hasContext bool) error {
+ funcType := reflect.TypeOf(function)
+ limit := funcType.NumIn()
+ offset := 0
+ if hasContext {
+ limit = limit - 1
+ offset = 1
+ }
+ if funcType.IsVariadic() {
+ limit = limit - 1
+ }
+ if len(parameters) < limit {
+ return types.GinkgoErrors.TooFewParametersToTableFunction(limit, len(parameters), kind, cl)
+ }
+ if len(parameters) > limit && !funcType.IsVariadic() {
+ return types.GinkgoErrors.TooManyParametersToTableFunction(limit, len(parameters), kind, cl)
+ }
+ var i = 0
+ for ; i < limit; i++ {
+ actual := reflect.TypeOf(parameters[i])
+ expected := funcType.In(i + offset)
+ if !(actual == nil) && !actual.AssignableTo(expected) {
+ return types.GinkgoErrors.IncorrectParameterTypeToTableFunction(i+1, expected, actual, kind, cl)
+ }
+ }
+ if funcType.IsVariadic() {
+ expected := funcType.In(limit + offset).Elem()
+ for ; i < len(parameters); i++ {
+ actual := reflect.TypeOf(parameters[i])
+ if !(actual == nil) && !actual.AssignableTo(expected) {
+ return types.GinkgoErrors.IncorrectVariadicParameterTypeToTableFunction(expected, actual, kind, cl)
+ }
+ }
+ }
+
+ return nil
+}
+
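+// computeValue converts an entry parameter into a reflect.Value of the expected type,
+// substituting the type's zero value when the parameter is nil.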
+func computeValue(parameter interface{}, t reflect.Type) reflect.Value {
+ if parameter == nil {
+ return reflect.Zero(t)
+ } else {
+ return reflect.ValueOf(parameter)
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/code_location.go b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go
new file mode 100644
index 0000000..57e8751
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go
@@ -0,0 +1,159 @@
+package types
+
+import (
+ "fmt"
+ "os"
+ "regexp"
+ "runtime"
+ "runtime/debug"
+ "strings"
+ "sync"
+)
+
+type CodeLocation struct {
+ FileName string `json:",omitempty"`
+ LineNumber int `json:",omitempty"`
+ FullStackTrace string `json:",omitempty"`
+ CustomMessage string `json:",omitempty"`
+}
+
+func (codeLocation CodeLocation) String() string {
+ if codeLocation.CustomMessage != "" {
+ return codeLocation.CustomMessage
+ }
+ return fmt.Sprintf("%s:%d", codeLocation.FileName, codeLocation.LineNumber)
+}
+
+func (codeLocation CodeLocation) ContentsOfLine() string {
+ if codeLocation.CustomMessage != "" {
+ return ""
+ }
+ contents, err := os.ReadFile(codeLocation.FileName)
+ if err != nil {
+ return ""
+ }
+ lines := strings.Split(string(contents), "\n")
+ if len(lines) < codeLocation.LineNumber {
+ return ""
+ }
+ return lines[codeLocation.LineNumber-1]
+}
+
+type codeLocationLocator struct {
+ pcs map[uintptr]bool
+ helpers map[string]bool
+ lock *sync.Mutex
+}
+
+func (c *codeLocationLocator) addHelper(pc uintptr) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ if c.pcs[pc] {
+ return
+ }
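+ // drop the lock while resolving the pc to a function; it is re-acquired below so the deferred Unlock stays balanced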
+ c.lock.Unlock()
+ f := runtime.FuncForPC(pc)
+ c.lock.Lock()
+ if f == nil {
+ return
+ }
+ c.helpers[f.Name()] = true
+ c.pcs[pc] = true
+}
+
+func (c *codeLocationLocator) hasHelper(name string) bool {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ return c.helpers[name]
+}
+
+func (c *codeLocationLocator) getCodeLocation(skip int) CodeLocation {
+ pc := make([]uintptr, 40)
+ n := runtime.Callers(skip+2, pc)
+ if n == 0 {
+ return CodeLocation{}
+ }
+ pc = pc[:n]
+ frames := runtime.CallersFrames(pc)
+ for {
+ frame, more := frames.Next()
+ if !c.hasHelper(frame.Function) {
+ return CodeLocation{FileName: frame.File, LineNumber: frame.Line}
+ }
+ if !more {
+ break
+ }
+ }
+ return CodeLocation{}
+}
+
+var clLocator = &codeLocationLocator{
+ pcs: map[uintptr]bool{},
+ helpers: map[string]bool{},
+ lock: &sync.Mutex{},
+}
+
+// MarkAsHelper is used by GinkgoHelper to mark the caller (appropriately offset by skip) as a helper. You can use this directly if you need to provide an optional `skip` to mark functions further up the call stack as helpers.
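+//
+// For example (a sketch of a custom assertion helper):
+//
+//	func expectValidPort(port int) {
+//		types.MarkAsHelper() // reported code locations now point at expectValidPort's caller
+//		gomega.Expect(port).To(gomega.BeNumerically(">", 0))
+//	}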
+func MarkAsHelper(optionalSkip ...int) {
+ skip := 1
+ if len(optionalSkip) > 0 {
+ skip += optionalSkip[0]
+ }
+ pc, _, _, ok := runtime.Caller(skip)
+ if ok {
+ clLocator.addHelper(pc)
+ }
+}
+
+func NewCustomCodeLocation(message string) CodeLocation {
+ return CodeLocation{
+ CustomMessage: message,
+ }
+}
+
+func NewCodeLocation(skip int) CodeLocation {
+ return clLocator.getCodeLocation(skip + 1)
+}
+
+func NewCodeLocationWithStackTrace(skip int) CodeLocation {
+ cl := clLocator.getCodeLocation(skip + 1)
+ cl.FullStackTrace = PruneStack(string(debug.Stack()), skip+1)
+ return cl
+}
+
+// PruneStack removes references to functions that are internal to Ginkgo
+// and the Go runtime from a stack string and a certain number of stack entries
+// at the beginning of the stack. The stack string has the format
+// as returned by runtime/debug.Stack. The leading goroutine information is
+// optional and always removed if present. Beware that runtime/debug.Stack
+// adds itself as first entry, so typically skip must be >= 1 to remove that
+// entry.
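+//
+// For example (a sketch, mirroring NewCodeLocationWithStackTrace above):
+//
+//	cl.FullStackTrace = PruneStack(string(debug.Stack()), skip+1)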
+func PruneStack(fullStackTrace string, skip int) string {
+ stack := strings.Split(fullStackTrace, "\n")
+ // Ensure that the even entries are the method names and the
+ // odd entries the source code information.
+ if len(stack) > 0 && strings.HasPrefix(stack[0], "goroutine ") {
+ // Ignore "goroutine 29 [running]:" line.
+ stack = stack[1:]
+ }
+ // The "+1" is for skipping over the initial entry, which is
+ // runtime/debug.Stack() itself.
+ if len(stack) > 2*(skip+1) {
+ stack = stack[2*(skip+1):]
+ }
+ prunedStack := []string{}
+ if os.Getenv("GINKGO_PRUNE_STACK") == "FALSE" {
+ prunedStack = stack
+ } else {
+ re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
+ for i := 0; i < len(stack)/2; i++ {
+ // We filter out based on the source code file name.
+ if !re.MatchString(stack[i*2+1]) {
+ prunedStack = append(prunedStack, stack[i*2])
+ prunedStack = append(prunedStack, stack[i*2+1])
+ }
+ }
+ }
+ return strings.Join(prunedStack, "\n")
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go
new file mode 100644
index 0000000..66463cf
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go
@@ -0,0 +1,770 @@
+/*
+Ginkgo accepts a number of configuration options.
+These are documented [here](http://onsi.github.io/ginkgo/#the-ginkgo-cli)
+*/
+
+package types
+
+import (
+ "flag"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Configuration controlling how an individual test suite is run
+type SuiteConfig struct {
+ RandomSeed int64
+ RandomizeAllSpecs bool
+ FocusStrings []string
+ SkipStrings []string
+ FocusFiles []string
+ SkipFiles []string
+ LabelFilter string
+ FailOnPending bool
+ FailOnEmpty bool
+ FailFast bool
+ FlakeAttempts int
+ MustPassRepeatedly int
+ DryRun bool
+ PollProgressAfter time.Duration
+ PollProgressInterval time.Duration
+ Timeout time.Duration
+ EmitSpecProgress bool // this is deprecated but its removal is causing compile issue for some users that were setting it manually
+ OutputInterceptorMode string
+ SourceRoots []string
+ GracePeriod time.Duration
+
+ ParallelProcess int
+ ParallelTotal int
+ ParallelHost string
+}
+
+func NewDefaultSuiteConfig() SuiteConfig {
+ return SuiteConfig{
+ RandomSeed: time.Now().Unix(),
+ Timeout: time.Hour,
+ ParallelProcess: 1,
+ ParallelTotal: 1,
+ GracePeriod: 30 * time.Second,
+ }
+}
+
+type VerbosityLevel uint
+
+const (
+ VerbosityLevelSuccinct VerbosityLevel = iota
+ VerbosityLevelNormal
+ VerbosityLevelVerbose
+ VerbosityLevelVeryVerbose
+)
+
+func (vl VerbosityLevel) GT(comp VerbosityLevel) bool {
+ return vl > comp
+}
+
+func (vl VerbosityLevel) GTE(comp VerbosityLevel) bool {
+ return vl >= comp
+}
+
+func (vl VerbosityLevel) Is(comp VerbosityLevel) bool {
+ return vl == comp
+}
+
+func (vl VerbosityLevel) LTE(comp VerbosityLevel) bool {
+ return vl <= comp
+}
+
+func (vl VerbosityLevel) LT(comp VerbosityLevel) bool {
+ return vl < comp
+}
+
+// Configuration for Ginkgo's reporter
+type ReporterConfig struct {
+ NoColor bool
+ Succinct bool
+ Verbose bool
+ VeryVerbose bool
+ FullTrace bool
+ ShowNodeEvents bool
+ GithubOutput bool
+ SilenceSkips bool
+ ForceNewlines bool
+
+ JSONReport string
+ JUnitReport string
+ TeamcityReport string
+}
+
+func (rc ReporterConfig) Verbosity() VerbosityLevel {
+ if rc.Succinct {
+ return VerbosityLevelSuccinct
+ } else if rc.Verbose {
+ return VerbosityLevelVerbose
+ } else if rc.VeryVerbose {
+ return VerbosityLevelVeryVerbose
+ }
+ return VerbosityLevelNormal
+}
+
+func (rc ReporterConfig) WillGenerateReport() bool {
+ return rc.JSONReport != "" || rc.JUnitReport != "" || rc.TeamcityReport != ""
+}
+
+func NewDefaultReporterConfig() ReporterConfig {
+ return ReporterConfig{}
+}
+
+// Configuration for the Ginkgo CLI
+type CLIConfig struct {
+ //for build, run, and watch
+ Recurse bool
+ SkipPackage string
+ RequireSuite bool
+ NumCompilers int
+
+ //for run and watch only
+ Procs int
+ Parallel bool
+ AfterRunHook string
+ OutputDir string
+ KeepSeparateCoverprofiles bool
+ KeepSeparateReports bool
+
+ //for run only
+ KeepGoing bool
+ UntilItFails bool
+ Repeat int
+ RandomizeSuites bool
+
+ //for watch only
+ Depth int
+ WatchRegExp string
+}
+
+func NewDefaultCLIConfig() CLIConfig {
+ return CLIConfig{
+ Depth: 1,
+ WatchRegExp: `\.go$`,
+ }
+}
+
+func (g CLIConfig) ComputedProcs() int {
+ if g.Procs > 0 {
+ return g.Procs
+ }
+
+ n := 1
+ if g.Parallel {
+ n = runtime.NumCPU()
+ if n > 4 {
+ n = n - 1
+ }
+ }
+ return n
+}
+
+func (g CLIConfig) ComputedNumCompilers() int {
+ if g.NumCompilers > 0 {
+ return g.NumCompilers
+ }
+
+ return runtime.NumCPU()
+}
+
+// Configuration for the Ginkgo CLI capturing available go flags
+// A subset of Go flags is exposed by Ginkgo. Some are available at compile time (e.g. ginkgo build) and others only at run time (e.g. ginkgo run - which has both build-time and run-time flags).
+// More details can be found at:
+// https://docs.google.com/spreadsheets/d/1zkp-DS4hU4sAJl5eHh1UmgwxCPQhf3s5a8fbiOI8tJU/
+type GoFlagsConfig struct {
+ //build-time flags for code-and-performance analysis
+ Race bool
+ Cover bool
+ CoverMode string
+ CoverPkg string
+ Vet string
+
+ //run-time flags for code-and-performance analysis
+ BlockProfile string
+ BlockProfileRate int
+ CoverProfile string
+ CPUProfile string
+ MemProfile string
+ MemProfileRate int
+ MutexProfile string
+ MutexProfileFraction int
+ Trace string
+
+ //build-time flags for building
+ A bool
+ ASMFlags string
+ BuildMode string
+ Compiler string
+ GCCGoFlags string
+ GCFlags string
+ InstallSuffix string
+ LDFlags string
+ LinkShared bool
+ Mod string
+ N bool
+ ModFile string
+ ModCacheRW bool
+ MSan bool
+ PkgDir string
+ Tags string
+ TrimPath bool
+ ToolExec string
+ Work bool
+ X bool
+}
+
+func NewDefaultGoFlagsConfig() GoFlagsConfig {
+ return GoFlagsConfig{}
+}
+
+func (g GoFlagsConfig) BinaryMustBePreserved() bool {
+ return g.BlockProfile != "" || g.CPUProfile != "" || g.MemProfile != "" || g.MutexProfile != ""
+}
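+// The run-time profiling flags above require the compiled test binary to be
+// kept after the run, since go's pprof tooling needs the binary to symbolize
+// the profiles it wrote.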
+
+// Configuration options that were deprecated in 2.0
+type deprecatedConfig struct {
+ DebugParallel bool
+ NoisySkippings bool
+ NoisyPendings bool
+ RegexScansFilePath bool
+ SlowSpecThresholdWithFloatUnits float64
+ Stream bool
+ Notify bool
+ EmitSpecProgress bool
+ SlowSpecThreshold time.Duration
+ AlwaysEmitGinkgoWriter bool
+}
+
+// Flags
+
+// Flags sections used by both the CLI and the Ginkgo test process
+var FlagSections = GinkgoFlagSections{
+ {Key: "multiple-suites", Style: "{{dark-green}}", Heading: "Running Multiple Test Suites"},
+ {Key: "order", Style: "{{green}}", Heading: "Controlling Test Order"},
+ {Key: "parallel", Style: "{{yellow}}", Heading: "Controlling Test Parallelism"},
+ {Key: "low-level-parallel", Style: "{{yellow}}", Heading: "Controlling Test Parallelism",
+ Description: "These are set by the Ginkgo CLI, {{red}}{{bold}}do not set them manually{{/}} via go test.\nUse ginkgo -p or ginkgo -procs=N instead."},
+ {Key: "filter", Style: "{{cyan}}", Heading: "Filtering Tests"},
+ {Key: "failure", Style: "{{red}}", Heading: "Failure Handling"},
+ {Key: "output", Style: "{{magenta}}", Heading: "Controlling Output Formatting"},
+ {Key: "code-and-coverage-analysis", Style: "{{orange}}", Heading: "Code and Coverage Analysis"},
+ {Key: "performance-analysis", Style: "{{coral}}", Heading: "Performance Analysis"},
+ {Key: "debug", Style: "{{blue}}", Heading: "Debugging Tests",
+ Description: "In addition to these flags, Ginkgo supports a few debugging environment variables. To change the parallel server protocol set {{blue}}GINKGO_PARALLEL_PROTOCOL{{/}} to {{bold}}HTTP{{/}}. To avoid pruning callstacks set {{blue}}GINKGO_PRUNE_STACK{{/}} to {{bold}}FALSE{{/}}."},
+ {Key: "watch", Style: "{{light-yellow}}", Heading: "Controlling Ginkgo Watch"},
+ {Key: "misc", Style: "{{light-gray}}", Heading: "Miscellaneous"},
+ {Key: "go-build", Style: "{{light-gray}}", Heading: "Go Build Flags", Succinct: true,
+ Description: "These flags are inherited from go build. Run {{bold}}ginkgo help build{{/}} for more detailed flag documentation."},
+}
+
+// SuiteConfigFlags provides flags for the Ginkgo test process, and CLI
+var SuiteConfigFlags = GinkgoFlags{
+ {KeyPath: "S.RandomSeed", Name: "seed", SectionKey: "order", UsageDefaultValue: "randomly generated by Ginkgo",
+ Usage: "The seed used to randomize the spec suite.", AlwaysExport: true},
+ {KeyPath: "S.RandomizeAllSpecs", Name: "randomize-all", SectionKey: "order", DeprecatedName: "randomizeAllSpecs", DeprecatedDocLink: "changed-command-line-flags",
+ Usage: "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When containers."},
+
+ {KeyPath: "S.FailOnPending", Name: "fail-on-pending", SectionKey: "failure", DeprecatedName: "failOnPending", DeprecatedDocLink: "changed-command-line-flags",
+ Usage: "If set, ginkgo will mark the test suite as failed if any specs are pending."},
+ {KeyPath: "S.FailFast", Name: "fail-fast", SectionKey: "failure", DeprecatedName: "failFast", DeprecatedDocLink: "changed-command-line-flags",
+ Usage: "If set, ginkgo will stop running a test suite after a failure occurs."},
+ {KeyPath: "S.FlakeAttempts", Name: "flake-attempts", SectionKey: "failure", UsageDefaultValue: "0 - failed tests are not retried", DeprecatedName: "flakeAttempts", DeprecatedDocLink: "changed-command-line-flags",
+ Usage: "Make up to this many attempts to run each spec. If any of the attempts succeed, the suite will not be failed."},
+ {KeyPath: "S.FailOnEmpty", Name: "fail-on-empty", SectionKey: "failure",
+ Usage: "If set, ginkgo will mark the test suite as failed if no specs are run."},
+
+ {KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags",
+ Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v."},
+ {KeyPath: "S.PollProgressAfter", Name: "poll-progress-after", SectionKey: "debug", UsageDefaultValue: "0",
+ Usage: "Emit node progress reports periodically if node hasn't completed after this duration."},
+ {KeyPath: "S.PollProgressInterval", Name: "poll-progress-interval", SectionKey: "debug", UsageDefaultValue: "10s",
+ Usage: "The rate at which to emit node progress reports after poll-progress-after has elapsed."},
+ {KeyPath: "S.SourceRoots", Name: "source-root", SectionKey: "debug",
+ Usage: "The location to look for source code when generating progress reports. You can pass multiple --source-root flags."},
+ {KeyPath: "S.Timeout", Name: "timeout", SectionKey: "debug", UsageDefaultValue: "1h",
+ Usage: "Test suite fails if it does not complete within the specified timeout."},
+ {KeyPath: "S.GracePeriod", Name: "grace-period", SectionKey: "debug", UsageDefaultValue: "30s",
+ Usage: "When interrupted, Ginkgo will wait for GracePeriod for the current running node to exit before moving on to the next one."},
+ {KeyPath: "S.OutputInterceptorMode", Name: "output-interceptor-mode", SectionKey: "debug", UsageArgument: "dup, swap, or none",
+ Usage: "If set, ginkgo will use the specified output interception strategy when running in parallel. Defaults to dup on unix and swap on windows."},
+
+ {KeyPath: "S.LabelFilter", Name: "label-filter", SectionKey: "filter", UsageArgument: "expression",
+ Usage: "If set, ginkgo will only run specs with labels that match the label-filter. The passed-in expression can include boolean operations (!, &&, ||, ','), groupings via '()', and regular expressions '/regexp/'. e.g. '(cat || dog) && !fruit'"},
+ {KeyPath: "S.FocusStrings", Name: "focus", SectionKey: "filter",
+ Usage: "If set, ginkgo will only run specs that match this regular expression. Can be specified multiple times, values are ORed."},
+ {KeyPath: "S.SkipStrings", Name: "skip", SectionKey: "filter",
+ Usage: "If set, ginkgo will only run specs that do not match this regular expression. Can be specified multiple times, values are ORed."},
+ {KeyPath: "S.FocusFiles", Name: "focus-file", SectionKey: "filter", UsageArgument: "file (regexp) | file:line | file:lineA-lineB | file:line,line,line",
+ Usage: "If set, ginkgo will only run specs in matching files. Can be specified multiple times, values are ORed."},
+ {KeyPath: "S.SkipFiles", Name: "skip-file", SectionKey: "filter", UsageArgument: "file (regexp) | file:line | file:lineA-lineB | file:line,line,line",
+ Usage: "If set, ginkgo will skip specs in matching files. Can be specified multiple times, values are ORed."},
+
+ {KeyPath: "D.RegexScansFilePath", DeprecatedName: "regexScansFilePath", DeprecatedDocLink: "removed--regexscansfilepath", DeprecatedVersion: "2.0.0"},
+ {KeyPath: "D.DebugParallel", DeprecatedName: "debug", DeprecatedDocLink: "removed--debug", DeprecatedVersion: "2.0.0"},
+ {KeyPath: "D.EmitSpecProgress", DeprecatedName: "progress", SectionKey: "debug",
+ DeprecatedVersion: "2.5.0", Usage: ". The functionality provided by --progress was confusing and is no longer needed. Use --show-node-events instead to see node entry and exit events included in the timeline of failed and verbose specs. Or you can run with -vv to always see all node events. Lastly, --poll-progress-after and the PollProgressAfter decorator now provide a better mechanism for debugging specs that tend to get stuck."},
+}
+
+// ParallelConfigFlags provides flags for the Ginkgo test process (not the CLI)
+var ParallelConfigFlags = GinkgoFlags{
+ {KeyPath: "S.ParallelProcess", Name: "parallel.process", SectionKey: "low-level-parallel", UsageDefaultValue: "1",
+ Usage: "This worker process's (one-indexed) process number. For running specs in parallel."},
+ {KeyPath: "S.ParallelTotal", Name: "parallel.total", SectionKey: "low-level-parallel", UsageDefaultValue: "1",
+ Usage: "The total number of worker processes. For running specs in parallel."},
+ {KeyPath: "S.ParallelHost", Name: "parallel.host", SectionKey: "low-level-parallel", UsageDefaultValue: "set by Ginkgo CLI",
+ Usage: "The address for the server that will synchronize the processes."},
+}
+
+// ReporterConfigFlags provides flags for the Ginkgo test process, and CLI
+var ReporterConfigFlags = GinkgoFlags{
+ {KeyPath: "R.NoColor", Name: "no-color", SectionKey: "output", DeprecatedName: "noColor", DeprecatedDocLink: "changed-command-line-flags",
+ Usage: "If set, suppress color output in default reporter."},
+ {KeyPath: "R.Verbose", Name: "v", SectionKey: "output",
+ Usage: "If set, emits more output including GinkgoWriter contents."},
+ {KeyPath: "R.VeryVerbose", Name: "vv", SectionKey: "output",
+ Usage: "If set, emits with maximal verbosity - includes skipped and pending tests."},
+ {KeyPath: "R.Succinct", Name: "succinct", SectionKey: "output",
+ Usage: "If set, default reporter prints out a very succinct report"},
+ {KeyPath: "R.FullTrace", Name: "trace", SectionKey: "output",
+ Usage: "If set, default reporter prints out the full stack trace when a failure occurs"},
+ {KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output",
+ Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"},
+ {KeyPath: "R.GithubOutput", Name: "github-output", SectionKey: "output",
+ Usage: "If set, default reporter prints easier to manage output in Github Actions."},
+ {KeyPath: "R.SilenceSkips", Name: "silence-skips", SectionKey: "output",
+ Usage: "If set, default reporter will not print out skipped tests."},
+ {KeyPath: "R.ForceNewlines", Name: "force-newlines", SectionKey: "output",
+ Usage: "If set, default reporter will ensure a newline appears after each test."},
+
+ {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output",
+ Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."},
+ {KeyPath: "R.JUnitReport", Name: "junit-report", UsageArgument: "filename.xml", SectionKey: "output", DeprecatedName: "reportFile", DeprecatedDocLink: "improved-reporting-infrastructure",
+ Usage: "If set, Ginkgo will generate a conformant junit test report in the specified file."},
+ {KeyPath: "R.TeamcityReport", Name: "teamcity-report", UsageArgument: "filename", SectionKey: "output",
+ Usage: "If set, Ginkgo will generate a Teamcity-formatted test report at the specified location."},
+
+ {KeyPath: "D.SlowSpecThresholdWithFLoatUnits", DeprecatedName: "slowSpecThreshold", DeprecatedDocLink: "changed--slowspecthreshold",
+ Usage: "use --slow-spec-threshold instead and pass in a duration string (e.g. '5s', not '5.0')"},
+ {KeyPath: "D.NoisyPendings", DeprecatedName: "noisyPendings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"},
+ {KeyPath: "D.NoisySkippings", DeprecatedName: "noisySkippings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"},
+ {KeyPath: "D.SlowSpecThreshold", DeprecatedName: "slow-spec-threshold", SectionKey: "output", Usage: "--slow-spec-threshold has been deprecated and will be removed in a future version of Ginkgo. This feature has proved to be more noisy than useful. You can use --poll-progress-after, instead, to get more actionable feedback about potentially slow specs and understand where they might be getting stuck.", DeprecatedVersion: "2.5.0"},
+ {KeyPath: "D.AlwaysEmitGinkgoWriter", DeprecatedName: "always-emit-ginkgo-writer", SectionKey: "output", Usage: " - use -v instead, or one of Ginkgo's machine-readable report formats to get GinkgoWriter output for passing specs."},
+}
+
+// BuildTestSuiteFlagSet attaches to the CommandLine flagset and provides flags for the Ginkgo test process
+func BuildTestSuiteFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig) (GinkgoFlagSet, error) {
+ flags := SuiteConfigFlags.CopyAppend(ParallelConfigFlags...).CopyAppend(ReporterConfigFlags...)
+ flags = flags.WithPrefix("ginkgo")
+ bindings := map[string]interface{}{
+ "S": suiteConfig,
+ "R": reporterConfig,
+ "D": &deprecatedConfig{},
+ }
+ extraGoFlagsSection := GinkgoFlagSection{Style: "{{gray}}", Heading: "Go test flags"}
+
+ return NewAttachedGinkgoFlagSet(flag.CommandLine, flags, bindings, FlagSections, extraGoFlagsSection)
+}
+
+// VetConfig validates that the Ginkgo test process' configuration is sound
+func VetConfig(flagSet GinkgoFlagSet, suiteConfig SuiteConfig, reporterConfig ReporterConfig) []error {
+ errors := []error{}
+
+ if flagSet.WasSet("count") || flagSet.WasSet("test.count") {
+ flag := flagSet.Lookup("count")
+ if flag == nil {
+ flag = flagSet.Lookup("test.count")
+ }
+ count, err := strconv.Atoi(flag.Value.String())
+ if err != nil || count != 1 {
+ errors = append(errors, GinkgoErrors.InvalidGoFlagCount())
+ }
+ }
+
+ if flagSet.WasSet("parallel") || flagSet.WasSet("test.parallel") {
+ errors = append(errors, GinkgoErrors.InvalidGoFlagParallel())
+ }
+
+ if suiteConfig.ParallelTotal < 1 {
+ errors = append(errors, GinkgoErrors.InvalidParallelTotalConfiguration())
+ }
+
+ if suiteConfig.ParallelProcess > suiteConfig.ParallelTotal || suiteConfig.ParallelProcess < 1 {
+ errors = append(errors, GinkgoErrors.InvalidParallelProcessConfiguration())
+ }
+
+ if suiteConfig.ParallelTotal > 1 && suiteConfig.ParallelHost == "" {
+ errors = append(errors, GinkgoErrors.MissingParallelHostConfiguration())
+ }
+
+ if suiteConfig.DryRun && suiteConfig.ParallelTotal > 1 {
+ errors = append(errors, GinkgoErrors.DryRunInParallelConfiguration())
+ }
+
+ if suiteConfig.GracePeriod <= 0 {
+ errors = append(errors, GinkgoErrors.GracePeriodCannotBeZero())
+ }
+
+ if len(suiteConfig.FocusFiles) > 0 {
+ _, err := ParseFileFilters(suiteConfig.FocusFiles)
+ if err != nil {
+ errors = append(errors, err)
+ }
+ }
+
+ if len(suiteConfig.SkipFiles) > 0 {
+ _, err := ParseFileFilters(suiteConfig.SkipFiles)
+ if err != nil {
+ errors = append(errors, err)
+ }
+ }
+
+ if suiteConfig.LabelFilter != "" {
+ _, err := ParseLabelFilter(suiteConfig.LabelFilter)
+ if err != nil {
+ errors = append(errors, err)
+ }
+ }
+
+ switch strings.ToLower(suiteConfig.OutputInterceptorMode) {
+ case "", "dup", "swap", "none":
+ default:
+ errors = append(errors, GinkgoErrors.InvalidOutputInterceptorModeConfiguration(suiteConfig.OutputInterceptorMode))
+ }
+
+ numVerbosity := 0
+ for _, v := range []bool{reporterConfig.Succinct, reporterConfig.Verbose, reporterConfig.VeryVerbose} {
+ if v {
+ numVerbosity++
+ }
+ }
+ if numVerbosity > 1 {
+ errors = append(errors, GinkgoErrors.ConflictingVerbosityConfiguration())
+ }
+
+ return errors
+}
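+// Note that VetConfig accumulates every violation rather than stopping at the
+// first one. For example, vetting a zero-valued SuiteConfig reports an invalid
+// parallel total, an invalid parallel process, and a zero grace period.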
+
+// GinkgoCLISharedFlags provides flags shared by the Ginkgo CLI's build, watch, and run commands
+var GinkgoCLISharedFlags = GinkgoFlags{
+ {KeyPath: "C.Recurse", Name: "r", SectionKey: "multiple-suites",
+ Usage: "If set, ginkgo finds and runs test suites under the current directory recursively."},
+ {KeyPath: "C.SkipPackage", Name: "skip-package", SectionKey: "multiple-suites", DeprecatedName: "skipPackage", DeprecatedDocLink: "changed-command-line-flags",
+ UsageArgument: "comma-separated list of packages",
+ Usage: "A comma-separated list of package names to be skipped. If any part of the package's path matches, that package is ignored."},
+ {KeyPath: "C.RequireSuite", Name: "require-suite", SectionKey: "failure", DeprecatedName: "requireSuite", DeprecatedDocLink: "changed-command-line-flags",
+ Usage: "If set, Ginkgo fails if there are ginkgo tests in a directory but no invocation of RunSpecs."},
+ {KeyPath: "C.NumCompilers", Name: "compilers", SectionKey: "multiple-suites", UsageDefaultValue: "0 (will autodetect)",
+ Usage: "When running multiple packages, the number of concurrent compilations to perform."},
+}
+
+// GinkgoCLIRunAndWatchFlags provides flags shared by the Ginkgo CLI's run and watch commands (but not the build command)
+var GinkgoCLIRunAndWatchFlags = GinkgoFlags{
+ {KeyPath: "C.Procs", Name: "procs", SectionKey: "parallel", UsageDefaultValue: "1 (run in series)",
+ Usage: "The number of parallel test nodes to run."},
+ {KeyPath: "C.Procs", Name: "nodes", SectionKey: "parallel", UsageDefaultValue: "1 (run in series)",
+ Usage: "--nodes is an alias for --procs"},
+ {KeyPath: "C.Parallel", Name: "p", SectionKey: "parallel",
+ Usage: "If set, ginkgo will run in parallel with an auto-detected number of nodes."},
+ {KeyPath: "C.AfterRunHook", Name: "after-run-hook", SectionKey: "misc", DeprecatedName: "afterSuiteHook", DeprecatedDocLink: "changed-command-line-flags",
+ Usage: "Command to run when a test suite completes."},
+ {KeyPath: "C.OutputDir", Name: "output-dir", SectionKey: "output", UsageArgument: "directory", DeprecatedName: "outputdir", DeprecatedDocLink: "improved-profiling-support",
+ Usage: "A location to place all generated profiles and reports."},
+ {KeyPath: "C.KeepSeparateCoverprofiles", Name: "keep-separate-coverprofiles", SectionKey: "code-and-coverage-analysis",
+ Usage: "If set, Ginkgo does not merge coverprofiles into one monolithic coverprofile. The coverprofiles will remain in their respective package directories or in -output-dir if set."},
+ {KeyPath: "C.KeepSeparateReports", Name: "keep-separate-reports", SectionKey: "output",
+ Usage: "If set, Ginkgo does not merge per-suite reports (e.g. -json-report) into one monolithic report for the entire testrun. The reports will remain in their respective package directories or in -output-dir if set."},
+
+ {KeyPath: "D.Stream", DeprecatedName: "stream", DeprecatedDocLink: "removed--stream", DeprecatedVersion: "2.0.0"},
+ {KeyPath: "D.Notify", DeprecatedName: "notify", DeprecatedDocLink: "removed--notify", DeprecatedVersion: "2.0.0"},
+}
+
+// GinkgoCLIRunFlags provides flags for Ginkgo CLI's run command that aren't shared by any other commands
+var GinkgoCLIRunFlags = GinkgoFlags{
+ {KeyPath: "C.KeepGoing", Name: "keep-going", SectionKey: "multiple-suites", DeprecatedName: "keepGoing", DeprecatedDocLink: "changed-command-line-flags",
+ Usage: "If set, failures from earlier test suites do not prevent later test suites from running."},
+ {KeyPath: "C.UntilItFails", Name: "until-it-fails", SectionKey: "debug", DeprecatedName: "untilItFails", DeprecatedDocLink: "changed-command-line-flags",
+ Usage: "If set, ginkgo will keep rerunning test suites until a failure occurs."},
+ {KeyPath: "C.Repeat", Name: "repeat", SectionKey: "debug", UsageArgument: "n", UsageDefaultValue: "0 - i.e. no repetition, run only once",
+ Usage: "The number of times to re-run a test-suite. Useful for debugging flaky tests. If set to N the suite will be run N+1 times and will be required to pass each time."},
+ {KeyPath: "C.RandomizeSuites", Name: "randomize-suites", SectionKey: "order", DeprecatedName: "randomizeSuites", DeprecatedDocLink: "changed-command-line-flags",
+ Usage: "If set, ginkgo will randomize the order in which test suites run."},
+}
+
+// GinkgoCLIWatchFlags provides flags for the Ginkgo CLI's watch command that aren't shared by any other commands
+var GinkgoCLIWatchFlags = GinkgoFlags{
+ {KeyPath: "C.Depth", Name: "depth", SectionKey: "watch",
+ Usage: "Ginkgo will watch dependencies down to this depth in the dependency tree."},
+ {KeyPath: "C.WatchRegExp", Name: "watch-regexp", SectionKey: "watch", DeprecatedName: "watchRegExp", DeprecatedDocLink: "changed-command-line-flags",
+ UsageArgument: "Regular Expression",
+ UsageDefaultValue: `\.go$`,
+ Usage: "Only files matching this regular expression will be watched for changes."},
+}
+
+// GoBuildFlags provides flags for the Ginkgo CLI build, run, and watch commands that capture go's build-time flags. These are passed to go test -c by the ginkgo CLI
+var GoBuildFlags = GinkgoFlags{
+ {KeyPath: "Go.Race", Name: "race", SectionKey: "code-and-coverage-analysis",
+ Usage: "enable data race detection. Supported only on linux/amd64, freebsd/amd64, darwin/amd64, windows/amd64, linux/ppc64le and linux/arm64 (only for 48-bit VMA)."},
+ {KeyPath: "Go.Vet", Name: "vet", UsageArgument: "list", SectionKey: "code-and-coverage-analysis",
+ Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty, "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`},
+ {KeyPath: "Go.Cover", Name: "cover", SectionKey: "code-and-coverage-analysis",
+ Usage: "Enable coverage analysis. Note that because coverage works by annotating the source code before compilation, compilation and test failures with coverage enabled may report line numbers that don't correspond to the original sources."},
+ {KeyPath: "Go.CoverMode", Name: "covermode", UsageArgument: "set,count,atomic", SectionKey: "code-and-coverage-analysis",
+ Usage: `Set the mode for coverage analysis for the package[s] being tested. 'set': does this statement run? 'count': how many times does this statement run? 'atomic': like count, but correct in multithreaded tests and more expensive (must use atomic with -race). Sets -cover`},
+ {KeyPath: "Go.CoverPkg", Name: "coverpkg", UsageArgument: "pattern1,pattern2,pattern3", SectionKey: "code-and-coverage-analysis",
+ Usage: "Apply coverage analysis in each test to packages matching the patterns. The default is for each test to analyze only the package being tested. See 'go help packages' for a description of package patterns. Sets -cover."},
+
+ {KeyPath: "Go.A", Name: "a", SectionKey: "go-build",
+ Usage: "force rebuilding of packages that are already up-to-date."},
+ {KeyPath: "Go.ASMFlags", Name: "asmflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build",
+ Usage: "arguments to pass on each go tool asm invocation."},
+ {KeyPath: "Go.BuildMode", Name: "buildmode", UsageArgument: "mode", SectionKey: "go-build",
+ Usage: "build mode to use. See 'go help buildmode' for more."},
+ {KeyPath: "Go.Compiler", Name: "compiler", UsageArgument: "name", SectionKey: "go-build",
+ Usage: "name of compiler to use, as in runtime.Compiler (gccgo or gc)."},
+ {KeyPath: "Go.GCCGoFlags", Name: "gccgoflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build",
+ Usage: "arguments to pass on each gccgo compiler/linker invocation."},
+ {KeyPath: "Go.GCFlags", Name: "gcflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build",
+ Usage: "arguments to pass on each go tool compile invocation."},
+ {KeyPath: "Go.InstallSuffix", Name: "installsuffix", SectionKey: "go-build",
+ Usage: "a suffix to use in the name of the package installation directory, in order to keep output separate from default builds. If using the -race flag, the install suffix is automatically set to raceor, if set explicitly, has _race appended to it. Likewise for the -msan flag. Using a -buildmode option that requires non-default compile flags has a similar effect."},
+ {KeyPath: "Go.LDFlags", Name: "ldflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build",
+ Usage: "arguments to pass on each go tool link invocation."},
+ {KeyPath: "Go.LinkShared", Name: "linkshared", SectionKey: "go-build",
+ Usage: "build code that will be linked against shared libraries previously created with -buildmode=shared."},
+ {KeyPath: "Go.Mod", Name: "mod", UsageArgument: "mode (readonly, vendor, or mod)", SectionKey: "go-build",
+ Usage: "module download mode to use: readonly, vendor, or mod. See 'go help modules' for more."},
+ {KeyPath: "Go.ModCacheRW", Name: "modcacherw", SectionKey: "go-build",
+ Usage: "leave newly-created directories in the module cache read-write instead of making them read-only."},
+ {KeyPath: "Go.ModFile", Name: "modfile", UsageArgument: "file", SectionKey: "go-build",
+ Usage: `in module aware mode, read (and possibly write) an alternate go.mod file instead of the one in the module root directory. A file named go.mod must still be present in order to determine the module root directory, but it is not accessed. When -modfile is specified, an alternate go.sum file is also used: its path is derived from the -modfile flag by trimming the ".mod" extension and appending ".sum".`},
+ {KeyPath: "Go.MSan", Name: "msan", SectionKey: "go-build",
+ Usage: "enable interoperation with memory sanitizer. Supported only on linux/amd64, linux/arm64 and only with Clang/LLVM as the host C compiler. On linux/arm64, pie build mode will be used."},
+ {KeyPath: "Go.N", Name: "n", SectionKey: "go-build",
+ Usage: "print the commands but do not run them."},
+ {KeyPath: "Go.PkgDir", Name: "pkgdir", UsageArgument: "dir", SectionKey: "go-build",
+ Usage: "install and load all packages from dir instead of the usual locations. For example, when building with a non-standard configuration, use -pkgdir to keep generated packages in a separate location."},
+ {KeyPath: "Go.Tags", Name: "tags", UsageArgument: "tag,list", SectionKey: "go-build",
+ Usage: "a comma-separated list of build tags to consider satisfied during the build. For more information about build tags, see the description of build constraints in the documentation for the go/build package. (Earlier versions of Go used a space-separated list, and that form is deprecated but still recognized.)"},
+ {KeyPath: "Go.TrimPath", Name: "trimpath", SectionKey: "go-build",
+ Usage: `remove all file system paths from the resulting executable. Instead of absolute file system paths, the recorded file names will begin with either "go" (for the standard library), or a module path@version (when using modules), or a plain import path (when using GOPATH).`},
+ {KeyPath: "Go.ToolExec", Name: "toolexec", UsageArgument: "'cmd args'", SectionKey: "go-build",
+ Usage: "a program to use to invoke toolchain programs like vet and asm. For example, instead of running asm, the go command will run cmd args /path/to/asm '."},
+ {KeyPath: "Go.Work", Name: "work", SectionKey: "go-build",
+ Usage: "print the name of the temporary work directory and do not delete it when exiting."},
+ {KeyPath: "Go.X", Name: "x", SectionKey: "go-build",
+ Usage: "print the commands."},
+}
+
+// GoRunFlags provides flags for the Ginkgo CLI run, and watch commands that capture go's run-time flags. These are passed to the compiled test binary by the ginkgo CLI
+var GoRunFlags = GinkgoFlags{
+ {KeyPath: "Go.CoverProfile", Name: "coverprofile", UsageArgument: "file", SectionKey: "code-and-coverage-analysis",
+ Usage: `Write a coverage profile to the file after all tests have passed. Sets -cover.`},
+ {KeyPath: "Go.BlockProfile", Name: "blockprofile", UsageArgument: "file", SectionKey: "performance-analysis",
+ Usage: `Write a goroutine blocking profile to the specified file when all tests are complete. Preserves test binary.`},
+ {KeyPath: "Go.BlockProfileRate", Name: "blockprofilerate", UsageArgument: "rate", SectionKey: "performance-analysis",
+ Usage: `Control the detail provided in goroutine blocking profiles by calling runtime.SetBlockProfileRate with rate. See 'go doc runtime.SetBlockProfileRate'. The profiler aims to sample, on average, one blocking event every n nanoseconds the program spends blocked. By default, if -test.blockprofile is set without this flag, all blocking events are recorded, equivalent to -test.blockprofilerate=1.`},
+ {KeyPath: "Go.CPUProfile", Name: "cpuprofile", UsageArgument: "file", SectionKey: "performance-analysis",
+ Usage: `Write a CPU profile to the specified file before exiting. Preserves test binary.`},
+ {KeyPath: "Go.MemProfile", Name: "memprofile", UsageArgument: "file", SectionKey: "performance-analysis",
+ Usage: `Write an allocation profile to the file after all tests have passed. Preserves test binary.`},
+ {KeyPath: "Go.MemProfileRate", Name: "memprofilerate", UsageArgument: "rate", SectionKey: "performance-analysis",
+ Usage: `Enable more precise (and expensive) memory allocation profiles by setting runtime.MemProfileRate. See 'go doc runtime.MemProfileRate'. To profile all memory allocations, use -test.memprofilerate=1.`},
+ {KeyPath: "Go.MutexProfile", Name: "mutexprofile", UsageArgument: "file", SectionKey: "performance-analysis",
+ Usage: `Write a mutex contention profile to the specified file when all tests are complete. Preserves test binary.`},
+ {KeyPath: "Go.MutexProfileFraction", Name: "mutexprofilefraction", UsageArgument: "n", SectionKey: "performance-analysis",
+ Usage: `if >= 0, calls runtime.SetMutexProfileFraction() to sample 1 in n stack traces of goroutines holding a contended mutex.`},
+ {KeyPath: "Go.Trace", Name: "execution-trace", UsageArgument: "file", ExportAs: "trace", SectionKey: "performance-analysis",
+ Usage: `Write an execution trace to the specified file before exiting.`},
+}
+
+// VetAndInitializeCLIAndGoConfig validates that the Ginkgo CLI's configuration is sound
+// It returns a potentially mutated copy of the config that rationalizes the configuration to ensure consistency for downstream consumers
+func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsConfig) (CLIConfig, GoFlagsConfig, []error) {
+ errors := []error{}
+
+ if cliConfig.Repeat > 0 && cliConfig.UntilItFails {
+ errors = append(errors, GinkgoErrors.BothRepeatAndUntilItFails())
+ }
+
+ //initialize the output directory
+ if cliConfig.OutputDir != "" {
+ err := os.MkdirAll(cliConfig.OutputDir, 0777)
+ if err != nil {
+ errors = append(errors, err)
+ }
+ }
+
+ //ensure cover mode is configured appropriately
+ if goFlagsConfig.CoverMode != "" || goFlagsConfig.CoverPkg != "" || goFlagsConfig.CoverProfile != "" {
+ goFlagsConfig.Cover = true
+ }
+ if goFlagsConfig.Cover && goFlagsConfig.CoverProfile == "" {
+ goFlagsConfig.CoverProfile = "coverprofile.out"
+ }
+
+ return cliConfig, goFlagsConfig, errors
+}
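+// As a concrete example of the rationalization above: passing only
+// -coverpkg=./... flips Cover on, and since no -coverprofile was given the
+// profile name falls back to "coverprofile.out".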
+
+// GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test
+func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string, pathToInvocationPath string) ([]string, error) {
+ // if the user has set the CoverProfile run-time flag, also set the build-time cover flag so that
+ // the built test binary can generate a coverprofile
+ if goFlagsConfig.CoverProfile != "" {
+ goFlagsConfig.Cover = true
+ }
+
+ if goFlagsConfig.CoverPkg != "" {
+ coverPkgs := strings.Split(goFlagsConfig.CoverPkg, ",")
+ adjustedCoverPkgs := make([]string, len(coverPkgs))
+ for i, coverPkg := range coverPkgs {
+ coverPkg = strings.Trim(coverPkg, " ")
+ if strings.HasPrefix(coverPkg, "./") {
+ // this is a relative coverPkg - we need to reroot it
+ adjustedCoverPkgs[i] = "./" + filepath.Join(pathToInvocationPath, strings.TrimPrefix(coverPkg, "./"))
+ } else {
+ // this is a package name - don't touch it
+ adjustedCoverPkgs[i] = coverPkg
+ }
+ }
+ goFlagsConfig.CoverPkg = strings.Join(adjustedCoverPkgs, ",")
+ }
+
+ args := []string{"test", "-c", "-o", destination, packageToBuild}
+ goArgs, err := GenerateFlagArgs(
+ GoBuildFlags,
+ map[string]interface{}{
+ "Go": &goFlagsConfig,
+ },
+ )
+
+ if err != nil {
+ return []string{}, err
+ }
+ args = append(args, goArgs...)
+ return args, nil
+}
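+// The rerooting above matters when ginkgo compiles a suite from a different
+// directory than it was invoked in. For example (paths illustrative), with
+// pathToInvocationPath "pkg/sub" a relative entry "./util" becomes
+// "./pkg/sub/util", while a fully-qualified package pattern passes through
+// untouched.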
+
+// GenerateGinkgoTestRunArgs is used by the Ginkgo CLI to generate command line arguments to pass to the compiled Ginkgo test binary
+func GenerateGinkgoTestRunArgs(suiteConfig SuiteConfig, reporterConfig ReporterConfig, goFlagsConfig GoFlagsConfig) ([]string, error) {
+ var flags GinkgoFlags
+ flags = SuiteConfigFlags.WithPrefix("ginkgo")
+ flags = flags.CopyAppend(ParallelConfigFlags.WithPrefix("ginkgo")...)
+ flags = flags.CopyAppend(ReporterConfigFlags.WithPrefix("ginkgo")...)
+ flags = flags.CopyAppend(GoRunFlags.WithPrefix("test")...)
+ bindings := map[string]interface{}{
+ "S": &suiteConfig,
+ "R": &reporterConfig,
+ "Go": &goFlagsConfig,
+ }
+
+ return GenerateFlagArgs(flags, bindings)
+}
+
+// GenerateGoTestRunArgs is used by the Ginkgo CLI to generate command line arguments to pass to the compiled non-Ginkgo test binary
+func GenerateGoTestRunArgs(goFlagsConfig GoFlagsConfig) ([]string, error) {
+ flags := GoRunFlags.WithPrefix("test")
+ bindings := map[string]interface{}{
+ "Go": &goFlagsConfig,
+ }
+
+ args, err := GenerateFlagArgs(flags, bindings)
+ if err != nil {
+ return args, err
+ }
+ args = append(args, "--test.v")
+ return args, nil
+}
+
+// BuildRunCommandFlagSet builds the FlagSet for the `ginkgo run` command
+func BuildRunCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig, cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) {
+ flags := SuiteConfigFlags
+ flags = flags.CopyAppend(ReporterConfigFlags...)
+ flags = flags.CopyAppend(GinkgoCLISharedFlags...)
+ flags = flags.CopyAppend(GinkgoCLIRunAndWatchFlags...)
+ flags = flags.CopyAppend(GinkgoCLIRunFlags...)
+ flags = flags.CopyAppend(GoBuildFlags...)
+ flags = flags.CopyAppend(GoRunFlags...)
+
+ bindings := map[string]interface{}{
+ "S": suiteConfig,
+ "R": reporterConfig,
+ "C": cliConfig,
+ "Go": goFlagsConfig,
+ "D": &deprecatedConfig{},
+ }
+
+ return NewGinkgoFlagSet(flags, bindings, FlagSections)
+}
+
+// BuildWatchCommandFlagSet builds the FlagSet for the `ginkgo watch` command
+func BuildWatchCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig, cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) {
+ flags := SuiteConfigFlags
+ flags = flags.CopyAppend(ReporterConfigFlags...)
+ flags = flags.CopyAppend(GinkgoCLISharedFlags...)
+ flags = flags.CopyAppend(GinkgoCLIRunAndWatchFlags...)
+ flags = flags.CopyAppend(GinkgoCLIWatchFlags...)
+ flags = flags.CopyAppend(GoBuildFlags...)
+ flags = flags.CopyAppend(GoRunFlags...)
+
+ bindings := map[string]interface{}{
+ "S": suiteConfig,
+ "R": reporterConfig,
+ "C": cliConfig,
+ "Go": goFlagsConfig,
+ "D": &deprecatedConfig{},
+ }
+
+ return NewGinkgoFlagSet(flags, bindings, FlagSections)
+}
+
+// BuildBuildCommandFlagSet builds the FlagSet for the `ginkgo build` command
+func BuildBuildCommandFlagSet(cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) {
+ flags := GinkgoCLISharedFlags
+ flags = flags.CopyAppend(GoBuildFlags...)
+
+ bindings := map[string]interface{}{
+ "C": cliConfig,
+ "Go": goFlagsConfig,
+ "D": &deprecatedConfig{},
+ }
+
+ flagSections := make(GinkgoFlagSections, len(FlagSections))
+ copy(flagSections, FlagSections)
+ for i := range flagSections {
+ if flagSections[i].Key == "multiple-suites" {
+ flagSections[i].Heading = "Building Multiple Suites"
+ }
+ if flagSections[i].Key == "go-build" {
+ flagSections[i] = GinkgoFlagSection{Key: "go-build", Style: "{{/}}", Heading: "Go Build Flags",
+ Description: "These flags are inherited from go build."}
+ }
+ }
+
+ return NewGinkgoFlagSet(flags, bindings, flagSections)
+}
+
+func BuildLabelsCommandFlagSet(cliConfig *CLIConfig) (GinkgoFlagSet, error) {
+ flags := GinkgoCLISharedFlags.SubsetWithNames("r", "skip-package")
+
+ bindings := map[string]interface{}{
+ "C": cliConfig,
+ }
+
+ flagSections := make(GinkgoFlagSections, len(FlagSections))
+ copy(flagSections, FlagSections)
+ for i := range flagSections {
+ if flagSections[i].Key == "multiple-suites" {
+ flagSections[i].Heading = "Fetching Labels from Multiple Suites"
+ }
+ }
+
+ return NewGinkgoFlagSet(flags, bindings, flagSections)
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go b/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go
new file mode 100644
index 0000000..1792230
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go
@@ -0,0 +1,141 @@
+package types
+
+import (
+ "strconv"
+ "time"
+)
+
+/*
+ A set of deprecations to make the transition from v1 to v2 easier for users who have written custom reporters.
+*/
+
+type SuiteSummary = DeprecatedSuiteSummary
+type SetupSummary = DeprecatedSetupSummary
+type SpecSummary = DeprecatedSpecSummary
+type SpecMeasurement = DeprecatedSpecMeasurement
+type SpecComponentType = NodeType
+type SpecFailure = DeprecatedSpecFailure
+
+var (
+ SpecComponentTypeInvalid = NodeTypeInvalid
+ SpecComponentTypeContainer = NodeTypeContainer
+ SpecComponentTypeIt = NodeTypeIt
+ SpecComponentTypeBeforeEach = NodeTypeBeforeEach
+ SpecComponentTypeJustBeforeEach = NodeTypeJustBeforeEach
+ SpecComponentTypeAfterEach = NodeTypeAfterEach
+ SpecComponentTypeJustAfterEach = NodeTypeJustAfterEach
+ SpecComponentTypeBeforeSuite = NodeTypeBeforeSuite
+ SpecComponentTypeSynchronizedBeforeSuite = NodeTypeSynchronizedBeforeSuite
+ SpecComponentTypeAfterSuite = NodeTypeAfterSuite
+ SpecComponentTypeSynchronizedAfterSuite = NodeTypeSynchronizedAfterSuite
+)
+
+type DeprecatedSuiteSummary struct {
+ SuiteDescription string
+ SuiteSucceeded bool
+ SuiteID string
+
+ NumberOfSpecsBeforeParallelization int
+ NumberOfTotalSpecs int
+ NumberOfSpecsThatWillBeRun int
+ NumberOfPendingSpecs int
+ NumberOfSkippedSpecs int
+ NumberOfPassedSpecs int
+ NumberOfFailedSpecs int
+ NumberOfFlakedSpecs int
+ RunTime time.Duration
+}
+
+type DeprecatedSetupSummary struct {
+ ComponentType SpecComponentType
+ CodeLocation CodeLocation
+
+ State SpecState
+ RunTime time.Duration
+ Failure SpecFailure
+
+ CapturedOutput string
+ SuiteID string
+}
+
+type DeprecatedSpecSummary struct {
+ ComponentTexts []string
+ ComponentCodeLocations []CodeLocation
+
+ State SpecState
+ RunTime time.Duration
+ Failure SpecFailure
+ IsMeasurement bool
+ NumberOfSamples int
+ Measurements map[string]*DeprecatedSpecMeasurement
+
+ CapturedOutput string
+ SuiteID string
+}
+
+func (s DeprecatedSpecSummary) HasFailureState() bool {
+ return s.State.Is(SpecStateFailureStates)
+}
+
+func (s DeprecatedSpecSummary) TimedOut() bool {
+ return false
+}
+
+func (s DeprecatedSpecSummary) Panicked() bool {
+ return s.State == SpecStatePanicked
+}
+
+func (s DeprecatedSpecSummary) Failed() bool {
+ return s.State == SpecStateFailed
+}
+
+func (s DeprecatedSpecSummary) Passed() bool {
+ return s.State == SpecStatePassed
+}
+
+func (s DeprecatedSpecSummary) Skipped() bool {
+ return s.State == SpecStateSkipped
+}
+
+func (s DeprecatedSpecSummary) Pending() bool {
+ return s.State == SpecStatePending
+}
+
+type DeprecatedSpecFailure struct {
+ Message string
+ Location CodeLocation
+ ForwardedPanic string
+
+ ComponentIndex int
+ ComponentType SpecComponentType
+ ComponentCodeLocation CodeLocation
+}
+
+type DeprecatedSpecMeasurement struct {
+ Name string
+ Info interface{}
+ Order int
+
+ Results []float64
+
+ Smallest float64
+ Largest float64
+ Average float64
+ StdDeviation float64
+
+ SmallestLabel string
+ LargestLabel string
+ AverageLabel string
+ Units string
+ Precision int
+}
+
+func (s DeprecatedSpecMeasurement) PrecisionFmt() string {
+ if s.Precision == 0 {
+ return "%f"
+ }
+
+ str := strconv.Itoa(s.Precision)
+
+ return "%." + str + "f"
+}
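+// For example, a measurement with Precision: 3 formats its results with
+// "%.3f", while the zero value falls back to the default "%f".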
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go b/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go
new file mode 100644
index 0000000..e2519f6
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go
@@ -0,0 +1,177 @@
+package types
+
+import (
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+)
+
+type Deprecation struct {
+ Message string
+ DocLink string
+ Version string
+}
+
+type deprecations struct{}
+
+var Deprecations = deprecations{}
+
+func (d deprecations) CustomReporter() Deprecation {
+ return Deprecation{
+ Message: "Support for custom reporters has been removed in V2. Please read the documentation linked to below for Ginkgo's new behavior and for a migration path:",
+ DocLink: "removed-custom-reporters",
+ Version: "1.16.0",
+ }
+}
+
+func (d deprecations) Async() Deprecation {
+ return Deprecation{
+ Message: "You are passing a Done channel to a test node to test asynchronous behavior. This is deprecated in Ginkgo V2. Your test will run synchronously and the timeout will be ignored.",
+ DocLink: "removed-async-testing",
+ Version: "1.16.0",
+ }
+}
+
+func (d deprecations) Measure() Deprecation {
+ return Deprecation{
+ Message: "Measure is deprecated and has been removed from Ginkgo V2. Any Measure tests in your spec will not run. Please migrate to gomega/gmeasure.",
+ DocLink: "removed-measure",
+ Version: "1.16.3",
+ }
+}
+
+func (d deprecations) ParallelNode() Deprecation {
+ return Deprecation{
+ Message: "GinkgoParallelNode is deprecated and will be removed in Ginkgo V2. Please use GinkgoParallelProcess instead.",
+ DocLink: "renamed-ginkgoparallelnode",
+ Version: "1.16.4",
+ }
+}
+
+func (d deprecations) CurrentGinkgoTestDescription() Deprecation {
+ return Deprecation{
+ Message: "CurrentGinkgoTestDescription() is deprecated in Ginkgo V2. Use CurrentSpecReport() instead.",
+ DocLink: "changed-currentginkgotestdescription",
+ Version: "1.16.0",
+ }
+}
+
+func (d deprecations) Convert() Deprecation {
+ return Deprecation{
+ Message: "The convert command is deprecated in Ginkgo V2",
+ DocLink: "removed-ginkgo-convert",
+ Version: "1.16.0",
+ }
+}
+
+func (d deprecations) Blur() Deprecation {
+ return Deprecation{
+ Message: "The blur command is deprecated in Ginkgo V2. Use 'ginkgo unfocus' instead.",
+ Version: "1.16.0",
+ }
+}
+
+func (d deprecations) Nodot() Deprecation {
+ return Deprecation{
+ Message: "The nodot command is deprecated in Ginkgo V2. Please either dot-import Ginkgo or use the package identifier in your code to references objects and types provided by Ginkgo and Gomega.",
+ DocLink: "removed-ginkgo-nodot",
+ Version: "1.16.0",
+ }
+}
+
+func (d deprecations) SuppressProgressReporting() Deprecation {
+ return Deprecation{
+ Message: "Improvements to how reporters emit timeline information means that SuppressProgressReporting is no longer necessary and has been deprecated.",
+ Version: "2.5.0",
+ }
+}
+
+type DeprecationTracker struct {
+ deprecations map[Deprecation][]CodeLocation
+ lock *sync.Mutex
+}
+
+func NewDeprecationTracker() *DeprecationTracker {
+ return &DeprecationTracker{
+ deprecations: map[Deprecation][]CodeLocation{},
+ lock: &sync.Mutex{},
+ }
+}
+
+func (d *DeprecationTracker) TrackDeprecation(deprecation Deprecation, cl ...CodeLocation) {
+ ackVersion := os.Getenv("ACK_GINKGO_DEPRECATIONS")
+ if deprecation.Version != "" && ackVersion != "" {
+ ack := ParseSemVer(ackVersion)
+ version := ParseSemVer(deprecation.Version)
+ if ack.GreaterThanOrEqualTo(version) {
+ return
+ }
+ }
+
+ d.lock.Lock()
+ defer d.lock.Unlock()
+ if len(cl) == 1 {
+ d.deprecations[deprecation] = append(d.deprecations[deprecation], cl[0])
+ } else {
+ d.deprecations[deprecation] = []CodeLocation{}
+ }
+}
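+// The ACK_GINKGO_DEPRECATIONS escape hatch works by version comparison. For
+// example, with ACK_GINKGO_DEPRECATIONS=2.5.0 any deprecation stamped with
+// Version "2.5.0" or older is dropped, while unversioned deprecations are
+// always recorded.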
+
+func (d *DeprecationTracker) DidTrackDeprecations() bool {
+ d.lock.Lock()
+ defer d.lock.Unlock()
+ return len(d.deprecations) > 0
+}
+
+func (d *DeprecationTracker) DeprecationsReport() string {
+ d.lock.Lock()
+ defer d.lock.Unlock()
+ out := formatter.F("{{light-yellow}}You're using deprecated Ginkgo functionality:{{/}}\n")
+ out += formatter.F("{{light-yellow}}============================================={{/}}\n")
+ for deprecation, locations := range d.deprecations {
+ out += formatter.Fi(1, "{{yellow}}"+deprecation.Message+"{{/}}\n")
+ if deprecation.DocLink != "" {
+ out += formatter.Fi(1, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}https://onsi.github.io/ginkgo/MIGRATING_TO_V2#%s{{/}}\n", deprecation.DocLink)
+ }
+ for _, location := range locations {
+ out += formatter.Fi(2, "{{gray}}%s{{/}}\n", location)
+ }
+ }
+ out += formatter.F("\n{{gray}}To silence deprecations that can be silenced set the following environment variable:{{/}}\n")
+ out += formatter.Fi(1, "{{gray}}ACK_GINKGO_DEPRECATIONS=%s{{/}}\n", VERSION)
+ return out
+}
+
+type SemVer struct {
+ Major int
+ Minor int
+ Patch int
+}
+
+func (s SemVer) GreaterThanOrEqualTo(o SemVer) bool {
+ return (s.Major > o.Major) ||
+ (s.Major == o.Major && s.Minor > o.Minor) ||
+ (s.Major == o.Major && s.Minor == o.Minor && s.Patch >= o.Patch)
+}
+
+func ParseSemVer(semver string) SemVer {
+ out := SemVer{}
+ semver = strings.TrimFunc(semver, func(r rune) bool {
+ return !(unicode.IsNumber(r) || r == '.')
+ })
+ components := strings.Split(semver, ".")
+ if len(components) > 0 {
+ out.Major, _ = strconv.Atoi(components[0])
+ }
+ if len(components) > 1 {
+ out.Minor, _ = strconv.Atoi(components[1])
+ }
+ if len(components) > 2 {
+ out.Patch, _ = strconv.Atoi(components[2])
+ }
+ return out
+}
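+// ParseSemVer is deliberately forgiving: it trims non-numeric runes from both
+// ends before splitting, so prefixed tags and pre-release suffixes parse to
+// their numeric core. For example:
+//
+//	ParseSemVer("v2.5.1")                                    // SemVer{2, 5, 1}
+//	ParseSemVer("2.5").GreaterThanOrEqualTo(SemVer{2, 5, 0}) // true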
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go b/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go
new file mode 100644
index 0000000..1d96ae0
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go
@@ -0,0 +1,43 @@
+package types
+
+import "encoding/json"
+
+type EnumSupport struct {
+ toString map[uint]string
+ toEnum map[string]uint
+ maxEnum uint
+}
+
+func NewEnumSupport(toString map[uint]string) EnumSupport {
+ toEnum, maxEnum := map[string]uint{}, uint(0)
+ for k, v := range toString {
+ toEnum[v] = k
+ if maxEnum < k {
+ maxEnum = k
+ }
+ }
+ return EnumSupport{toString: toString, toEnum: toEnum, maxEnum: maxEnum}
+}
+
+func (es EnumSupport) String(e uint) string {
+ if e > es.maxEnum {
+ return es.toString[0]
+ }
+ return es.toString[e]
+}
+
+func (es EnumSupport) UnmarshJSON(b []byte) (uint, error) {
+ var dec string
+ if err := json.Unmarshal(b, &dec); err != nil {
+ return 0, err
+ }
+ out := es.toEnum[dec] // if we miss we get 0 which is what we want anyway
+ return out, nil
+}
+
+func (es EnumSupport) MarshJSON(e uint) ([]byte, error) {
+ if e == 0 || e > es.maxEnum {
+ return json.Marshal(nil)
+ }
+ return json.Marshal(es.toString[e])
+}
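+// A round-trip sketch (the enum map below is illustrative, not one Ginkgo
+// defines):
+//
+//	es := NewEnumSupport(map[uint]string{0: "INVALID", 1: "passed"})
+//	b, _ := es.MarshJSON(1)   // []byte(`"passed"`)
+//	v, _ := es.UnmarshJSON(b) // v == 1
+//	_ = es.String(99)         // "INVALID" (out-of-range falls back to entry 0)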
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/vendor/github.com/onsi/ginkgo/v2/types/errors.go
new file mode 100644
index 0000000..6bb72d0
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/errors.go
@@ -0,0 +1,639 @@
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+)
+
+type GinkgoError struct {
+ Heading string
+ Message string
+ DocLink string
+ CodeLocation CodeLocation
+}
+
+func (g GinkgoError) Error() string {
+ out := formatter.F("{{bold}}{{red}}%s{{/}}\n", g.Heading)
+ if (g.CodeLocation != CodeLocation{}) {
+ contentsOfLine := strings.TrimLeft(g.CodeLocation.ContentsOfLine(), "\t ")
+ if contentsOfLine != "" {
+ out += formatter.F("{{light-gray}}%s{{/}}\n", contentsOfLine)
+ }
+ out += formatter.F("{{gray}}%s{{/}}\n", g.CodeLocation)
+ }
+ if g.Message != "" {
+ out += formatter.Fiw(1, formatter.COLS, g.Message)
+ out += "\n\n"
+ }
+ if g.DocLink != "" {
+ out += formatter.Fiw(1, formatter.COLS, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}http://onsi.github.io/ginkgo/#%s{{/}}\n", g.DocLink)
+ }
+
+ return out
+}
+
+type ginkgoErrors struct{}
+
+var GinkgoErrors = ginkgoErrors{}
+
+func (g ginkgoErrors) UncaughtGinkgoPanic(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Your Test Panicked",
+ Message: `When you, or your assertion library, calls Ginkgo's Fail(),
+Ginkgo panics to prevent subsequent assertions from running.
+
+Normally Ginkgo rescues this panic so you shouldn't see it.
+
+However, if you make an assertion in a goroutine, Ginkgo can't capture the panic.
+To circumvent this, you should call
+
+ defer GinkgoRecover()
+
+at the top of the goroutine that caused this panic.
+
+Alternatively, you may have made an assertion outside of a Ginkgo
+leaf node (e.g. in a container node or some out-of-band function) - please move your assertion to
+an appropriate Ginkgo node (e.g. a BeforeSuite, BeforeEach, It, etc...).`,
+ DocLink: "mental-model-how-ginkgo-handles-failure",
+ CodeLocation: cl,
+ }
+}
+
+func (g ginkgoErrors) RerunningSuite() error {
+ return GinkgoError{
+ Heading: "Rerunning Suite",
+ Message: formatter.F(`It looks like you are calling RunSpecs more than once. Ginkgo does not support rerunning suites. If you want to rerun a suite try {{bold}}ginkgo --repeat=N{{/}} or {{bold}}ginkgo --until-it-fails{{/}}`),
+ DocLink: "repeating-spec-runs-and-managing-flaky-specs",
+ }
+}
+
+/* Tree construction errors */
+
+func (g ginkgoErrors) PushingNodeInRunPhase(nodeType NodeType, cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Ginkgo detected an issue with your spec structure",
+ Message: formatter.F(
+ `It looks like you are trying to add a {{bold}}[%s]{{/}} node
+to the Ginkgo spec tree in a leaf node {{bold}}after{{/}} the specs started running.
+
+To enable randomization and parallelization Ginkgo requires the spec tree
+to be fully constructed up front. In practice, this means that you can
+only create nodes like {{bold}}[%s]{{/}} at the top-level or within the
+body of a {{bold}}Describe{{/}}, {{bold}}Context{{/}}, or {{bold}}When{{/}}.`, nodeType, nodeType),
+ CodeLocation: cl,
+ DocLink: "mental-model-how-ginkgo-traverses-the-spec-hierarchy",
+ }
+}
+
+func (g ginkgoErrors) CaughtPanicDuringABuildPhase(caughtPanic interface{}, cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Assertion or Panic detected during tree construction",
+ Message: formatter.F(
+ `Ginkgo detected a panic while constructing the spec tree.
+You may be trying to make an assertion in the body of a container node
+(i.e. {{bold}}Describe{{/}}, {{bold}}Context{{/}}, or {{bold}}When{{/}}).
+
+Please ensure all assertions are inside leaf nodes such as {{bold}}BeforeEach{{/}},
+{{bold}}It{{/}}, etc.
+
+{{bold}}Here's the content of the panic that was caught:{{/}}
+%v`, caughtPanic),
+ CodeLocation: cl,
+ DocLink: "no-assertions-in-container-nodes",
+ }
+}
+
+func (g ginkgoErrors) SuiteNodeInNestedContext(nodeType NodeType, cl CodeLocation) error {
+ docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite"
+ if nodeType.Is(NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite) {
+ docLink = "reporting-nodes---reportbeforesuite-and-reportaftersuite"
+ }
+
+ return GinkgoError{
+ Heading: "Ginkgo detected an issue with your spec structure",
+ Message: formatter.F(
+ `It looks like you are trying to add a {{bold}}[%s]{{/}} node within a container node.
+
+{{bold}}%s{{/}} can only be called at the top level.`, nodeType, nodeType),
+ CodeLocation: cl,
+ DocLink: docLink,
+ }
+}
+
+func (g ginkgoErrors) SuiteNodeDuringRunPhase(nodeType NodeType, cl CodeLocation) error {
+ docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite"
+ if nodeType.Is(NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite) {
+ docLink = "reporting-nodes---reportbeforesuite-and-reportaftersuite"
+ }
+
+ return GinkgoError{
+ Heading: "Ginkgo detected an issue with your spec structure",
+ Message: formatter.F(
+ `It looks like you are trying to add a {{bold}}[%s]{{/}} node within a leaf node after the spec started running.
+
+{{bold}}%s{{/}} can only be called at the top level.`, nodeType, nodeType),
+ CodeLocation: cl,
+ DocLink: docLink,
+ }
+}
+
+func (g ginkgoErrors) MultipleBeforeSuiteNodes(nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error {
+ return ginkgoErrorMultipleSuiteNodes("setup", nodeType, cl, earlierNodeType, earlierCodeLocation)
+}
+
+func (g ginkgoErrors) MultipleAfterSuiteNodes(nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error {
+ return ginkgoErrorMultipleSuiteNodes("teardown", nodeType, cl, earlierNodeType, earlierCodeLocation)
+}
+
+func ginkgoErrorMultipleSuiteNodes(setupOrTeardown string, nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error {
+ return GinkgoError{
+ Heading: "Ginkgo detected an issue with your spec structure",
+ Message: formatter.F(
+ `It looks like you are trying to add a {{bold}}[%s]{{/}} node but
+you already have a {{bold}}[%s]{{/}} node defined at: {{gray}}%s{{/}}.
+
+Ginkgo only allows you to define one suite %s node.`, nodeType, earlierNodeType, earlierCodeLocation, setupOrTeardown),
+ CodeLocation: cl,
+ DocLink: "suite-setup-and-cleanup-beforesuite-and-aftersuite",
+ }
+}
+
+/* Decorator errors */
+func (g ginkgoErrors) InvalidDecoratorForNodeType(cl CodeLocation, nodeType NodeType, decorator string) error {
+ return GinkgoError{
+ Heading: "Invalid Decorator",
+ Message: formatter.F(`[%s] node cannot be passed a(n) '%s' decorator`, nodeType, decorator),
+ CodeLocation: cl,
+ DocLink: "node-decorators-overview",
+ }
+}
+
+func (g ginkgoErrors) InvalidDeclarationOfFocusedAndPending(cl CodeLocation, nodeType NodeType) error {
+ return GinkgoError{
+ Heading: "Invalid Combination of Decorators: Focused and Pending",
+ Message: formatter.F(`[%s] node was decorated with both Focus and Pending. At most one is allowed.`, nodeType),
+ CodeLocation: cl,
+ DocLink: "node-decorators-overview",
+ }
+}
+
+func (g ginkgoErrors) InvalidDeclarationOfFlakeAttemptsAndMustPassRepeatedly(cl CodeLocation, nodeType NodeType) error {
+ return GinkgoError{
+ Heading: "Invalid Combination of Decorators: FlakeAttempts and MustPassRepeatedly",
+ Message: formatter.F(`[%s] node was decorated with both FlakeAttempts and MustPassRepeatedly. At most one is allowed.`, nodeType),
+ CodeLocation: cl,
+ DocLink: "node-decorators-overview",
+ }
+}
+
+func (g ginkgoErrors) UnknownDecorator(cl CodeLocation, nodeType NodeType, decorator interface{}) error {
+ return GinkgoError{
+ Heading: "Unknown Decorator",
+ Message: formatter.F(`[%s] node was passed an unknown decorator: '%#v'`, nodeType, decorator),
+ CodeLocation: cl,
+ DocLink: "node-decorators-overview",
+ }
+}
+
+func (g ginkgoErrors) InvalidBodyTypeForContainer(t reflect.Type, cl CodeLocation, nodeType NodeType) error {
+ return GinkgoError{
+ Heading: "Invalid Function",
+ Message: formatter.F(`[%s] node must be passed {{bold}}func(){{/}} - i.e. functions that take nothing and return nothing. You passed {{bold}}%s{{/}} instead.`, nodeType, t),
+ CodeLocation: cl,
+ DocLink: "node-decorators-overview",
+ }
+}
+
+func (g ginkgoErrors) InvalidBodyType(t reflect.Type, cl CodeLocation, nodeType NodeType) error {
+ mustGet := "{{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}"
+ if nodeType.Is(NodeTypeContainer) {
+ mustGet = "{{bold}}func(){{/}}"
+ }
+ return GinkgoError{
+ Heading: "Invalid Function",
+ Message: formatter.F(`[%s] node must be passed `+mustGet+`.
+You passed {{bold}}%s{{/}} instead.`, nodeType, t),
+ CodeLocation: cl,
+ DocLink: "node-decorators-overview",
+ }
+}
+
+func (g ginkgoErrors) InvalidBodyTypeForSynchronizedBeforeSuiteProc1(t reflect.Type, cl CodeLocation) error {
+ mustGet := "{{bold}}func() []byte{{/}}, {{bold}}func(ctx SpecContext) []byte{{/}}, or {{bold}}func(ctx context.Context) []byte{{/}}, {{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}"
+ return GinkgoError{
+ Heading: "Invalid Function",
+ Message: formatter.F(`[SynchronizedBeforeSuite] node must be passed `+mustGet+` for its first function.
+You passed {{bold}}%s{{/}} instead.`, t),
+ CodeLocation: cl,
+ DocLink: "node-decorators-overview",
+ }
+}
+
+func (g ginkgoErrors) InvalidBodyTypeForSynchronizedBeforeSuiteAllProcs(t reflect.Type, cl CodeLocation) error {
+ mustGet := "{{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}, {{bold}}func([]byte){{/}}, {{bold}}func(ctx SpecContext, []byte){{/}}, or {{bold}}func(ctx context.Context, []byte){{/}}"
+ return GinkgoError{
+ Heading: "Invalid Function",
+ Message: formatter.F(`[SynchronizedBeforeSuite] node must be passed `+mustGet+` for its second function.
+You passed {{bold}}%s{{/}} instead.`, t),
+ CodeLocation: cl,
+ DocLink: "node-decorators-overview",
+ }
+}
+
+func (g ginkgoErrors) MultipleBodyFunctions(cl CodeLocation, nodeType NodeType) error {
+ return GinkgoError{
+ Heading: "Multiple Functions",
+ Message: formatter.F(`[%s] node must be passed a single function - but more than one was passed in.`, nodeType),
+ CodeLocation: cl,
+ DocLink: "node-decorators-overview",
+ }
+}
+
+func (g ginkgoErrors) MissingBodyFunction(cl CodeLocation, nodeType NodeType) error {
+ return GinkgoError{
+ Heading: "Missing Function",
+ Message: formatter.F(`[%s] node must be passed a single function - but none was passed in.`, nodeType),
+ CodeLocation: cl,
+ DocLink: "node-decorators-overview",
+ }
+}
+
+func (g ginkgoErrors) InvalidTimeoutOrGracePeriodForNonContextNode(cl CodeLocation, nodeType NodeType) error {
+ return GinkgoError{
+ Heading: "Invalid NodeTimeout, SpecTimeout, or GracePeriod",
+ Message: formatter.F(`[%s] was passed NodeTimeout, SpecTimeout, or GracePeriod but does not have a callback that accepts a {{bold}}SpecContext{{/}} or {{bold}}context.Context{{/}}. You must accept a context to enable timeouts and grace periods.`, nodeType),
+ CodeLocation: cl,
+ DocLink: "spec-timeouts-and-interruptible-nodes",
+ }
+}
+
+func (g ginkgoErrors) InvalidTimeoutOrGracePeriodForNonContextCleanupNode(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Invalid NodeTimeout or GracePeriod",
+ Message: formatter.F(`[DeferCleanup] was passed NodeTimeout or GracePeriod but does not have a callback that accepts a {{bold}}SpecContext{{/}} or {{bold}}context.Context{{/}}. You must accept a context to enable timeouts and grace periods.`),
+ CodeLocation: cl,
+ DocLink: "spec-timeouts-and-interruptible-nodes",
+ }
+}
+
+/* Ordered Container errors */
+func (g ginkgoErrors) InvalidSerialNodeInNonSerialOrderedContainer(cl CodeLocation, nodeType NodeType) error {
+ return GinkgoError{
+ Heading: "Invalid Serial Node in Non-Serial Ordered Container",
+ Message: formatter.F(`[%s] node was decorated with Serial but occurs in an Ordered container that is not marked Serial. Move the Serial decorator to the outer-most Ordered container to mark all ordered specs within the container as serial.`, nodeType),
+ CodeLocation: cl,
+ DocLink: "node-decorators-overview",
+ }
+}
+
+func (g ginkgoErrors) SetupNodeNotInOrderedContainer(cl CodeLocation, nodeType NodeType) error {
+ return GinkgoError{
+ Heading: "Setup Node not in Ordered Container",
+ Message: fmt.Sprintf("[%s] setup nodes must appear inside an Ordered container. They cannot be nested within other containers, even containers that are themselves within an Ordered container.", nodeType),
+ CodeLocation: cl,
+ DocLink: "ordered-containers",
+ }
+}
+
+func (g ginkgoErrors) InvalidContinueOnFailureDecoration(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "ContinueOnFailure not decorating an outermost Ordered Container",
+ Message: "ContinueOnFailure can only decorate an Ordered container, and this Ordered container must be the outermost Ordered container.",
+ CodeLocation: cl,
+ DocLink: "ordered-containers",
+ }
+}
+
+/* DeferCleanup errors */
+func (g ginkgoErrors) DeferCleanupInvalidFunction(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "DeferCleanup requires a valid function",
+ Message: "You must pass DeferCleanup a function to invoke. This function must return zero or one values - if it does return, it must return an error. The function can take arbitrarily many arguments and you should provide these to DeferCleanup to pass along to the function.",
+ CodeLocation: cl,
+ DocLink: "cleaning-up-our-cleanup-code-defercleanup",
+ }
+}
+
+func (g ginkgoErrors) PushingCleanupNodeDuringTreeConstruction(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "DeferCleanup must be called inside a setup or subject node",
+ Message: "You must call DeferCleanup inside a setup node (e.g. BeforeEach, BeforeSuite, AfterAll...) or a subject node (i.e. It). You can't call DeferCleanup at the top-level or in a container node - use the After* family of setup nodes instead.",
+ CodeLocation: cl,
+ DocLink: "cleaning-up-our-cleanup-code-defercleanup",
+ }
+}
+
+func (g ginkgoErrors) PushingCleanupInReportingNode(cl CodeLocation, nodeType NodeType) error {
+ return GinkgoError{
+ Heading: fmt.Sprintf("DeferCleanup cannot be called in %s", nodeType),
+ Message: "Please inline your cleanup code - Ginkgo won't run cleanup code after a Reporting node.",
+ CodeLocation: cl,
+ DocLink: "cleaning-up-our-cleanup-code-defercleanup",
+ }
+}
+
+func (g ginkgoErrors) PushingCleanupInCleanupNode(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "DeferCleanup cannot be called in a DeferCleanup callback",
+ Message: "Please inline your cleanup code - Ginkgo doesn't let you call DeferCleanup from within DeferCleanup",
+ CodeLocation: cl,
+ DocLink: "cleaning-up-our-cleanup-code-defercleanup",
+ }
+}
+
+/* ReportEntry errors */
+func (g ginkgoErrors) TooManyReportEntryValues(cl CodeLocation, arg interface{}) error {
+ return GinkgoError{
+ Heading: "Too Many ReportEntry Values",
+ Message: formatter.F(`{{bold}}AddReportEntry{{/}} can only be given one value. Got unexpected value: %#v`, arg),
+ CodeLocation: cl,
+ DocLink: "attaching-data-to-reports",
+ }
+}
+
+func (g ginkgoErrors) AddReportEntryNotDuringRunPhase(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Ginkgo detected an issue with your spec structure",
+ Message: formatter.F(`It looks like you are calling {{bold}}AddReportEntry{{/}} outside of a running spec. Make sure you call {{bold}}AddReportEntry{{/}} inside a runnable node such as It or BeforeEach and not inside the body of a container such as Describe or Context.`),
+ CodeLocation: cl,
+ DocLink: "attaching-data-to-reports",
+ }
+}
+
+/* By errors */
+func (g ginkgoErrors) ByNotDuringRunPhase(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Ginkgo detected an issue with your spec structure",
+ Message: formatter.F(`It looks like you are calling {{bold}}By{{/}} outside of a running spec. Make sure you call {{bold}}By{{/}} inside a runnable node such as It or BeforeEach and not inside the body of a container such as Describe or Context.`),
+ CodeLocation: cl,
+ DocLink: "documenting-complex-specs-by",
+ }
+}
+
+/* FileFilter and SkipFilter errors */
+func (g ginkgoErrors) InvalidFileFilter(filter string) error {
+ return GinkgoError{
+ Heading: "Invalid File Filter",
+ Message: fmt.Sprintf(`The provided file filter: "%s" is invalid. File filters must have the format "file" or "file:lines", where "file" is a regular expression matched against the file path and "lines" is a comma-separated list of integers (e.g. file:1,5,7), line-ranges (e.g. file:1-3,5-9), or both (e.g. file:1,5-9)`, filter),
+ DocLink: "filtering-specs",
+ }
+}
+
+func (g ginkgoErrors) InvalidFileFilterRegularExpression(filter string, err error) error {
+ return GinkgoError{
+ Heading: "Invalid File Filter Regular Expression",
+ Message: fmt.Sprintf(`The provided file filter: "%s" included an invalid regular expression. regexp.Compile error: %s`, filter, err),
+ DocLink: "filtering-specs",
+ }
+}
+
+/* Label Errors */
+func (g ginkgoErrors) SyntaxErrorParsingLabelFilter(input string, location int, error string) error {
+ var message string
+ if location >= 0 {
+ for i, r := range input {
+ if i == location {
+ message += "{{red}}{{bold}}{{underline}}"
+ }
+ message += string(r)
+ if i == location {
+ message += "{{/}}"
+ }
+ }
+ } else {
+ message = input
+ }
+ message += "\n" + error
+ return GinkgoError{
+ Heading: "Syntax Error Parsing Label Filter",
+ Message: message,
+ DocLink: "spec-labels",
+ }
+}
+
+func (g ginkgoErrors) InvalidLabel(label string, cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Invalid Label",
+ Message: fmt.Sprintf("'%s' is an invalid label. Labels cannot contain any of the following characters: '&|!,()/'", label),
+ CodeLocation: cl,
+ DocLink: "spec-labels",
+ }
+}
+
+func (g ginkgoErrors) InvalidEmptyLabel(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Invalid Empty Label",
+ Message: "Labels cannot be empty",
+ CodeLocation: cl,
+ DocLink: "spec-labels",
+ }
+}
+
+/* Table errors */
+func (g ginkgoErrors) MultipleEntryBodyFunctionsForTable(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "DescribeTable passed multiple functions",
+ Message: "It looks like you are passing multiple functions into DescribeTable. Only one function can be passed in. This function will be called for each Entry in the table.",
+ CodeLocation: cl,
+ DocLink: "table-specs",
+ }
+}
+
+func (g ginkgoErrors) InvalidEntryDescription(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Invalid Entry description",
+ Message: "Entry description functions must be a string, a function that accepts the entry parameters and returns a string, or nil.",
+ CodeLocation: cl,
+ DocLink: "table-specs",
+ }
+}
+
+func (g ginkgoErrors) MissingParametersForTableFunction(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "No parameters have been passed to the Table Function",
+ Message: "The Table Function expected at least 1 parameter",
+ CodeLocation: cl,
+ DocLink: "table-specs",
+ }
+}
+
+func (g ginkgoErrors) IncorrectParameterTypeForTable(i int, name string, cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "DescribeTable passed incorrect parameter type",
+ Message: fmt.Sprintf("Parameter #%d passed to DescribeTable is of incorrect type <%s>", i, name),
+ CodeLocation: cl,
+ DocLink: "table-specs",
+ }
+}
+
+func (g ginkgoErrors) TooFewParametersToTableFunction(expected, actual int, kind string, cl CodeLocation) error {
+ return GinkgoError{
+ Heading: fmt.Sprintf("Too few parameters passed in to %s", kind),
+ Message: fmt.Sprintf("The %s expected %d parameters but you passed in %d", kind, expected, actual),
+ CodeLocation: cl,
+ DocLink: "table-specs",
+ }
+}
+
+func (g ginkgoErrors) TooManyParametersToTableFunction(expected, actual int, kind string, cl CodeLocation) error {
+ return GinkgoError{
+ Heading: fmt.Sprintf("Too many parameters passed in to %s", kind),
+ Message: fmt.Sprintf("The %s expected %d parameters but you passed in %d", kind, expected, actual),
+ CodeLocation: cl,
+ DocLink: "table-specs",
+ }
+}
+
+func (g ginkgoErrors) IncorrectParameterTypeToTableFunction(i int, expected, actual reflect.Type, kind string, cl CodeLocation) error {
+ return GinkgoError{
+ Heading: fmt.Sprintf("Incorrect parameters type passed to %s", kind),
+ Message: fmt.Sprintf("The %s expected parameter #%d to be of type <%s> but you passed in <%s>", kind, i, expected, actual),
+ CodeLocation: cl,
+ DocLink: "table-specs",
+ }
+}
+
+func (g ginkgoErrors) IncorrectVariadicParameterTypeToTableFunction(expected, actual reflect.Type, kind string, cl CodeLocation) error {
+ return GinkgoError{
+ Heading: fmt.Sprintf("Incorrect parameters type passed to %s", kind),
+ Message: fmt.Sprintf("The %s expected its variadic parameters to be of type <%s> but you passed in <%s>", kind, expected, actual),
+ CodeLocation: cl,
+ DocLink: "table-specs",
+ }
+}
+
+func (g ginkgoErrors) ContextsCannotBeUsedInSubtreeTables(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Contexts cannot be used in subtree tables",
+ Message: "You've defined a subtree body function that accepts a context but did not provide one in the table entry. Ginkgo SpecContexts can only be passed in to subject and setup nodes - so if you are trying to implement a spec timeout you should request a context in the It function within your subtree body function, not in the subtree body function itself.",
+ CodeLocation: cl,
+ DocLink: "table-specs",
+ }
+}
+
+/* Parallel Synchronization errors */
+
+func (g ginkgoErrors) AggregatedReportUnavailableDueToNodeDisappearing() error {
+ return GinkgoError{
+ Heading: "Test Report unavailable because a Ginkgo parallel process disappeared",
+ Message: "The aggregated report could not be fetched for a ReportAfterSuite node. A Ginkgo parallel process disappeared before it could finish reporting.",
+ }
+}
+
+func (g ginkgoErrors) SynchronizedBeforeSuiteFailedOnProc1() error {
+ return GinkgoError{
+ Heading: "SynchronizedBeforeSuite failed on Ginkgo parallel process #1",
+ Message: "The first SynchronizedBeforeSuite function running on Ginkgo parallel process #1 failed. This suite will now abort.",
+ }
+}
+
+func (g ginkgoErrors) SynchronizedBeforeSuiteDisappearedOnProc1() error {
+ return GinkgoError{
+ Heading: "Process #1 disappeared before SynchronizedBeforeSuite could report back",
+ Message: "Ginkgo parallel process #1 disappeared before the first SynchronizedBeforeSuite function completed. This suite will now abort.",
+ }
+}
+
+/* Configuration errors */
+
+func (g ginkgoErrors) UnknownTypePassedToRunSpecs(value interface{}) error {
+ return GinkgoError{
+ Heading: "Unknown Type passed to RunSpecs",
+ Message: fmt.Sprintf("RunSpecs() accepts labels and configuration of type types.SuiteConfig and/or types.ReporterConfig.\nYou passed in: %v", value),
+ }
+}
+
+var sharedParallelErrorMessage = "It looks like you are trying to run specs in parallel with go test.\nThis is unsupported and you should use the ginkgo CLI instead."
+
+func (g ginkgoErrors) InvalidParallelTotalConfiguration() error {
+ return GinkgoError{
+ Heading: "-ginkgo.parallel.total must be >= 1",
+ Message: sharedParallelErrorMessage,
+ DocLink: "spec-parallelization",
+ }
+}
+
+func (g ginkgoErrors) InvalidParallelProcessConfiguration() error {
+ return GinkgoError{
+ Heading: "-ginkgo.parallel.process is one-indexed and must be <= ginkgo.parallel.total",
+ Message: sharedParallelErrorMessage,
+ DocLink: "spec-parallelization",
+ }
+}
+
+func (g ginkgoErrors) MissingParallelHostConfiguration() error {
+ return GinkgoError{
+ Heading: "-ginkgo.parallel.host is missing",
+ Message: sharedParallelErrorMessage,
+ DocLink: "spec-parallelization",
+ }
+}
+
+func (g ginkgoErrors) UnreachableParallelHost(host string) error {
+ return GinkgoError{
+ Heading: "Could not reach ginkgo.parallel.host: " + host,
+ Message: sharedParallelErrorMessage,
+ DocLink: "spec-parallelization",
+ }
+}
+
+func (g ginkgoErrors) DryRunInParallelConfiguration() error {
+ return GinkgoError{
+ Heading: "Ginkgo only performs -dryRun in serial mode.",
+ Message: "Please try running ginkgo -dryRun again, but without -p or -procs to ensure the suite is running in series.",
+ }
+}
+
+func (g ginkgoErrors) GracePeriodCannotBeZero() error {
+ return GinkgoError{
+ Heading: "Ginkgo requires a positive --grace-period.",
+ Message: "Please set --grace-period to a positive duration. The default is 30s.",
+ }
+}
+
+func (g ginkgoErrors) ConflictingVerbosityConfiguration() error {
+ return GinkgoError{
+ Heading: "Conflicting reporter verbosity settings.",
+ Message: "You can't set more than one of -v, -vv and --succinct. Please pick one!",
+ }
+}
+
+func (g ginkgoErrors) InvalidOutputInterceptorModeConfiguration(value string) error {
+ return GinkgoError{
+ Heading: fmt.Sprintf("Invalid value '%s' for --output-interceptor-mode.", value),
+ Message: "You must choose one of 'dup', 'swap', or 'none'.",
+ }
+}
+
+func (g ginkgoErrors) InvalidGoFlagCount() error {
+ return GinkgoError{
+ Heading: "Use of go test -count",
+ Message: "Ginkgo does not support using go test -count to rerun suites. Only -count=1 is allowed. To repeat suite runs, please use the ginkgo cli and `ginkgo -until-it-fails` or `ginkgo -repeat=N`.",
+ }
+}
+
+func (g ginkgoErrors) InvalidGoFlagParallel() error {
+ return GinkgoError{
+ Heading: "Use of go test -parallel",
+ Message: "Go test's implementation of parallelization does not actually parallelize Ginkgo specs. Please use the ginkgo cli and `ginkgo -p` or `ginkgo -procs=N` instead.",
+ }
+}
+
+func (g ginkgoErrors) BothRepeatAndUntilItFails() error {
+ return GinkgoError{
+ Heading: "--repeat and --until-it-fails are both set",
+ Message: "--until-it-fails directs Ginkgo to rerun specs indefinitely until they fail. --repeat directs Ginkgo to rerun specs a set number of times. You can't set both... which would you like?",
+ }
+}
+
+/* Stack-Trace parsing errors */
+
+func (g ginkgoErrors) FailedToParseStackTrace(message string) error {
+ return GinkgoError{
+ Heading: "Failed to Parse Stack Trace",
+ Message: message,
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go
new file mode 100644
index 0000000..cc21df7
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go
@@ -0,0 +1,106 @@
+package types
+
+import (
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+func ParseFileFilters(filters []string) (FileFilters, error) {
+ ffs := FileFilters{}
+ for _, filter := range filters {
+ ff := FileFilter{}
+ if filter == "" {
+ return nil, GinkgoErrors.InvalidFileFilter(filter)
+ }
+ components := strings.Split(filter, ":")
+ if !(len(components) == 1 || len(components) == 2) {
+ return nil, GinkgoErrors.InvalidFileFilter(filter)
+ }
+
+ var err error
+ ff.Filename, err = regexp.Compile(components[0])
+ if err != nil {
+ return nil, err
+ }
+ if len(components) == 2 {
+ lineFilters := strings.Split(components[1], ",")
+ for _, lineFilter := range lineFilters {
+ components := strings.Split(lineFilter, "-")
+ if len(components) == 1 {
+ line, err := strconv.Atoi(strings.TrimSpace(components[0]))
+ if err != nil {
+ return nil, GinkgoErrors.InvalidFileFilter(filter)
+ }
+ ff.LineFilters = append(ff.LineFilters, LineFilter{line, line + 1})
+ } else if len(components) == 2 {
+ line1, err := strconv.Atoi(strings.TrimSpace(components[0]))
+ if err != nil {
+ return nil, GinkgoErrors.InvalidFileFilter(filter)
+ }
+ line2, err := strconv.Atoi(strings.TrimSpace(components[1]))
+ if err != nil {
+ return nil, GinkgoErrors.InvalidFileFilter(filter)
+ }
+ ff.LineFilters = append(ff.LineFilters, LineFilter{line1, line2})
+ } else {
+ return nil, GinkgoErrors.InvalidFileFilter(filter)
+ }
+ }
+ }
+ ffs = append(ffs, ff)
+ }
+ return ffs, nil
+}
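+
+// A minimal usage sketch (editor's illustration, not upstream Ginkgo code):
+// the filter "foo_test\.go:1-3,7" compiles to a FileFilter whose Filename
+// regexp matches the path and whose LineFilters hold the half-open ranges
+// [1,3) and [7,8):
+//
+//	ffs, err := ParseFileFilters([]string{"foo_test\\.go:1-3,7"})
+//	if err == nil {
+//		ffs.Matches([]CodeLocation{{FileName: "foo_test.go", LineNumber: 2}}) // true
+//		ffs.Matches([]CodeLocation{{FileName: "foo_test.go", LineNumber: 3}}) // false: ranges are half-open
+//	}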
+
+type FileFilter struct {
+ Filename *regexp.Regexp
+ LineFilters LineFilters
+}
+
+func (f FileFilter) Matches(locations []CodeLocation) bool {
+ for _, location := range locations {
+ if f.Filename.MatchString(location.FileName) &&
+ f.LineFilters.Matches(location.LineNumber) {
+ return true
+ }
+
+ }
+ return false
+}
+
+type FileFilters []FileFilter
+
+func (ffs FileFilters) Matches(locations []CodeLocation) bool {
+ for _, ff := range ffs {
+ if ff.Matches(locations) {
+ return true
+ }
+ }
+
+ return false
+}
+
+type LineFilter struct {
+ Min int
+ Max int
+}
+
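+// Matches treats the filter as the half-open interval [Min, Max): the parser
+// above stores a single line n as {n, n+1} and a range a-b as {a, b}.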
+func (lf LineFilter) Matches(line int) bool {
+ return lf.Min <= line && line < lf.Max
+}
+
+type LineFilters []LineFilter
+
+func (lfs LineFilters) Matches(line int) bool {
+ if len(lfs) == 0 {
+ return true
+ }
+
+ for _, lf := range lfs {
+ if lf.Matches(line) {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/flags.go b/vendor/github.com/onsi/ginkgo/v2/types/flags.go
new file mode 100644
index 0000000..de69f30
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/flags.go
@@ -0,0 +1,490 @@
+package types
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+)
+
+type GinkgoFlag struct {
+ Name string
+ KeyPath string
+ SectionKey string
+
+ Usage string
+ UsageArgument string
+ UsageDefaultValue string
+
+ DeprecatedName string
+ DeprecatedDocLink string
+ DeprecatedVersion string
+
+ ExportAs string
+ AlwaysExport bool
+}
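+
+// A hypothetical flag definition (editor's sketch; the field values are
+// illustrative, not taken from Ginkgo's real flag tables). KeyPath is a
+// dot-separated path that valueAtKeyPath (below) resolves against the
+// bindings value handed to NewGinkgoFlagSet:
+//
+//	GinkgoFlag{Name: "seed", KeyPath: "S.RandomSeed", SectionKey: "debug",
+//		Usage: "The seed used to randomize the spec suite."}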
+
+type GinkgoFlags []GinkgoFlag
+
+func (f GinkgoFlags) CopyAppend(flags ...GinkgoFlag) GinkgoFlags {
+ out := GinkgoFlags{}
+ out = append(out, f...)
+ out = append(out, flags...)
+ return out
+}
+
+func (f GinkgoFlags) WithPrefix(prefix string) GinkgoFlags {
+ if prefix == "" {
+ return f
+ }
+ out := GinkgoFlags{}
+ for _, flag := range f {
+ if flag.Name != "" {
+ flag.Name = prefix + "." + flag.Name
+ }
+ if flag.DeprecatedName != "" {
+ flag.DeprecatedName = prefix + "." + flag.DeprecatedName
+ }
+ if flag.ExportAs != "" {
+ flag.ExportAs = prefix + "." + flag.ExportAs
+ }
+ out = append(out, flag)
+ }
+ return out
+}
+
+func (f GinkgoFlags) SubsetWithNames(names ...string) GinkgoFlags {
+ out := GinkgoFlags{}
+ for _, flag := range f {
+ for _, name := range names {
+ if flag.Name == name {
+ out = append(out, flag)
+ break
+ }
+ }
+ }
+ return out
+}
+
+type GinkgoFlagSection struct {
+ Key string
+ Style string
+ Succinct bool
+ Heading string
+ Description string
+}
+
+type GinkgoFlagSections []GinkgoFlagSection
+
+func (gfs GinkgoFlagSections) Lookup(key string) (GinkgoFlagSection, bool) {
+ for _, section := range gfs {
+ if section.Key == key {
+ return section, true
+ }
+ }
+
+ return GinkgoFlagSection{}, false
+}
+
+type GinkgoFlagSet struct {
+ flags GinkgoFlags
+ bindings interface{}
+
+ sections GinkgoFlagSections
+ extraGoFlagsSection GinkgoFlagSection
+
+ flagSet *flag.FlagSet
+}
+
+// Call NewGinkgoFlagSet to create a GinkgoFlagSet that creates and binds to its own *flag.FlagSet
+func NewGinkgoFlagSet(flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections) (GinkgoFlagSet, error) {
+ return bindFlagSet(GinkgoFlagSet{
+ flags: flags,
+ bindings: bindings,
+ sections: sections,
+ }, nil)
+}
+
+// Call NewAttachedGinkgoFlagSet to create a GinkgoFlagSet that extends an existing *flag.FlagSet
+func NewAttachedGinkgoFlagSet(flagSet *flag.FlagSet, flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections, extraGoFlagsSection GinkgoFlagSection) (GinkgoFlagSet, error) {
+ return bindFlagSet(GinkgoFlagSet{
+ flags: flags,
+ bindings: bindings,
+ sections: sections,
+ extraGoFlagsSection: extraGoFlagsSection,
+ }, flagSet)
+}
+
+func bindFlagSet(f GinkgoFlagSet, flagSet *flag.FlagSet) (GinkgoFlagSet, error) {
+ if flagSet == nil {
+ f.flagSet = flag.NewFlagSet("", flag.ContinueOnError)
+ //suppress all output as Ginkgo is responsible for formatting usage
+ f.flagSet.SetOutput(io.Discard)
+ } else {
+ f.flagSet = flagSet
+ //we're piggybacking on an existing flagset (typically go test's) so we have limited control
+ //over user feedback
+ f.flagSet.Usage = f.substituteUsage
+ }
+
+ for _, flag := range f.flags {
+ name := flag.Name
+
+ deprecatedUsage := "[DEPRECATED]"
+ deprecatedName := flag.DeprecatedName
+ if name != "" {
+ deprecatedUsage = fmt.Sprintf("[DEPRECATED] use --%s instead", name)
+ } else if flag.Usage != "" {
+ deprecatedUsage += " " + flag.Usage
+ }
+
+ value, ok := valueAtKeyPath(f.bindings, flag.KeyPath)
+ if !ok {
+ return GinkgoFlagSet{}, fmt.Errorf("could not load KeyPath: %s", flag.KeyPath)
+ }
+
+ iface, addr := value.Interface(), value.Addr().Interface()
+
+ switch value.Type() {
+ case reflect.TypeOf(string("")):
+ if name != "" {
+ f.flagSet.StringVar(addr.(*string), name, iface.(string), flag.Usage)
+ }
+ if deprecatedName != "" {
+ f.flagSet.StringVar(addr.(*string), deprecatedName, iface.(string), deprecatedUsage)
+ }
+ case reflect.TypeOf(int64(0)):
+ if name != "" {
+ f.flagSet.Int64Var(addr.(*int64), name, iface.(int64), flag.Usage)
+ }
+ if deprecatedName != "" {
+ f.flagSet.Int64Var(addr.(*int64), deprecatedName, iface.(int64), deprecatedUsage)
+ }
+ case reflect.TypeOf(float64(0)):
+ if name != "" {
+ f.flagSet.Float64Var(addr.(*float64), name, iface.(float64), flag.Usage)
+ }
+ if deprecatedName != "" {
+ f.flagSet.Float64Var(addr.(*float64), deprecatedName, iface.(float64), deprecatedUsage)
+ }
+ case reflect.TypeOf(int(0)):
+ if name != "" {
+ f.flagSet.IntVar(addr.(*int), name, iface.(int), flag.Usage)
+ }
+ if deprecatedName != "" {
+ f.flagSet.IntVar(addr.(*int), deprecatedName, iface.(int), deprecatedUsage)
+ }
+ case reflect.TypeOf(bool(true)):
+ if name != "" {
+ f.flagSet.BoolVar(addr.(*bool), name, iface.(bool), flag.Usage)
+ }
+ if deprecatedName != "" {
+ f.flagSet.BoolVar(addr.(*bool), deprecatedName, iface.(bool), deprecatedUsage)
+ }
+ case reflect.TypeOf(time.Duration(0)):
+ if name != "" {
+ f.flagSet.DurationVar(addr.(*time.Duration), name, iface.(time.Duration), flag.Usage)
+ }
+ if deprecatedName != "" {
+ f.flagSet.DurationVar(addr.(*time.Duration), deprecatedName, iface.(time.Duration), deprecatedUsage)
+ }
+
+ case reflect.TypeOf([]string{}):
+ if name != "" {
+ f.flagSet.Var(stringSliceVar{value}, name, flag.Usage)
+ }
+ if deprecatedName != "" {
+ f.flagSet.Var(stringSliceVar{value}, deprecatedName, deprecatedUsage)
+ }
+ default:
+ return GinkgoFlagSet{}, fmt.Errorf("unsupported type %T", iface)
+ }
+ }
+
+ return f, nil
+}
+
+func (f GinkgoFlagSet) IsZero() bool {
+ return f.flagSet == nil
+}
+
+func (f GinkgoFlagSet) WasSet(name string) bool {
+ found := false
+ f.flagSet.Visit(func(f *flag.Flag) {
+ if f.Name == name {
+ found = true
+ }
+ })
+
+ return found
+}
+
+func (f GinkgoFlagSet) Lookup(name string) *flag.Flag {
+ return f.flagSet.Lookup(name)
+}
+
+func (f GinkgoFlagSet) Parse(args []string) ([]string, error) {
+ if f.IsZero() {
+ return args, nil
+ }
+ err := f.flagSet.Parse(args)
+ if err != nil {
+ return []string{}, err
+ }
+ return f.flagSet.Args(), nil
+}
+
+func (f GinkgoFlagSet) ValidateDeprecations(deprecationTracker *DeprecationTracker) {
+ if f.IsZero() {
+ return
+ }
+ f.flagSet.Visit(func(flag *flag.Flag) {
+ for _, ginkgoFlag := range f.flags {
+ if ginkgoFlag.DeprecatedName != "" && strings.HasSuffix(flag.Name, ginkgoFlag.DeprecatedName) {
+ message := fmt.Sprintf("--%s is deprecated", ginkgoFlag.DeprecatedName)
+ if ginkgoFlag.Name != "" {
+ message = fmt.Sprintf("--%s is deprecated, use --%s instead", ginkgoFlag.DeprecatedName, ginkgoFlag.Name)
+ } else if ginkgoFlag.Usage != "" {
+ message += " " + ginkgoFlag.Usage
+ }
+
+ deprecationTracker.TrackDeprecation(Deprecation{
+ Message: message,
+ DocLink: ginkgoFlag.DeprecatedDocLink,
+ Version: ginkgoFlag.DeprecatedVersion,
+ })
+ }
+ }
+ })
+}
+
+func (f GinkgoFlagSet) Usage() string {
+ if f.IsZero() {
+ return ""
+ }
+ groupedFlags := map[GinkgoFlagSection]GinkgoFlags{}
+ ungroupedFlags := GinkgoFlags{}
+ managedFlags := map[string]bool{}
+ extraGoFlags := []*flag.Flag{}
+
+ for _, flag := range f.flags {
+ managedFlags[flag.Name] = true
+ managedFlags[flag.DeprecatedName] = true
+
+ if flag.Name == "" {
+ continue
+ }
+
+ section, ok := f.sections.Lookup(flag.SectionKey)
+ if ok {
+ groupedFlags[section] = append(groupedFlags[section], flag)
+ } else {
+ ungroupedFlags = append(ungroupedFlags, flag)
+ }
+ }
+
+ f.flagSet.VisitAll(func(flag *flag.Flag) {
+ if !managedFlags[flag.Name] {
+ extraGoFlags = append(extraGoFlags, flag)
+ }
+ })
+
+ out := ""
+ for _, section := range f.sections {
+ flags := groupedFlags[section]
+ if len(flags) == 0 {
+ continue
+ }
+ out += f.usageForSection(section)
+ if section.Succinct {
+ succinctFlags := []string{}
+ for _, flag := range flags {
+ if flag.Name != "" {
+ succinctFlags = append(succinctFlags, fmt.Sprintf("--%s", flag.Name))
+ }
+ }
+ out += formatter.Fiw(1, formatter.COLS, section.Style+strings.Join(succinctFlags, ", ")+"{{/}}\n")
+ } else {
+ for _, flag := range flags {
+ out += f.usageForFlag(flag, section.Style)
+ }
+ }
+ out += "\n"
+ }
+ if len(ungroupedFlags) > 0 {
+ for _, flag := range ungroupedFlags {
+ out += f.usageForFlag(flag, "")
+ }
+ out += "\n"
+ }
+ if len(extraGoFlags) > 0 {
+ out += f.usageForSection(f.extraGoFlagsSection)
+ for _, goFlag := range extraGoFlags {
+ out += f.usageForGoFlag(goFlag)
+ }
+ }
+
+ return out
+}
+
+func (f GinkgoFlagSet) substituteUsage() {
+ fmt.Fprintln(f.flagSet.Output(), f.Usage())
+}
+
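+// valueAtKeyPath walks a dot-separated path through nested maps and structs
+// via reflection. For example (editor's note), with bindings of
+// map[string]interface{}{"S": &SuiteConfig{...}}, the key path "S.RandomSeed"
+// resolves to the RandomSeed field of that struct.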
+func valueAtKeyPath(root interface{}, keyPath string) (reflect.Value, bool) {
+ if len(keyPath) == 0 {
+ return reflect.Value{}, false
+ }
+
+ val := reflect.ValueOf(root)
+ components := strings.Split(keyPath, ".")
+ for _, component := range components {
+ val = reflect.Indirect(val)
+ switch val.Kind() {
+ case reflect.Map:
+ val = val.MapIndex(reflect.ValueOf(component))
+ if val.Kind() == reflect.Interface {
+ val = reflect.ValueOf(val.Interface())
+ }
+ case reflect.Struct:
+ val = val.FieldByName(component)
+ default:
+ return reflect.Value{}, false
+ }
+ if (val == reflect.Value{}) {
+ return reflect.Value{}, false
+ }
+ }
+
+ return val, true
+}
+
+func (f GinkgoFlagSet) usageForSection(section GinkgoFlagSection) string {
+ out := formatter.F(section.Style + "{{bold}}{{underline}}" + section.Heading + "{{/}}\n")
+ if section.Description != "" {
+ out += formatter.Fiw(0, formatter.COLS, section.Description+"\n")
+ }
+ return out
+}
+
+func (f GinkgoFlagSet) usageForFlag(flag GinkgoFlag, style string) string {
+ argument := flag.UsageArgument
+ defValue := flag.UsageDefaultValue
+ if argument == "" {
+ value, _ := valueAtKeyPath(f.bindings, flag.KeyPath)
+ switch value.Type() {
+ case reflect.TypeOf(string("")):
+ argument = "string"
+ case reflect.TypeOf(int64(0)), reflect.TypeOf(int(0)):
+ argument = "int"
+ case reflect.TypeOf(time.Duration(0)):
+ argument = "duration"
+ case reflect.TypeOf(float64(0)):
+ argument = "float"
+ case reflect.TypeOf([]string{}):
+ argument = "string"
+ }
+ }
+ if argument != "" {
+ argument = "[" + argument + "] "
+ }
+ if defValue != "" {
+ defValue = fmt.Sprintf("(default: %s)", defValue)
+ }
+ hyphens := "--"
+ if len(flag.Name) == 1 {
+ hyphens = "-"
+ }
+
+ out := formatter.Fi(1, style+"%s%s{{/}} %s{{gray}}%s{{/}}\n", hyphens, flag.Name, argument, defValue)
+ out += formatter.Fiw(2, formatter.COLS, "{{light-gray}}%s{{/}}\n", flag.Usage)
+ return out
+}
+
+func (f GinkgoFlagSet) usageForGoFlag(goFlag *flag.Flag) string {
+ //Taken directly from the flag package
+ out := fmt.Sprintf(" -%s", goFlag.Name)
+ name, usage := flag.UnquoteUsage(goFlag)
+ if len(name) > 0 {
+ out += " " + name
+ }
+ if len(out) <= 4 {
+ out += "\t"
+ } else {
+ out += "\n \t"
+ }
+ out += strings.ReplaceAll(usage, "\n", "\n \t")
+ out += "\n"
+ return out
+}
+
+type stringSliceVar struct {
+ slice reflect.Value
+}
+
+func (ssv stringSliceVar) String() string { return "" }
+func (ssv stringSliceVar) Set(s string) error {
+ ssv.slice.Set(reflect.AppendSlice(ssv.slice, reflect.ValueOf([]string{s})))
+ return nil
+}
+
+// GenerateFlagArgs generates, from a set of GinkgoFlags and bindings, flag arguments suitable to be passed to an application with that set of flags configured.
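+// As a sketch of the rules below (editor's note): a bool binding set to true
+// yields "--name", a non-zero int yields "--name=N", a []string yields one
+// "--name=value" argument per element, and zero values are skipped unless the
+// flag sets AlwaysExport.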
+func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) {
+ result := []string{}
+ for _, flag := range flags {
+ name := flag.ExportAs
+ if name == "" {
+ name = flag.Name
+ }
+ if name == "" {
+ continue
+ }
+
+ value, ok := valueAtKeyPath(bindings, flag.KeyPath)
+ if !ok {
+ return []string{}, fmt.Errorf("could not load KeyPath: %s", flag.KeyPath)
+ }
+
+ iface := value.Interface()
+ switch value.Type() {
+ case reflect.TypeOf(string("")):
+ if iface.(string) != "" || flag.AlwaysExport {
+ result = append(result, fmt.Sprintf("--%s=%s", name, iface))
+ }
+ case reflect.TypeOf(int64(0)):
+ if iface.(int64) != 0 || flag.AlwaysExport {
+ result = append(result, fmt.Sprintf("--%s=%d", name, iface))
+ }
+ case reflect.TypeOf(float64(0)):
+ if iface.(float64) != 0 || flag.AlwaysExport {
+ result = append(result, fmt.Sprintf("--%s=%f", name, iface))
+ }
+ case reflect.TypeOf(int(0)):
+ if iface.(int) != 0 || flag.AlwaysExport {
+ result = append(result, fmt.Sprintf("--%s=%d", name, iface))
+ }
+ case reflect.TypeOf(bool(true)):
+ if iface.(bool) {
+ result = append(result, fmt.Sprintf("--%s", name))
+ }
+ case reflect.TypeOf(time.Duration(0)):
+ if iface.(time.Duration) != time.Duration(0) || flag.AlwaysExport {
+ result = append(result, fmt.Sprintf("--%s=%s", name, iface))
+ }
+
+ case reflect.TypeOf([]string{}):
+ strings := iface.([]string)
+ for _, s := range strings {
+ result = append(result, fmt.Sprintf("--%s=%s", name, s))
+ }
+ default:
+ return []string{}, fmt.Errorf("unsupported type %T", iface)
+ }
+ }
+
+ return result, nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go
new file mode 100644
index 0000000..7fdc8aa
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go
@@ -0,0 +1,583 @@
+package types
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+)
+
+var DEBUG_LABEL_FILTER_PARSING = false
+
+type LabelFilter func([]string) bool
+
+func matchLabelAction(label string) LabelFilter {
+ expected := strings.ToLower(label)
+ return func(labels []string) bool {
+ for i := range labels {
+ if strings.ToLower(labels[i]) == expected {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+func matchLabelRegexAction(regex *regexp.Regexp) LabelFilter {
+ return func(labels []string) bool {
+ for i := range labels {
+ if regex.MatchString(labels[i]) {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+func notAction(filter LabelFilter) LabelFilter {
+ return func(labels []string) bool { return !filter(labels) }
+}
+
+func andAction(a, b LabelFilter) LabelFilter {
+ return func(labels []string) bool { return a(labels) && b(labels) }
+}
+
+func orAction(a, b LabelFilter) LabelFilter {
+ return func(labels []string) bool { return a(labels) || b(labels) }
+}
+
+func labelSetFor(key string, labels []string) map[string]bool {
+ key = strings.ToLower(strings.TrimSpace(key))
+ out := map[string]bool{}
+ for _, label := range labels {
+ components := strings.SplitN(label, ":", 2)
+ if len(components) < 2 {
+ continue
+ }
+ if key == strings.ToLower(strings.TrimSpace(components[0])) {
+ out[strings.ToLower(strings.TrimSpace(components[1]))] = true
+ }
+ }
+
+ return out
+}
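+
+// For example (editor's note): labelSetFor("os", []string{"os: linux",
+// "OS: Windows", "fast"}) returns {"linux": true, "windows": true} - only
+// "key: value" labels with a matching, case-insensitive key contribute.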
+
+func isEmptyLabelSetAction(key string) LabelFilter {
+ return func(labels []string) bool {
+ return len(labelSetFor(key, labels)) == 0
+ }
+}
+
+func containsAnyLabelSetAction(key string, expectedValues []string) LabelFilter {
+ return func(labels []string) bool {
+ set := labelSetFor(key, labels)
+ for _, value := range expectedValues {
+ if set[value] {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+func containsAllLabelSetAction(key string, expectedValues []string) LabelFilter {
+ return func(labels []string) bool {
+ set := labelSetFor(key, labels)
+ for _, value := range expectedValues {
+ if !set[value] {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+func consistsOfLabelSetAction(key string, expectedValues []string) LabelFilter {
+ return func(labels []string) bool {
+ set := labelSetFor(key, labels)
+ if len(set) != len(expectedValues) {
+ return false
+ }
+ for _, value := range expectedValues {
+ if !set[value] {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+func isSubsetOfLabelSetAction(key string, expectedValues []string) LabelFilter {
+ expectedSet := map[string]bool{}
+ for _, value := range expectedValues {
+ expectedSet[value] = true
+ }
+ return func(labels []string) bool {
+ set := labelSetFor(key, labels)
+ for value := range set {
+ if !expectedSet[value] {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+type lfToken uint
+
+const (
+ lfTokenInvalid lfToken = iota
+
+ lfTokenRoot
+ lfTokenOpenGroup
+ lfTokenCloseGroup
+ lfTokenNot
+ lfTokenAnd
+ lfTokenOr
+ lfTokenRegexp
+ lfTokenLabel
+ lfTokenSetKey
+ lfTokenSetOperation
+ lfTokenSetArgument
+ lfTokenEOF
+)
+
+func (l lfToken) Precedence() int {
+ switch l {
+ case lfTokenRoot, lfTokenOpenGroup:
+ return 0
+ case lfTokenOr:
+ return 1
+ case lfTokenAnd:
+ return 2
+ case lfTokenNot:
+ return 3
+ case lfTokenSetOperation:
+ return 4
+ }
+ return -1
+}
+
+func (l lfToken) String() string {
+ switch l {
+ case lfTokenRoot:
+ return "ROOT"
+ case lfTokenOpenGroup:
+ return "("
+ case lfTokenCloseGroup:
+ return ")"
+ case lfTokenNot:
+ return "!"
+ case lfTokenAnd:
+ return "&&"
+ case lfTokenOr:
+ return "||"
+ case lfTokenRegexp:
+ return "/regexp/"
+ case lfTokenLabel:
+ return "label"
+ case lfTokenSetKey:
+ return "set_key"
+ case lfTokenSetOperation:
+ return "set_operation"
+ case lfTokenSetArgument:
+ return "set_argument"
+ case lfTokenEOF:
+ return "EOF"
+ }
+ return "INVALID"
+}
+
+type treeNode struct {
+ token lfToken
+ location int
+ value string
+
+ parent *treeNode
+ leftNode *treeNode
+ rightNode *treeNode
+}
+
+func (tn *treeNode) setRightNode(node *treeNode) {
+ tn.rightNode = node
+ node.parent = tn
+}
+
+func (tn *treeNode) setLeftNode(node *treeNode) {
+ tn.leftNode = node
+ node.parent = tn
+}
+
+func (tn *treeNode) firstAncestorWithPrecedenceLEQ(precedence int) *treeNode {
+ if tn.token.Precedence() <= precedence {
+ return tn
+ }
+ return tn.parent.firstAncestorWithPrecedenceLEQ(precedence)
+}
+
+func (tn *treeNode) firstUnmatchedOpenNode() *treeNode {
+ if tn.token == lfTokenOpenGroup {
+ return tn
+ }
+ if tn.parent == nil {
+ return nil
+ }
+ return tn.parent.firstUnmatchedOpenNode()
+}
+
+func (tn *treeNode) constructLabelFilter(input string) (LabelFilter, error) {
+ switch tn.token {
+ case lfTokenOpenGroup:
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, "Mismatched '(' - could not find matching ')'.")
+ case lfTokenLabel:
+ return matchLabelAction(tn.value), nil
+ case lfTokenRegexp:
+ re, err := regexp.Compile(tn.value)
+ if err != nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("RegExp compilation error: %s", err))
+ }
+ return matchLabelRegexAction(re), nil
+ case lfTokenSetOperation:
+ tokenSetOperation := strings.ToLower(tn.value)
+ if tokenSetOperation == "isempty" {
+ return isEmptyLabelSetAction(tn.leftNode.value), nil
+ }
+ if tn.rightNode == nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Set operation '%s' is missing an argument.", tn.value))
+ }
+
+ rawValues := strings.Split(tn.rightNode.value, ",")
+ values := make([]string, len(rawValues))
+ for i := range rawValues {
+ values[i] = strings.ToLower(strings.TrimSpace(rawValues[i]))
+ if strings.ContainsAny(values[i], "&|!,()/") {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, fmt.Sprintf("Invalid label value '%s' in set operation argument.", values[i]))
+ } else if values[i] == "" {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, "Empty label value in set operation argument.")
+ }
+ }
+ switch tokenSetOperation {
+ case "containsany":
+ return containsAnyLabelSetAction(tn.leftNode.value, values), nil
+ case "containsall":
+ return containsAllLabelSetAction(tn.leftNode.value, values), nil
+ case "consistsof":
+ return consistsOfLabelSetAction(tn.leftNode.value, values), nil
+ case "issubsetof":
+ return isSubsetOfLabelSetAction(tn.leftNode.value, values), nil
+ }
+ }
+
+ if tn.rightNode == nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, -1, "Unexpected EOF.")
+ }
+ rightLF, err := tn.rightNode.constructLabelFilter(input)
+ if err != nil {
+ return nil, err
+ }
+
+ switch tn.token {
+ case lfTokenRoot, lfTokenCloseGroup:
+ return rightLF, nil
+ case lfTokenNot:
+ return notAction(rightLF), nil
+ }
+
+ if tn.leftNode == nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Malformed tree - '%s' is missing left operand.", tn.token))
+ }
+ leftLF, err := tn.leftNode.constructLabelFilter(input)
+ if err != nil {
+ return nil, err
+ }
+
+ switch tn.token {
+ case lfTokenAnd:
+ return andAction(leftLF, rightLF), nil
+ case lfTokenOr:
+ return orAction(leftLF, rightLF), nil
+ }
+
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Invalid token '%s'.", tn.token))
+}
+
+func (tn *treeNode) tokenString() string {
+ out := fmt.Sprintf("<%s", tn.token)
+ if tn.value != "" {
+ out += " | " + tn.value
+ }
+ out += ">"
+ return out
+}
+
+func (tn *treeNode) toString(indent int) string {
+ out := tn.tokenString() + "\n"
+ if tn.leftNode != nil {
+ out += fmt.Sprintf("%s |_(L)_%s", strings.Repeat(" ", indent), tn.leftNode.toString(indent+1))
+ }
+ if tn.rightNode != nil {
+ out += fmt.Sprintf("%s |_(R)_%s", strings.Repeat(" ", indent), tn.rightNode.toString(indent+1))
+ }
+ return out
+}
+
+var validSetOperations = map[string]string{
+ "containsany": "containsAny",
+ "containsall": "containsAll",
+ "consistsof": "consistsOf",
+ "issubsetof": "isSubsetOf",
+ "isempty": "isEmpty",
+}
+
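+// tokenize returns a closure that lexes one token per call. It keeps the
+// scanner position and the previously emitted token between calls so that a
+// set-operation expression ("key: operation {arguments}") can be recognized
+// as a sequence of SetKey, SetOperation, and SetArgument tokens.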
+func tokenize(input string) func() (*treeNode, error) {
+ lastToken := lfTokenInvalid
+ lastValue := ""
+ runes, i := []rune(input), 0
+
+ peekIs := func(r rune) bool {
+ if i+1 < len(runes) {
+ return runes[i+1] == r
+ }
+ return false
+ }
+
+ consumeUntil := func(cutset string) (string, int) {
+ j := i
+ for ; j < len(runes); j++ {
+ if strings.IndexRune(cutset, runes[j]) >= 0 {
+ break
+ }
+ }
+ return string(runes[i:j]), j - i
+ }
+
+ return func() (*treeNode, error) {
+ for i < len(runes) && runes[i] == ' ' {
+ i += 1
+ }
+
+ if i >= len(runes) {
+ return &treeNode{token: lfTokenEOF}, nil
+ }
+
+ node := &treeNode{location: i}
+ defer func() {
+ lastToken = node.token
+ lastValue = node.value
+ }()
+
+ if lastToken == lfTokenSetKey {
+ //we should get a valid set operation next
+ value, n := consumeUntil(" )")
+ if validSetOperations[strings.ToLower(value)] == "" {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, fmt.Sprintf("Invalid set operation '%s'.", value))
+ }
+ i += n
+ node.token, node.value = lfTokenSetOperation, value
+ return node, nil
+ }
+ if lastToken == lfTokenSetOperation {
+ //we should get an argument next, if we aren't isempty
+ var arg = ""
+ origI := i
+ if runes[i] == '{' {
+ i += 1
+ value, n := consumeUntil("}")
+ if i+n >= len(runes) {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-1, "Missing closing '}' in set operation argument?")
+ }
+ i += n + 1
+ arg = value
+ } else {
+ value, n := consumeUntil("&|!,()/")
+ i += n
+ arg = strings.TrimSpace(value)
+ }
+ if strings.ToLower(lastValue) == "isempty" && arg != "" {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("isEmpty does not take arguments, was passed '%s'.", arg))
+ }
+ if arg == "" && strings.ToLower(lastValue) != "isempty" {
+ if i < len(runes) && runes[i] == '/' {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, "Set operations do not support regular expressions.")
+ } else {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("Set operation '%s' requires an argument.", lastValue))
+ }
+ }
+ // note that we send an empty SetArgument token when the operation is isEmpty
+ node.token, node.value = lfTokenSetArgument, arg
+ return node, nil
+ }
+
+ switch runes[i] {
+ case '&':
+ if !peekIs('&') {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Invalid token '&'. Did you mean '&&'?")
+ }
+ i += 2
+ node.token = lfTokenAnd
+ case '|':
+ if !peekIs('|') {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Invalid token '|'. Did you mean '||'?")
+ }
+ i += 2
+ node.token = lfTokenOr
+ case '!':
+ i += 1
+ node.token = lfTokenNot
+ case ',':
+ i += 1
+ node.token = lfTokenOr
+ case '(':
+ i += 1
+ node.token = lfTokenOpenGroup
+ case ')':
+ i += 1
+ node.token = lfTokenCloseGroup
+ case '/':
+ i += 1
+ value, n := consumeUntil("/")
+ i += n + 1
+ node.token, node.value = lfTokenRegexp, value
+ default:
+ value, n := consumeUntil("&|!,()/:")
+ i += n
+ value = strings.TrimSpace(value)
+
+ //are we the beginning of a set operation?
+ if i < len(runes) && runes[i] == ':' {
+ if peekIs(' ') {
+ if value == "" {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set key.")
+ }
+ i += 1
+ //we are the beginning of a set operation
+ node.token, node.value = lfTokenSetKey, value
+ return node, nil
+ }
+ additionalValue, n := consumeUntil("&|!,()/")
+ additionalValue = strings.TrimSpace(additionalValue)
+ if additionalValue == ":" {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set operation.")
+ }
+ i += n
+ value += additionalValue
+ }
+
+ valueToCheckForSetOperation := strings.ToLower(value)
+ for setOperation := range validSetOperations {
+ idx := strings.Index(valueToCheckForSetOperation, " "+setOperation)
+ if idx > 0 {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-n+idx+1, fmt.Sprintf("Looks like you are using the set operator '%s' but did not provide a set key. Did you forget the ':'?", validSetOperations[setOperation]))
+ }
+ }
+
+ node.token, node.value = lfTokenLabel, strings.TrimSpace(value)
+ }
+ return node, nil
+ }
+}
+
+func MustParseLabelFilter(input string) LabelFilter {
+ filter, err := ParseLabelFilter(input)
+ if err != nil {
+ panic(err)
+ }
+ return filter
+}
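+
+// A minimal usage sketch (editor's illustration, not upstream Ginkgo code):
+//
+//	filter := MustParseLabelFilter("integration && !flaky")
+//	filter([]string{"integration", "fast"})  // true
+//	filter([]string{"integration", "flaky"}) // false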
+
+func ParseLabelFilter(input string) (LabelFilter, error) {
+ if DEBUG_LABEL_FILTER_PARSING {
+ fmt.Println("\n==============")
+ fmt.Println("Input: ", input)
+ fmt.Print("Tokens: ")
+ }
+ if input == "" {
+ return func(_ []string) bool { return true }, nil
+ }
+ nextToken := tokenize(input)
+
+ root := &treeNode{token: lfTokenRoot}
+ current := root
+LOOP:
+ for {
+ node, err := nextToken()
+ if err != nil {
+ return nil, err
+ }
+
+ if DEBUG_LABEL_FILTER_PARSING {
+ fmt.Print(node.tokenString() + " ")
+ }
+
+ switch node.token {
+ case lfTokenEOF:
+ break LOOP
+ case lfTokenLabel, lfTokenRegexp, lfTokenSetKey:
+ if current.rightNode != nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found two adjacent labels. You need an operator between them.")
+ }
+ current.setRightNode(node)
+ case lfTokenNot, lfTokenOpenGroup:
+ if current.rightNode != nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Invalid token '%s'.", node.token))
+ }
+ current.setRightNode(node)
+ current = node
+ case lfTokenAnd, lfTokenOr:
+ if current.rightNode == nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Operator '%s' missing left hand operand.", node.token))
+ }
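+ //standard precedence climbing: walk up to the first ancestor that binds at
+ //least as loosely, steal its right child as our left operand, and take its
+ //place. For "a || b && c" the '&&' node steals 'b' from '||'.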
+ nodeToStealFrom := current.firstAncestorWithPrecedenceLEQ(node.token.Precedence())
+ node.setLeftNode(nodeToStealFrom.rightNode)
+ nodeToStealFrom.setRightNode(node)
+ current = node
+ case lfTokenSetOperation:
+ if current.rightNode == nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Set operation '%s' missing left hand operand.", node.value))
+ }
+ node.setLeftNode(current.rightNode)
+ current.setRightNode(node)
+ current = node
+ case lfTokenSetArgument:
+ if current.rightNode != nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Unexpected set argument '%s'.", node.token))
+ }
+ current.setRightNode(node)
+ case lfTokenCloseGroup:
+ firstUnmatchedOpenNode := current.firstUnmatchedOpenNode()
+ if firstUnmatchedOpenNode == nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Mismatched ')' - could not find matching '('.")
+ }
+ if firstUnmatchedOpenNode == current && current.rightNode == nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found empty '()' group.")
+ }
+ firstUnmatchedOpenNode.token = lfTokenCloseGroup //signify the group is now closed
+ current = firstUnmatchedOpenNode.parent
+ default:
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Unknown token '%s'.", node.token))
+ }
+ }
+ if DEBUG_LABEL_FILTER_PARSING {
+ fmt.Printf("\n Tree:\n%s", root.toString(0))
+ }
+ return root.constructLabelFilter(input)
+}
+
+func ValidateAndCleanupLabel(label string, cl CodeLocation) (string, error) {
+ out := strings.TrimSpace(label)
+ if out == "" {
+ return "", GinkgoErrors.InvalidEmptyLabel(cl)
+ }
+ if strings.ContainsAny(out, "&|!,()/") {
+ return "", GinkgoErrors.InvalidLabel(label, cl)
+ }
+ if out[0] == ':' {
+ return "", GinkgoErrors.InvalidLabel(label, cl)
+ }
+ if strings.Contains(out, ":") {
+ components := strings.SplitN(out, ":", 2)
+ if len(components) < 2 || components[1] == "" {
+ return "", GinkgoErrors.InvalidLabel(label, cl)
+ }
+ }
+ return out, nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go
new file mode 100644
index 0000000..7b1524b
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go
@@ -0,0 +1,190 @@
+package types
+
+import (
+ "encoding/json"
+ "fmt"
+ "time"
+)
+
+// ReportEntryValue wraps a report entry's value ensuring it can be encoded and decoded safely into reports
+// and across the network connection when running in parallel
+type ReportEntryValue struct {
+ raw interface{} //unexported to prevent gob from freaking out about unregistered structs
+ AsJSON string
+ Representation string
+}
+
+func WrapEntryValue(value interface{}) ReportEntryValue {
+ return ReportEntryValue{
+ raw: value,
+ }
+}
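+
+// A round-trip sketch (editor's illustration, not upstream Ginkgo code):
+//
+//	rev := WrapEntryValue(struct{ N int }{N: 3})
+//	data, _ := json.Marshal(rev) // Representation is captured as "{N:3}" at encode time
+//	var out ReportEntryValue
+//	_ = json.Unmarshal(data, &out) // out.GetRawValue() is now a map[string]interface{} (numbers become float64)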
+
+func (rev ReportEntryValue) GetRawValue() interface{} {
+ return rev.raw
+}
+
+func (rev ReportEntryValue) String() string {
+ if rev.raw == nil {
+ return ""
+ }
+ if colorableStringer, ok := rev.raw.(ColorableStringer); ok {
+ return colorableStringer.ColorableString()
+ }
+
+ if stringer, ok := rev.raw.(fmt.Stringer); ok {
+ return stringer.String()
+ }
+ if rev.Representation != "" {
+ return rev.Representation
+ }
+ return fmt.Sprintf("%+v", rev.raw)
+}
+
+func (rev ReportEntryValue) MarshalJSON() ([]byte, error) {
+ //All this to capture the representation at encoding-time, not creation-time
+ //This way users can Report on pointers and get their final values at reporting-time
+ out := struct {
+ AsJSON string
+ Representation string
+ }{
+ Representation: rev.String(),
+ }
+ asJSON, err := json.Marshal(rev.raw)
+ if err != nil {
+ return nil, err
+ }
+ out.AsJSON = string(asJSON)
+
+ return json.Marshal(out)
+}
+
+func (rev *ReportEntryValue) UnmarshalJSON(data []byte) error {
+ in := struct {
+ AsJSON string
+ Representation string
+ }{}
+ err := json.Unmarshal(data, &in)
+ if err != nil {
+ return err
+ }
+ rev.AsJSON = in.AsJSON
+ rev.Representation = in.Representation
+ return json.Unmarshal([]byte(in.AsJSON), &(rev.raw))
+}
+
+func (rev ReportEntryValue) GobEncode() ([]byte, error) {
+ return rev.MarshalJSON()
+}
+
+func (rev *ReportEntryValue) GobDecode(data []byte) error {
+ return rev.UnmarshalJSON(data)
+}
+
+// ReportEntry captures information attached to `SpecReport` via `AddReportEntry`
+type ReportEntry struct {
+ // Visibility captures the visibility policy for this ReportEntry
+ Visibility ReportEntryVisibility
+ // Location captures the location of the AddReportEntry call
+ Location CodeLocation
+
+ Time time.Time //need this for backwards compatibility
+ TimelineLocation TimelineLocation
+
+ // Name captures the name of this report
+ Name string
+ // Value captures the (optional) object passed into AddReportEntry - this can be
+ // anything the user wants. The value passed to AddReportEntry is wrapped in a ReportEntryValue to make
+ // encoding/decoding the value easier. To access the raw value call entry.GetRawValue()
+ Value ReportEntryValue
+}
+
+// ColorableStringer is an interface that ReportEntry values can satisfy. If they do then ColorableString() is used to generate their representation.
+type ColorableStringer interface {
+ ColorableString() string
+}
+
+// StringRepresentation() returns the string representation of the value associated with the ReportEntry --
+// if value is nil, empty string is returned
+// if value is a `ColorableStringer` then `Value.ColorableString()` is returned
+// if value is a `fmt.Stringer` then `Value.String()` is returned
+// otherwise the value is formatted with "%+v"
+func (entry ReportEntry) StringRepresentation() string {
+ return entry.Value.String()
+}
+
+// GetRawValue returns the Value object that was passed to AddReportEntry
+// If called in-process this will be the same object that was passed into AddReportEntry.
+// If used from a rehydrated JSON file _or_ in a ReportAfterSuite when running in parallel this will be
+// a JSON-decoded interface{}. If you want to reconstitute your original object you can decode the entry.Value.AsJSON
+// field yourself.
+func (entry ReportEntry) GetRawValue() interface{} {
+ return entry.Value.GetRawValue()
+}
+
+func (entry ReportEntry) GetTimelineLocation() TimelineLocation {
+ return entry.TimelineLocation
+}
+
+type ReportEntries []ReportEntry
+
+func (re ReportEntries) HasVisibility(visibilities ...ReportEntryVisibility) bool {
+ for _, entry := range re {
+ if entry.Visibility.Is(visibilities...) {
+ return true
+ }
+ }
+ return false
+}
+
+func (re ReportEntries) WithVisibility(visibilities ...ReportEntryVisibility) ReportEntries {
+ out := ReportEntries{}
+
+ for _, entry := range re {
+ if entry.Visibility.Is(visibilities...) {
+ out = append(out, entry)
+ }
+ }
+
+ return out
+}
+
+// ReportEntryVisibility governs the visibility of ReportEntries in Ginkgo's console reporter
+type ReportEntryVisibility uint
+
+const (
+ // Always print out this ReportEntry
+ ReportEntryVisibilityAlways ReportEntryVisibility = iota
+ // Only print out this ReportEntry if the spec fails or if the test is run with -v
+ ReportEntryVisibilityFailureOrVerbose
+ // Never print out this ReportEntry (note that ReportEntries are always encoded in machine-readable reports such as JSON and JUnit)
+ ReportEntryVisibilityNever
+)
+
+var revEnumSupport = NewEnumSupport(map[uint]string{
+ uint(ReportEntryVisibilityAlways): "always",
+ uint(ReportEntryVisibilityFailureOrVerbose): "failure-or-verbose",
+ uint(ReportEntryVisibilityNever): "never",
+})
+
+func (rev ReportEntryVisibility) String() string {
+ return revEnumSupport.String(uint(rev))
+}
+func (rev *ReportEntryVisibility) UnmarshalJSON(b []byte) error {
+ out, err := revEnumSupport.UnmarshJSON(b)
+ *rev = ReportEntryVisibility(out)
+ return err
+}
+func (rev ReportEntryVisibility) MarshalJSON() ([]byte, error) {
+ return revEnumSupport.MarshJSON(uint(rev))
+}
+
+func (v ReportEntryVisibility) Is(visibilities ...ReportEntryVisibility) bool {
+ for _, visibility := range visibilities {
+ if v == visibility {
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/types.go b/vendor/github.com/onsi/ginkgo/v2/types/types.go
new file mode 100644
index 0000000..aae69b0
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/types.go
@@ -0,0 +1,914 @@
+package types
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strings"
+ "time"
+)
+
+const GINKGO_FOCUS_EXIT_CODE = 197
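+
+// GINKGO_TIME_FORMAT is a Go reference-time layout: month/day/two-digit year
+// with millisecond precision.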
+const GINKGO_TIME_FORMAT = "01/02/06 15:04:05.999"
+
+// Report captures information about a Ginkgo test run
+type Report struct {
+ //SuitePath captures the absolute path to the test suite
+ SuitePath string
+
+ //SuiteDescription captures the description string passed to the DSL's RunSpecs() function
+ SuiteDescription string
+
+ //SuiteLabels captures any labels attached to the suite by the DSL's RunSpecs() function
+ SuiteLabels []string
+
+ //SuiteSucceeded captures the success or failure status of the test run
+ //If true, the test run is considered successful.
+ //If false, the test run is considered unsuccessful
+ SuiteSucceeded bool
+
+ //SuiteHasProgrammaticFocus captures whether the test suite has a test or set of tests that are programmatically focused
+ //(i.e. an `FIt` or an `FDescribe`)
+ SuiteHasProgrammaticFocus bool
+
+ //SpecialSuiteFailureReasons may contain special failure reasons.
+ //A test suite might be considered "failed" even if none of the individual specs
+ //have a failure state. For example, if the user has configured --fail-on-pending, the test suite
+ //will have failed if there are pending tests even though all non-pending tests may have passed. In such
+ //cases, Ginkgo populates SpecialSuiteFailureReasons with a clear message indicating the reason for the failure.
+ //SpecialSuiteFailureReasons is also populated if the test suite is interrupted by the user.
+ //Since multiple special failure reasons can occur, this field is a slice.
+ SpecialSuiteFailureReasons []string
+
+ //PreRunStats contains a set of stats captured before the test run begins. This is primarily used
+ //by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs)
+ //and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters.
+ PreRunStats PreRunStats
+
+ //StartTime and EndTime capture the start and end time of the test run
+ StartTime time.Time
+ EndTime time.Time
+
+ //RunTime captures the duration of the test run
+ RunTime time.Duration
+
+ //SuiteConfig captures the Ginkgo configuration governing this test run
+ //SuiteConfig includes information necessary for reproducing an identical test run,
+ //such as the random seed and any filters applied during the test run
+ SuiteConfig SuiteConfig
+
+ //SpecReports is a list of all SpecReports generated by this test run
+ //It is empty when the SuiteReport is provided to ReportBeforeSuite
+ SpecReports SpecReports
+}
+
+// PreRunStats contains a set of stats captured before the test run begins. This is primarily used
+// by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs)
+// and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters.
+type PreRunStats struct {
+ TotalSpecs int
+ SpecsThatWillRun int
+}
+
+// Add is used by Ginkgo's parallel aggregation mechanisms to combine test run reports from individual parallel processes
+// to form a complete final report.
+func (report Report) Add(other Report) Report {
+ report.SuiteSucceeded = report.SuiteSucceeded && other.SuiteSucceeded
+
+ if other.StartTime.Before(report.StartTime) {
+ report.StartTime = other.StartTime
+ }
+
+ if other.EndTime.After(report.EndTime) {
+ report.EndTime = other.EndTime
+ }
+
+ specialSuiteFailureReasons := []string{}
+ reasonsLookup := map[string]bool{}
+ for _, reasons := range [][]string{report.SpecialSuiteFailureReasons, other.SpecialSuiteFailureReasons} {
+ for _, reason := range reasons {
+ if !reasonsLookup[reason] {
+ reasonsLookup[reason] = true
+ specialSuiteFailureReasons = append(specialSuiteFailureReasons, reason)
+ }
+ }
+ }
+ report.SpecialSuiteFailureReasons = specialSuiteFailureReasons
+ report.RunTime = report.EndTime.Sub(report.StartTime)
+
+ reports := make(SpecReports, len(report.SpecReports)+len(other.SpecReports))
+ copy(reports, report.SpecReports)
+ offset := len(report.SpecReports)
+ for i := range other.SpecReports {
+ reports[i+offset] = other.SpecReports[i]
+ }
+
+ report.SpecReports = reports
+ return report
+}
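+
+// As a sketch of Add's behavior: given two hypothetical reports r1 and r2 from
+// parallel processes, the combined report succeeds only if both succeeded, its
+// StartTime/EndTime span both runs, and its SpecReports concatenates both:
+//
+//	combined := r1.Add(r2)
+//	_ = combined.RunTime // EndTime.Sub(StartTime) across both runs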
+
+// SpecReport captures information about a Ginkgo spec.
+type SpecReport struct {
+ // ContainerHierarchyTexts is a slice containing the text strings of
+ // all Describe/Context/When containers in this spec's hierarchy.
+ ContainerHierarchyTexts []string
+
+ // ContainerHierarchyLocations is a slice containing the CodeLocations of
+ // all Describe/Context/When containers in this spec's hierarchy.
+ ContainerHierarchyLocations []CodeLocation
+
+ // ContainerHierarchyLabels is a slice containing the labels of
+ // all Describe/Context/When containers in this spec's hierarchy
+ ContainerHierarchyLabels [][]string
+
+ // LeafNodeType, LeafNodeLocation, LeafNodeLabels and LeafNodeText capture the NodeType, CodeLocation, and text
+ // of the Ginkgo node being tested (typically a NodeTypeIt node, though this can also be
+ // one of the NodeTypesForSuiteLevelNodes node types)
+ LeafNodeType NodeType
+ LeafNodeLocation CodeLocation
+ LeafNodeLabels []string
+ LeafNodeText string
+
+ // State captures whether the spec has passed, failed, etc.
+ State SpecState
+
+ // IsSerial captures whether the spec has the Serial decorator
+ IsSerial bool
+
+ // IsInOrderedContainer captures whether the spec appears in an Ordered container
+ IsInOrderedContainer bool
+
+ // StartTime and EndTime capture the start and end time of the spec
+ StartTime time.Time
+ EndTime time.Time
+
+ // RunTime captures the duration of the spec
+ RunTime time.Duration
+
+ // ParallelProcess captures the parallel process that this spec ran on
+ ParallelProcess int
+
+ // RunningInParallel captures whether this spec is part of a suite that ran in parallel
+ RunningInParallel bool
+
+ //Failure is populated if a spec has failed, panicked, been interrupted, or skipped by the user (e.g. calling Skip())
+ //It includes detailed information about the Failure
+ Failure Failure
+
+ // NumAttempts captures the number of times this Spec was run.
+ // Flaky specs can be retried with ginkgo --flake-attempts=N or the use of the FlakeAttempts decorator.
+ // Repeated specs can be retried with the use of the MustPassRepeatedly decorator
+ NumAttempts int
+
+ // MaxFlakeAttempts captures the maximum number of attempts allowed when the spec is retried with ginkgo --flake-attempts=N or the FlakeAttempts decorator.
+ MaxFlakeAttempts int
+
+ // MaxMustPassRepeatedly captures the number of times the spec must pass when the MustPassRepeatedly decorator is used.
+ MaxMustPassRepeatedly int
+
+ // CapturedGinkgoWriterOutput contains text printed to the GinkgoWriter
+ CapturedGinkgoWriterOutput string
+
+ // CapturedStdOutErr contains text printed to stdout/stderr (when running in parallel)
+ // This is always empty when running in series or calling CurrentSpecReport()
+ // It is used internally by Ginkgo's reporter
+ CapturedStdOutErr string
+
+ // ReportEntries contains any reports added via `AddReportEntry`
+ ReportEntries ReportEntries
+
+ // ProgressReports contains any progress reports generated during this spec. These can either be manually triggered, or automatically generated by Ginkgo via the PollProgressAfter() decorator
+ ProgressReports []ProgressReport
+
+ // AdditionalFailures contains any failures that occurred after the initial spec failure. These typically occur in cleanup nodes after the initial failure and are only emitted when running in verbose mode.
+ AdditionalFailures []AdditionalFailure
+
+ // SpecEvents capture additional events that occur during the spec run
+ SpecEvents SpecEvents
+}
+
+func (report SpecReport) MarshalJSON() ([]byte, error) {
+ //All this to avoid emitting an empty Failure struct in the JSON
+ out := struct {
+ ContainerHierarchyTexts []string
+ ContainerHierarchyLocations []CodeLocation
+ ContainerHierarchyLabels [][]string
+ LeafNodeType NodeType
+ LeafNodeLocation CodeLocation
+ LeafNodeLabels []string
+ LeafNodeText string
+ State SpecState
+ StartTime time.Time
+ EndTime time.Time
+ RunTime time.Duration
+ ParallelProcess int
+ Failure *Failure `json:",omitempty"`
+ NumAttempts int
+ MaxFlakeAttempts int
+ MaxMustPassRepeatedly int
+ CapturedGinkgoWriterOutput string `json:",omitempty"`
+ CapturedStdOutErr string `json:",omitempty"`
+ ReportEntries ReportEntries `json:",omitempty"`
+ ProgressReports []ProgressReport `json:",omitempty"`
+ AdditionalFailures []AdditionalFailure `json:",omitempty"`
+ SpecEvents SpecEvents `json:",omitempty"`
+ }{
+ ContainerHierarchyTexts: report.ContainerHierarchyTexts,
+ ContainerHierarchyLocations: report.ContainerHierarchyLocations,
+ ContainerHierarchyLabels: report.ContainerHierarchyLabels,
+ LeafNodeType: report.LeafNodeType,
+ LeafNodeLocation: report.LeafNodeLocation,
+ LeafNodeLabels: report.LeafNodeLabels,
+ LeafNodeText: report.LeafNodeText,
+ State: report.State,
+ StartTime: report.StartTime,
+ EndTime: report.EndTime,
+ RunTime: report.RunTime,
+ ParallelProcess: report.ParallelProcess,
+ Failure: nil,
+ ReportEntries: nil,
+ NumAttempts: report.NumAttempts,
+ MaxFlakeAttempts: report.MaxFlakeAttempts,
+ MaxMustPassRepeatedly: report.MaxMustPassRepeatedly,
+ CapturedGinkgoWriterOutput: report.CapturedGinkgoWriterOutput,
+ CapturedStdOutErr: report.CapturedStdOutErr,
+ }
+
+ if !report.Failure.IsZero() {
+ out.Failure = &(report.Failure)
+ }
+ if len(report.ReportEntries) > 0 {
+ out.ReportEntries = report.ReportEntries
+ }
+ if len(report.ProgressReports) > 0 {
+ out.ProgressReports = report.ProgressReports
+ }
+ if len(report.AdditionalFailures) > 0 {
+ out.AdditionalFailures = report.AdditionalFailures
+ }
+ if len(report.SpecEvents) > 0 {
+ out.SpecEvents = report.SpecEvents
+ }
+
+ return json.Marshal(out)
+}
+
+// CombinedOutput returns a single string representation of both CapturedStdOutErr and CapturedGinkgoWriterOutput
+// Note that both are empty when using CurrentSpecReport() so CurrentSpecReport().CombinedOutput() will always be empty.
+// CombinedOutput() is used internally by Ginkgo's reporter.
+func (report SpecReport) CombinedOutput() string {
+ if report.CapturedStdOutErr == "" {
+ return report.CapturedGinkgoWriterOutput
+ }
+ if report.CapturedGinkgoWriterOutput == "" {
+ return report.CapturedStdOutErr
+ }
+ return report.CapturedStdOutErr + "\n" + report.CapturedGinkgoWriterOutput
+}
+
+// Failed returns true if report.State is one of the SpecStateFailureStates
+// (SpecStateFailed, SpecStatePanicked, SpecStateInterrupted, SpecStateAborted, SpecStateTimedout)
+func (report SpecReport) Failed() bool {
+ return report.State.Is(SpecStateFailureStates)
+}
+
+// FullText returns a concatenation of all the report.ContainerHierarchyTexts and report.LeafNodeText
+func (report SpecReport) FullText() string {
+ texts := []string{}
+ texts = append(texts, report.ContainerHierarchyTexts...)
+ if report.LeafNodeText != "" {
+ texts = append(texts, report.LeafNodeText)
+ }
+ return strings.Join(texts, " ")
+}
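+
+// For illustration, a hypothetical spec declared as Describe("Books") >
+// Context("with more than 300 pages") > It("is a novel") would have
+// FullText() == "Books with more than 300 pages is a novel".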
+
+// Labels returns a deduped set of all the spec's Labels.
+func (report SpecReport) Labels() []string {
+ out := []string{}
+ seen := map[string]bool{}
+ for _, labels := range report.ContainerHierarchyLabels {
+ for _, label := range labels {
+ if !seen[label] {
+ seen[label] = true
+ out = append(out, label)
+ }
+ }
+ }
+ for _, label := range report.LeafNodeLabels {
+ if !seen[label] {
+ seen[label] = true
+ out = append(out, label)
+ }
+ }
+
+ return out
+}
+
+// MatchesLabelFilter returns true if the spec satisfies the passed in label filter query
+func (report SpecReport) MatchesLabelFilter(query string) (bool, error) {
+ filter, err := ParseLabelFilter(query)
+ if err != nil {
+ return false, err
+ }
+ return filter(report.Labels()), nil
+}
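+
+// Example (a sketch; the query below is hypothetical): queries use Ginkgo's
+// label-filter syntax, supporting &&, ||, ! and parentheses:
+//
+//	ok, err := report.MatchesLabelFilter("smoke && !slow")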
+
+// FileName() returns the name of the file containing the spec
+func (report SpecReport) FileName() string {
+ return report.LeafNodeLocation.FileName
+}
+
+// LineNumber() returns the line number of the leaf node
+func (report SpecReport) LineNumber() int {
+ return report.LeafNodeLocation.LineNumber
+}
+
+// FailureMessage() returns the failure message (or empty string if the test hasn't failed)
+func (report SpecReport) FailureMessage() string {
+ return report.Failure.Message
+}
+
+// FailureLocation() returns the location of the failure (or an empty CodeLocation if the test hasn't failed)
+func (report SpecReport) FailureLocation() CodeLocation {
+ return report.Failure.Location
+}
+
+// Timeline() returns a timeline view of the report
+func (report SpecReport) Timeline() Timeline {
+ timeline := Timeline{}
+ if !report.Failure.IsZero() {
+ timeline = append(timeline, report.Failure)
+ if report.Failure.AdditionalFailure != nil {
+ timeline = append(timeline, *(report.Failure.AdditionalFailure))
+ }
+ }
+ for _, additionalFailure := range report.AdditionalFailures {
+ timeline = append(timeline, additionalFailure)
+ }
+ for _, reportEntry := range report.ReportEntries {
+ timeline = append(timeline, reportEntry)
+ }
+ for _, progressReport := range report.ProgressReports {
+ timeline = append(timeline, progressReport)
+ }
+ for _, specEvent := range report.SpecEvents {
+ timeline = append(timeline, specEvent)
+ }
+ sort.Sort(timeline)
+ return timeline
+}
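+
+// A sketch of iterating the timeline (how each event is handled depends on
+// its concrete type):
+//
+//	for _, event := range report.Timeline() {
+//		fmt.Printf("%T at %s\n", event, event.GetTimelineLocation().Time)
+//	}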
+
+type SpecReports []SpecReport
+
+// WithLeafNodeType returns the subset of SpecReports with LeafNodeType matching one of the requested NodeTypes
+func (reports SpecReports) WithLeafNodeType(nodeTypes NodeType) SpecReports {
+ count := 0
+ for i := range reports {
+ if reports[i].LeafNodeType.Is(nodeTypes) {
+ count++
+ }
+ }
+
+ out := make(SpecReports, count)
+ j := 0
+ for i := range reports {
+ if reports[i].LeafNodeType.Is(nodeTypes) {
+ out[j] = reports[i]
+ j++
+ }
+ }
+ return out
+}
+
+// WithState returns the subset of SpecReports with State matching one of the requested SpecStates
+func (reports SpecReports) WithState(states SpecState) SpecReports {
+ count := 0
+ for i := range reports {
+ if reports[i].State.Is(states) {
+ count++
+ }
+ }
+
+ out, j := make(SpecReports, count), 0
+ for i := range reports {
+ if reports[i].State.Is(states) {
+ out[j] = reports[i]
+ j++
+ }
+ }
+ return out
+}
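+
+// Because SpecState values are bit flags they can be OR-ed together, so a
+// single call can select several states at once (a sketch):
+//
+//	failedOrPanicked := reports.WithState(SpecStateFailed | SpecStatePanicked)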
+
+// CountWithState returns the number of SpecReports with State matching one of the requested SpecStates
+func (reports SpecReports) CountWithState(states SpecState) int {
+ n := 0
+ for i := range reports {
+ if reports[i].State.Is(states) {
+ n += 1
+ }
+ }
+ return n
+}
+
+// CountOfFlakedSpecs returns the number of SpecReports for specs that ultimately passed but required multiple attempts (i.e. flaked) when retried via --flake-attempts or the FlakeAttempts decorator.
+func (reports SpecReports) CountOfFlakedSpecs() int {
+ n := 0
+ for i := range reports {
+ if reports[i].MaxFlakeAttempts > 1 && reports[i].State.Is(SpecStatePassed) && reports[i].NumAttempts > 1 {
+ n += 1
+ }
+ }
+ return n
+}
+
+// CountOfRepeatedSpecs returns the number of SpecReports for specs decorated with MustPassRepeatedly that ultimately failed after multiple attempts.
+func (reports SpecReports) CountOfRepeatedSpecs() int {
+ n := 0
+ for i := range reports {
+ if reports[i].MaxMustPassRepeatedly > 1 && reports[i].State.Is(SpecStateFailureStates) && reports[i].NumAttempts > 1 {
+ n += 1
+ }
+ }
+ return n
+}
+
+// TimelineLocation captures the location of an event in the spec's timeline
+type TimelineLocation struct {
+ //Offset is the offset (in bytes) of the event relative to the GinkgoWriter stream
+ Offset int `json:",omitempty"`
+
+ //Order is the order of the event with respect to other events. The absolute value of Order
+//is irrelevant. All that matters is that an event with a lower Order occurs before an event with a higher Order
+ Order int `json:",omitempty"`
+
+ Time time.Time
+}
+
+// TimelineEvent represents an event on the timeline.
+// Consumers of Timeline will need to check the concrete type of each entry to determine how to handle it.
+type TimelineEvent interface {
+ GetTimelineLocation() TimelineLocation
+}
+
+type Timeline []TimelineEvent
+
+func (t Timeline) Len() int { return len(t) }
+func (t Timeline) Less(i, j int) bool {
+ return t[i].GetTimelineLocation().Order < t[j].GetTimelineLocation().Order
+}
+func (t Timeline) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
+func (t Timeline) WithoutHiddenReportEntries() Timeline {
+ out := Timeline{}
+ for _, event := range t {
+ if reportEntry, isReportEntry := event.(ReportEntry); isReportEntry && reportEntry.Visibility == ReportEntryVisibilityNever {
+ continue
+ }
+ out = append(out, event)
+ }
+ return out
+}
+
+func (t Timeline) WithoutVeryVerboseSpecEvents() Timeline {
+ out := Timeline{}
+ for _, event := range t {
+ if specEvent, isSpecEvent := event.(SpecEvent); isSpecEvent && specEvent.IsOnlyVisibleAtVeryVerbose() {
+ continue
+ }
+ out = append(out, event)
+ }
+ return out
+}
+
+// Failure captures failure information for an individual test
+type Failure struct {
+ // Message - the failure message passed into Fail(...). When using a matcher library
+ // like Gomega, this will contain the failure message generated by Gomega.
+ //
+ // Message is also populated if the user has called Skip(...).
+ Message string
+
+ // Location - the CodeLocation where the failure occurred
+ // This CodeLocation will include a fully-populated StackTrace
+ Location CodeLocation
+
+ TimelineLocation TimelineLocation
+
+ // ForwardedPanic - if the failure represents a captured panic (i.e. Summary.State == SpecStatePanicked)
+ // then ForwardedPanic will be populated with a string representation of the captured panic.
+ ForwardedPanic string `json:",omitempty"`
+
+ // FailureNodeContext - one of three contexts describing the node in which the failure occurred:
+ // FailureNodeIsLeafNode means the failure occurred in the leaf node of the associated SpecReport. None of the other FailureNode fields will be populated
+ // FailureNodeAtTopLevel means the failure occurred in a non-leaf node that is defined at the top-level of the spec (i.e. not in a container). FailureNodeType and FailureNodeLocation will be populated.
+ // FailureNodeInContainer means the failure occurred in a non-leaf node that is defined within a container. FailureNodeType, FailureNodeLocation, and FailureNodeContainerIndex will be populated.
+ //
+ // FailureNodeType will contain the NodeType of the node in which the failure occurred.
+ // FailureNodeLocation will contain the CodeLocation of the node in which the failure occurred.
+ // If populated, FailureNodeContainerIndex will be the index into SpecReport.ContainerHierarchyTexts and SpecReport.ContainerHierarchyLocations that represents the parent container of the node in which the failure occurred.
+ FailureNodeContext FailureNodeContext `json:",omitempty"`
+
+ FailureNodeType NodeType `json:",omitempty"`
+
+ FailureNodeLocation CodeLocation `json:",omitempty"`
+
+ FailureNodeContainerIndex int `json:",omitempty"`
+
+ //ProgressReport is populated if the spec was interrupted or timed out
+ ProgressReport ProgressReport `json:",omitempty"`
+
+ //AdditionalFailure is non-nil if a follow-on failure occurred within the same node after the primary failure. This only happens when a node has timed out or been interrupted. In such cases the AdditionalFailure can include information about where/why the spec was stuck.
+ AdditionalFailure *AdditionalFailure `json:",omitempty"`
+}
+
+func (f Failure) IsZero() bool {
+ return f.Message == "" && (f.Location == CodeLocation{})
+}
+
+func (f Failure) GetTimelineLocation() TimelineLocation {
+ return f.TimelineLocation
+}
+
+// FailureNodeContext captures the location context for the node containing the failing line of code
+type FailureNodeContext uint
+
+const (
+ FailureNodeContextInvalid FailureNodeContext = iota
+
+ FailureNodeIsLeafNode
+ FailureNodeAtTopLevel
+ FailureNodeInContainer
+)
+
+var fncEnumSupport = NewEnumSupport(map[uint]string{
+ uint(FailureNodeContextInvalid): "INVALID FAILURE NODE CONTEXT",
+ uint(FailureNodeIsLeafNode): "leaf-node",
+ uint(FailureNodeAtTopLevel): "top-level",
+ uint(FailureNodeInContainer): "in-container",
+})
+
+func (fnc FailureNodeContext) String() string {
+ return fncEnumSupport.String(uint(fnc))
+}
+func (fnc *FailureNodeContext) UnmarshalJSON(b []byte) error {
+ out, err := fncEnumSupport.UnmarshJSON(b)
+ *fnc = FailureNodeContext(out)
+ return err
+}
+func (fnc FailureNodeContext) MarshalJSON() ([]byte, error) {
+ return fncEnumSupport.MarshJSON(uint(fnc))
+}
+
+// AdditionalFailure captures any additional failures that occur after the initial failure of a spec.
+// These typically occur in cleanup nodes after the spec has failed.
+// We can't simply use Failure as we want to track the SpecState to know what kind of failure this is
+type AdditionalFailure struct {
+ State SpecState
+ Failure Failure
+}
+
+func (f AdditionalFailure) GetTimelineLocation() TimelineLocation {
+ return f.Failure.TimelineLocation
+}
+
+// SpecState captures the state of a spec
+// To determine if a given `state` represents a failure state, use `state.Is(SpecStateFailureStates)`
+type SpecState uint
+
+const (
+ SpecStateInvalid SpecState = 0
+
+ SpecStatePending SpecState = 1 << iota
+ SpecStateSkipped
+ SpecStatePassed
+ SpecStateFailed
+ SpecStateAborted
+ SpecStatePanicked
+ SpecStateInterrupted
+ SpecStateTimedout
+)
+
+var ssEnumSupport = NewEnumSupport(map[uint]string{
+ uint(SpecStateInvalid): "INVALID SPEC STATE",
+ uint(SpecStatePending): "pending",
+ uint(SpecStateSkipped): "skipped",
+ uint(SpecStatePassed): "passed",
+ uint(SpecStateFailed): "failed",
+ uint(SpecStateAborted): "aborted",
+ uint(SpecStatePanicked): "panicked",
+ uint(SpecStateInterrupted): "interrupted",
+ uint(SpecStateTimedout): "timedout",
+})
+
+func (ss SpecState) String() string {
+ return ssEnumSupport.String(uint(ss))
+}
+func (ss SpecState) GomegaString() string {
+ return ssEnumSupport.String(uint(ss))
+}
+func (ss *SpecState) UnmarshalJSON(b []byte) error {
+ out, err := ssEnumSupport.UnmarshJSON(b)
+ *ss = SpecState(out)
+ return err
+}
+func (ss SpecState) MarshalJSON() ([]byte, error) {
+ return ssEnumSupport.MarshJSON(uint(ss))
+}
+
+var SpecStateFailureStates = SpecStateFailed | SpecStateTimedout | SpecStateAborted | SpecStatePanicked | SpecStateInterrupted
+
+func (ss SpecState) Is(states SpecState) bool {
+ return ss&states != 0
+}
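+
+// Is performs a bitwise-AND membership test, so one call can check a state
+// against a mask of several states (a sketch):
+//
+//	if report.State.Is(SpecStateFailureStates) {
+//		// failed, timed out, aborted, panicked, or interrupted
+//	}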
+
+// ProgressReport captures the progress of the current spec. It is, effectively, a structured Ginkgo-aware stack trace
+type ProgressReport struct {
+ Message string `json:",omitempty"`
+ ParallelProcess int `json:",omitempty"`
+ RunningInParallel bool `json:",omitempty"`
+
+ ContainerHierarchyTexts []string `json:",omitempty"`
+ LeafNodeText string `json:",omitempty"`
+ LeafNodeLocation CodeLocation `json:",omitempty"`
+ SpecStartTime time.Time `json:",omitempty"`
+
+ CurrentNodeType NodeType `json:",omitempty"`
+ CurrentNodeText string `json:",omitempty"`
+ CurrentNodeLocation CodeLocation `json:",omitempty"`
+ CurrentNodeStartTime time.Time `json:",omitempty"`
+
+ CurrentStepText string `json:",omitempty"`
+ CurrentStepLocation CodeLocation `json:",omitempty"`
+ CurrentStepStartTime time.Time `json:",omitempty"`
+
+ AdditionalReports []string `json:",omitempty"`
+
+ CapturedGinkgoWriterOutput string `json:",omitempty"`
+ TimelineLocation TimelineLocation `json:",omitempty"`
+
+ Goroutines []Goroutine `json:",omitempty"`
+}
+
+func (pr ProgressReport) IsZero() bool {
+ return pr.CurrentNodeType == NodeTypeInvalid
+}
+
+func (pr ProgressReport) Time() time.Time {
+ return pr.TimelineLocation.Time
+}
+
+func (pr ProgressReport) SpecGoroutine() Goroutine {
+ for _, goroutine := range pr.Goroutines {
+ if goroutine.IsSpecGoroutine {
+ return goroutine
+ }
+ }
+ return Goroutine{}
+}
+
+func (pr ProgressReport) HighlightedGoroutines() []Goroutine {
+ out := []Goroutine{}
+ for _, goroutine := range pr.Goroutines {
+ if goroutine.IsSpecGoroutine || !goroutine.HasHighlights() {
+ continue
+ }
+ out = append(out, goroutine)
+ }
+ return out
+}
+
+func (pr ProgressReport) OtherGoroutines() []Goroutine {
+ out := []Goroutine{}
+ for _, goroutine := range pr.Goroutines {
+ if goroutine.IsSpecGoroutine || goroutine.HasHighlights() {
+ continue
+ }
+ out = append(out, goroutine)
+ }
+ return out
+}
+
+func (pr ProgressReport) WithoutCapturedGinkgoWriterOutput() ProgressReport {
+ out := pr
+ out.CapturedGinkgoWriterOutput = ""
+ return out
+}
+
+func (pr ProgressReport) WithoutOtherGoroutines() ProgressReport {
+ out := pr
+ filteredGoroutines := []Goroutine{}
+ for _, goroutine := range pr.Goroutines {
+ if goroutine.IsSpecGoroutine || goroutine.HasHighlights() {
+ filteredGoroutines = append(filteredGoroutines, goroutine)
+ }
+ }
+ out.Goroutines = filteredGoroutines
+ return out
+}
+
+func (pr ProgressReport) GetTimelineLocation() TimelineLocation {
+ return pr.TimelineLocation
+}
+
+type Goroutine struct {
+ ID uint64
+ State string
+ Stack []FunctionCall
+ IsSpecGoroutine bool
+}
+
+func (g Goroutine) IsZero() bool {
+ return g.ID == 0
+}
+
+func (g Goroutine) HasHighlights() bool {
+ for _, fc := range g.Stack {
+ if fc.Highlight {
+ return true
+ }
+ }
+
+ return false
+}
+
+type FunctionCall struct {
+ Function string
+ Filename string
+ Line int
+ Highlight bool `json:",omitempty"`
+ Source []string `json:",omitempty"`
+ SourceHighlight int `json:",omitempty"`
+}
+
+// NodeType captures the type of a given Ginkgo Node
+type NodeType uint
+
+const (
+ NodeTypeInvalid NodeType = 0
+
+ NodeTypeContainer NodeType = 1 << iota
+ NodeTypeIt
+
+ NodeTypeBeforeEach
+ NodeTypeJustBeforeEach
+ NodeTypeAfterEach
+ NodeTypeJustAfterEach
+
+ NodeTypeBeforeAll
+ NodeTypeAfterAll
+
+ NodeTypeBeforeSuite
+ NodeTypeSynchronizedBeforeSuite
+ NodeTypeAfterSuite
+ NodeTypeSynchronizedAfterSuite
+
+ NodeTypeReportBeforeEach
+ NodeTypeReportAfterEach
+ NodeTypeReportBeforeSuite
+ NodeTypeReportAfterSuite
+
+ NodeTypeCleanupInvalid
+ NodeTypeCleanupAfterEach
+ NodeTypeCleanupAfterAll
+ NodeTypeCleanupAfterSuite
+)
+
+var NodeTypesForContainerAndIt = NodeTypeContainer | NodeTypeIt
+var NodeTypesForSuiteLevelNodes = NodeTypeBeforeSuite | NodeTypeSynchronizedBeforeSuite | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite | NodeTypeCleanupAfterSuite
+var NodeTypesAllowedDuringCleanupInterrupt = NodeTypeAfterEach | NodeTypeJustAfterEach | NodeTypeAfterAll | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeCleanupAfterEach | NodeTypeCleanupAfterAll | NodeTypeCleanupAfterSuite
+var NodeTypesAllowedDuringReportInterrupt = NodeTypeReportBeforeEach | NodeTypeReportAfterEach | NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite
+
+var ntEnumSupport = NewEnumSupport(map[uint]string{
+ uint(NodeTypeInvalid): "INVALID NODE TYPE",
+ uint(NodeTypeContainer): "Container",
+ uint(NodeTypeIt): "It",
+ uint(NodeTypeBeforeEach): "BeforeEach",
+ uint(NodeTypeJustBeforeEach): "JustBeforeEach",
+ uint(NodeTypeAfterEach): "AfterEach",
+ uint(NodeTypeJustAfterEach): "JustAfterEach",
+ uint(NodeTypeBeforeAll): "BeforeAll",
+ uint(NodeTypeAfterAll): "AfterAll",
+ uint(NodeTypeBeforeSuite): "BeforeSuite",
+ uint(NodeTypeSynchronizedBeforeSuite): "SynchronizedBeforeSuite",
+ uint(NodeTypeAfterSuite): "AfterSuite",
+ uint(NodeTypeSynchronizedAfterSuite): "SynchronizedAfterSuite",
+ uint(NodeTypeReportBeforeEach): "ReportBeforeEach",
+ uint(NodeTypeReportAfterEach): "ReportAfterEach",
+ uint(NodeTypeReportBeforeSuite): "ReportBeforeSuite",
+ uint(NodeTypeReportAfterSuite): "ReportAfterSuite",
+ uint(NodeTypeCleanupInvalid): "DeferCleanup",
+ uint(NodeTypeCleanupAfterEach): "DeferCleanup (Each)",
+ uint(NodeTypeCleanupAfterAll): "DeferCleanup (All)",
+ uint(NodeTypeCleanupAfterSuite): "DeferCleanup (Suite)",
+})
+
+func (nt NodeType) String() string {
+ return ntEnumSupport.String(uint(nt))
+}
+func (nt *NodeType) UnmarshalJSON(b []byte) error {
+ out, err := ntEnumSupport.UnmarshJSON(b)
+ *nt = NodeType(out)
+ return err
+}
+func (nt NodeType) MarshalJSON() ([]byte, error) {
+ return ntEnumSupport.MarshJSON(uint(nt))
+}
+
+func (nt NodeType) Is(nodeTypes NodeType) bool {
+ return nt&nodeTypes != 0
+}
+
+/*
+SpecEvent captures a variety of events that can occur when specs run. See SpecEventType for the list of available events.
+*/
+type SpecEvent struct {
+ SpecEventType SpecEventType
+
+ CodeLocation CodeLocation
+ TimelineLocation TimelineLocation
+
+ Message string `json:",omitempty"`
+ Duration time.Duration `json:",omitempty"`
+ NodeType NodeType `json:",omitempty"`
+ Attempt int `json:",omitempty"`
+}
+
+func (se SpecEvent) GetTimelineLocation() TimelineLocation {
+ return se.TimelineLocation
+}
+
+func (se SpecEvent) IsOnlyVisibleAtVeryVerbose() bool {
+ return se.SpecEventType.Is(SpecEventByEnd | SpecEventNodeStart | SpecEventNodeEnd)
+}
+
+func (se SpecEvent) GomegaString() string {
+ out := &strings.Builder{}
+ out.WriteString("[" + se.SpecEventType.String() + " SpecEvent] ")
+ if se.Message != "" {
+ out.WriteString("Message=")
+ out.WriteString(`"` + se.Message + `",`)
+ }
+ if se.Duration != 0 {
+ out.WriteString("Duration=" + se.Duration.String() + ",")
+ }
+ if se.NodeType != NodeTypeInvalid {
+ out.WriteString("NodeType=" + se.NodeType.String() + ",")
+ }
+ if se.Attempt != 0 {
+ out.WriteString(fmt.Sprintf("Attempt=%d", se.Attempt) + ",")
+ }
+ out.WriteString("CL=" + se.CodeLocation.String() + ",")
+ out.WriteString(fmt.Sprintf("TL.Offset=%d", se.TimelineLocation.Offset))
+
+ return out.String()
+}
+
+type SpecEvents []SpecEvent
+
+func (se SpecEvents) WithType(seType SpecEventType) SpecEvents {
+ out := SpecEvents{}
+ for _, event := range se {
+ if event.SpecEventType.Is(seType) {
+ out = append(out, event)
+ }
+ }
+ return out
+}
+
+type SpecEventType uint
+
+const (
+ SpecEventInvalid SpecEventType = 0
+
+ SpecEventByStart SpecEventType = 1 << iota
+ SpecEventByEnd
+ SpecEventNodeStart
+ SpecEventNodeEnd
+ SpecEventSpecRepeat
+ SpecEventSpecRetry
+)
+
+var seEnumSupport = NewEnumSupport(map[uint]string{
+ uint(SpecEventInvalid): "INVALID SPEC EVENT",
+ uint(SpecEventByStart): "By",
+ uint(SpecEventByEnd): "By (End)",
+ uint(SpecEventNodeStart): "Node",
+ uint(SpecEventNodeEnd): "Node (End)",
+ uint(SpecEventSpecRepeat): "Repeat",
+ uint(SpecEventSpecRetry): "Retry",
+})
+
+func (se SpecEventType) String() string {
+ return seEnumSupport.String(uint(se))
+}
+func (se *SpecEventType) UnmarshalJSON(b []byte) error {
+ out, err := seEnumSupport.UnmarshJSON(b)
+ *se = SpecEventType(out)
+ return err
+}
+func (se SpecEventType) MarshalJSON() ([]byte, error) {
+ return seEnumSupport.MarshJSON(uint(se))
+}
+
+func (se SpecEventType) Is(specEventTypes SpecEventType) bool {
+ return se&specEventTypes != 0
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/types_patch.go b/vendor/github.com/onsi/ginkgo/v2/types/types_patch.go
new file mode 100644
index 0000000..02d319b
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/types_patch.go
@@ -0,0 +1,8 @@
+package types
+
+type TestSpec interface {
+ CodeLocations() []CodeLocation
+ Text() string
+ AppendText(text string)
+ Labels() []string
+}
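+
+// Example (hypothetical): an implementation might wrap a Ginkgo spec and use
+// AppendText to qualify the spec name for a particular suite, e.g.
+//
+//	spec.AppendText(" [Suite:example/conformance]")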
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go
new file mode 100644
index 0000000..acab034
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go
@@ -0,0 +1,3 @@
+package types
+
+const VERSION = "2.19.0"
diff --git a/vendor/github.com/onsi/gomega/.gitignore b/vendor/github.com/onsi/gomega/.gitignore
new file mode 100644
index 0000000..425d0a5
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/.gitignore
@@ -0,0 +1,7 @@
+.DS_Store
+*.test
+.
+.idea
+gomega.iml
+TODO
+.vscode
\ No newline at end of file
diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md
new file mode 100644
index 0000000..62af14a
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/CHANGELOG.md
@@ -0,0 +1,721 @@
+## 1.33.1
+
+### Fixes
+- fix confusing eventually docs [3a66379]
+
+### Maintenance
+- Bump github.com/onsi/ginkgo/v2 from 2.17.1 to 2.17.2 [e9bc35a]
+
+## 1.33.0
+
+### Features
+
+`Receive` now accepts `Receive(<POINTER>, MATCHER)`, allowing you to pick out a specific value on the channel that satisfies the provided matcher; the matching value is stored in the provided pointer.
+
+### Maintenance
+- Bump github.com/onsi/ginkgo/v2 from 2.15.0 to 2.17.1 (#745) [9999deb]
+- Bump github-pages from 229 to 230 in /docs (#735) [cb5ff21]
+- Bump golang.org/x/net from 0.20.0 to 0.23.0 (#746) [bac6596]
+
+## 1.32.0
+
+### Maintenance
+- Migrate github.com/golang/protobuf to google.golang.org/protobuf [436a197]
+
+ This release drops the deprecated github.com/golang/protobuf and adopts google.golang.org/protobuf. Care was taken to ensure the release is backwards compatible (thanks @jbduncan !). Please open an issue if you run into one.
+
+- chore: test with Go 1.22 (#733) [32ef35e]
+- Bump golang.org/x/net from 0.19.0 to 0.20.0 (#717) [a0d0387]
+- Bump github-pages and jekyll-feed in /docs (#732) [b71e477]
+- docs: fix typo and broken anchor link to gstruct [f460154]
+- docs: fix HaveEach matcher signature [a2862e4]
+
+## 1.31.1
+
+### Fixes
+- Inverted arguments order of FailureMessage of BeComparableToMatcher [e0dd999]
+- Update test in case keeping msg is desired [ad1a367]
+
+### Maintenance
+- Show how to import the format sub package [24e958d]
+- tidy up go.sum [26661b8]
+- bump dependencies [bde8f7a]
+
+## 1.31.0
+
+### Features
+- Async assertions include context cancellation cause if present [121c37f]
+
+### Maintenance
+- Bump minimum go version [dee1e3c]
+- docs: fix typo in example usage "occured" -> "occurred" [49005fe]
+- Bump actions/setup-go from 4 to 5 (#714) [f1c8757]
+- Bump github/codeql-action from 2 to 3 (#715) [9836e76]
+- Bump github.com/onsi/ginkgo/v2 from 2.13.0 to 2.13.2 (#713) [54726f0]
+- Bump golang.org/x/net from 0.17.0 to 0.19.0 (#711) [df97ecc]
+- docs: fix `HaveExactElement` typo (#712) [a672c86]
+
+## 1.30.0
+
+### Features
+- BeTrueBecause and BeFalseBecause allow for better failure messages [4da4c7f]
+
+### Maintenance
+- Bump actions/checkout from 3 to 4 (#694) [6ca6e97]
+- doc: fix type on gleak go doc [f1b8343]
+
+## 1.29.0
+
+### Features
+- MatchError can now take an optional func(error) bool + description [2b39142]
+
+## 1.28.1
+
+### Maintenance
+- Bump github.com/onsi/ginkgo/v2 from 2.12.0 to 2.13.0 [635d196]
+- Bump github.com/google/go-cmp from 0.5.9 to 0.6.0 [14f8859]
+- Bump golang.org/x/net from 0.14.0 to 0.17.0 [d8a6508]
+- #703 doc(matchers): HaveEach() doc comment updated [2705bdb]
+- Minor typos (#699) [375648c]
+
+## 1.28.0
+
+### Features
+- Add VerifyHost handler to ghttp (#698) [0b03b36]
+
+### Fixes
+- Read Body for Newer Responses in HaveHTTPBodyMatcher (#686) [18d6673]
+
+### Maintenance
+- Bump github.com/onsi/ginkgo/v2 from 2.11.0 to 2.12.0 (#693) [55a33f3]
+- Typo in matchers.go (#691) [de68e8f]
+- Bump commonmarker from 0.23.9 to 0.23.10 in /docs (#690) [ab17f5e]
+- chore: update test matrix for Go 1.21 (#689) [5069017]
+- Bump golang.org/x/net from 0.12.0 to 0.14.0 (#688) [babe25f]
+
+## 1.27.10
+
+### Fixes
+- fix: go 1.21 adding goroutine ID to creator+location (#685) [bdc7803]
+
+## 1.27.9
+
+### Fixes
+- Prevent nil-dereference in format.Object for boxed nil error (#681) [3b31fc3]
+
+### Maintenance
+- Bump golang.org/x/net from 0.11.0 to 0.12.0 (#679) [360849b]
+- chore: use String() instead of fmt.Sprintf (#678) [86f3659]
+- Bump golang.org/x/net from 0.10.0 to 0.11.0 (#674) [642ead0]
+- chore: unnecessary use of fmt.Sprintf (#677) [ceb9ca6]
+- Bump github.com/onsi/ginkgo/v2 from 2.10.0 to 2.11.0 (#675) [a2087d8]
+- docs: fix ContainSubstring references (#673) [fc9a89f]
+- Bump github.com/onsi/ginkgo/v2 from 2.9.7 to 2.10.0 (#671) [9076019]
+
+## 1.27.8
+
+### Fixes
+- HaveExactElement should not call FailureMessage if a submatcher returned an error [096f392]
+
+### Maintenance
+- Bump github.com/onsi/ginkgo/v2 from 2.9.5 to 2.9.7 (#669) [8884bee]
+
+## 1.27.7
+
+### Fixes
+- fix: gcustom.MakeMatcher accepts nil as actual value (#666) [57054d5]
+
+### Maintenance
+- update gitignore [05c1bc6]
+- Bump github.com/onsi/ginkgo/v2 from 2.9.4 to 2.9.5 (#663) [7cadcf6]
+- Bump golang.org/x/net from 0.9.0 to 0.10.0 (#662) [b524839]
+- Bump github.com/onsi/ginkgo/v2 from 2.9.2 to 2.9.4 (#661) [5f44694]
+- Bump commonmarker from 0.23.8 to 0.23.9 in /docs (#657) [05dc99a]
+- Bump nokogiri from 1.14.1 to 1.14.3 in /docs (#658) [3a033d1]
+- Replace deprecated NewGomegaWithT with NewWithT (#659) [a19238f]
+- Bump golang.org/x/net from 0.8.0 to 0.9.0 (#656) [29ed041]
+- Bump actions/setup-go from 3 to 4 (#651) [11b2080]
+
+## 1.27.6
+
+### Fixes
+- Allow collections matchers to work correctly when expected has nil elements [60e7cf3]
+
+### Maintenance
+- updates MatchError godoc comment to also accept a Gomega matcher (#654) [67b869d]
+
+## 1.27.5
+
+### Maintenance
+- Bump github.com/onsi/ginkgo/v2 from 2.9.1 to 2.9.2 (#653) [a215021]
+- Bump github.com/go-task/slim-sprig (#652) [a26fed8]
+
+## 1.27.4
+
+### Fixes
+- improve error formatting and remove duplication of error message in Eventually/Consistently [854f075]
+
+### Maintenance
+- Bump github.com/onsi/ginkgo/v2 from 2.9.0 to 2.9.1 (#650) [ccebd9b]
+
+## 1.27.3
+
+### Fixes
+- format.Object now always includes err.Error() when passed an error [86d97ef]
+- Fix HaveExactElements to work inside ContainElement or other collection matchers (#648) [636757e]
+
+### Maintenance
+- Bump github.com/golang/protobuf from 1.5.2 to 1.5.3 (#649) [cc16689]
+- Bump github.com/onsi/ginkgo/v2 from 2.8.4 to 2.9.0 (#646) [e783366]
+
+## 1.27.2
+
+### Fixes
+- improve poll progress message when polling a consistently that has been passing [28a319b]
+
+### Maintenance
+- bump ginkgo
+- remove tools.go hack as Ginkgo 2.8.2 automatically pulls in the cli dependencies [81443b3]
+
+## 1.27.1
+
+### Maintenance
+
+- Bump golang.org/x/net from 0.6.0 to 0.7.0 (#640) [bc686cd]
+
+## 1.27.0
+
+### Features
+- Add HaveExactElements matcher (#634) [9d50783]
+- update Gomega docs to discuss GinkgoHelper() [be32774]
+
+### Maintenance
+- Bump github.com/onsi/ginkgo/v2 from 2.8.0 to 2.8.1 (#639) [296a68b]
+- Bump golang.org/x/net from 0.5.0 to 0.6.0 (#638) [c2b098b]
+- Bump github-pages from 227 to 228 in /docs (#636) [a9069ab]
+- test: update matrix for Go 1.20 (#635) [6bd25c8]
+- Bump github.com/onsi/ginkgo/v2 from 2.7.0 to 2.8.0 (#631) [5445f8b]
+- Bump webrick from 1.7.0 to 1.8.1 in /docs (#630) [03e93bb]
+- codeql: add ruby language (#626) [63c7d21]
+- dependabot: add bundler package-ecosystem for docs (#625) [d92f963]
+
+## 1.26.0
+
+### Features
+- When a polled function returns an error, keep track of the actual and report on the matcher state of the last non-errored actual [21f3090]
+- improve eventually failure message output [c530fb3]
+
+### Fixes
+- fix several documentation spelling issues [e2eff1f]
+
+
+## 1.25.0
+
+### Features
+- add `MustPassRepeatedly(int)` to asyncAssertion (#619) [4509f72]
+- compare unwrapped errors using DeepEqual (#617) [aaeaa5d]
+
+### Maintenance
+- Bump golang.org/x/net from 0.4.0 to 0.5.0 (#614) [c7cfea4]
+- Bump github.com/onsi/ginkgo/v2 from 2.6.1 to 2.7.0 (#615) [71b8adb]
+- Docs: Fix typo "MUltiple" -> "Multiple" (#616) [9351dda]
+- clean up go.sum [cd1dc1d]
+
+## 1.24.2
+
+### Fixes
+- Correctly handle assertion failure panics for eventually/consistently "g Gomega"s in a goroutine [78f1660]
+- docs:Fix typo "you an" -> "you can" (#607) [3187c1f]
+- fixes issue #600 (#606) [808d192]
+
+### Maintenance
+- Bump golang.org/x/net from 0.2.0 to 0.4.0 (#611) [6ebc0bf]
+- Bump nokogiri from 1.13.9 to 1.13.10 in /docs (#612) [258cfc8]
+- Bump github.com/onsi/ginkgo/v2 from 2.5.0 to 2.5.1 (#609) [e6c3eb9]
+
+## 1.24.1
+
+### Fixes
+- maintain backward compatibility for Eventually and Consistently's signatures [4c7df5e]
+- fix small typo (#601) [ea0ebe6]
+
+### Maintenance
+- Bump golang.org/x/net from 0.1.0 to 0.2.0 (#603) [1ba8372]
+- Bump github.com/onsi/ginkgo/v2 from 2.4.0 to 2.5.0 (#602) [f9426cb]
+- fix label-filter in test.yml [d795db6]
+- stop running flakey tests and rely on external network dependencies in CI [7133290]
+
+## 1.24.0
+
+### Features
+
+Introducing [gcustom](https://onsi.github.io/gomega/#gcustom-a-convenient-mechanism-for-buildling-custom-matchers) - a convenient mechanism for building custom matchers.
+
+This is an RC release for `gcustom`. The external API may be tweaked in response to feedback; however, it is expected to remain mostly stable.
+
+### Maintenance
+
+- Update BeComparableTo documentation [756eaa0]
+
+## 1.23.0
+
+### Features
+- Custom formatting on a per-type basis can be provided using `format.RegisterCustomFormatter()` -- see the docs [here](https://onsi.github.io/gomega/#adjusting-output)
+
+- Substantial improvements have been made to `StopTrying()` (see the sketch after this list):
+ - Users can now use `StopTrying().Wrap(err)` to wrap errors and `StopTrying().Attach(description, object)` to attach arbitrary objects to the `StopTrying()` error
+ - `StopTrying()` is now always interpreted as a failure. If you are an early adopter of `StopTrying()` you may need to change your code as the prior version would match against the returned value even if `StopTrying()` was returned. Going forward the `StopTrying()` api should remain stable.
+ - `StopTrying()` and `StopTrying().Now()` can both be used in matchers - not just polled functions.
+
+- `TryAgainAfter(duration)` is used like `StopTrying()` but instructs `Eventually` and `Consistently` that the poll should be tried again after the specified duration. This allows you to dynamically adjust the polling duration.
+
+- `ctx` can now be passed-in as the first argument to `Eventually` and `Consistently`.
+
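+A minimal sketch of the `StopTrying` flow described above (assuming hypothetical `fetchToken` and `ErrRevoked`):
+
+```go
+Eventually(func() (string, error) {
+	token, err := fetchToken()
+	if errors.Is(err, ErrRevoked) {
+		return "", StopTrying("token was revoked").Wrap(err)
+	}
+	return token, err
+}).ShouldNot(BeEmpty())
+```
+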
+### Maintenance
+
+- Bump github.com/onsi/ginkgo/v2 from 2.3.0 to 2.3.1 (#597) [afed901]
+- Bump nokogiri from 1.13.8 to 1.13.9 in /docs (#599) [7c691b3]
+- Bump github.com/google/go-cmp from 0.5.8 to 0.5.9 (#587) [ff22665]
+
+## 1.22.1
+
+## Fixes
+- When passed a context and no explicit timeout, Eventually will only timeout when the context is cancelled [e5105cf]
+- Allow StopTrying() to be wrapped [bf3cba9]
+
+## Maintenance
+- bump to ginkgo v2.3.0 [c5d5c39]
+
+## 1.22.0
+
+### Features
+
+Several improvements have been made to `Eventually` and `Consistently` in this and recent releases:
+
+- Eventually and Consistently can take a context.Context [65c01bc]
+ This enables integration with Ginkgo 2.3.0's interruptible nodes and node timeouts.
+- Eventually and Consistently that are passed a SpecContext can provide reports when an interrupt occurs [0d063c9]
+- Eventually/Consistently will forward an attached context to functions that ask for one [e2091c5]
+- Eventually/Consistently supports passing arguments to functions via WithArguments() [a2dc7c3]
+- Eventually and Consistently can now be stopped early with StopTrying(message) and StopTrying(message).Now() [52976bb]
+
+These improvements are all documented in [Gomega's docs](https://onsi.github.io/gomega/#making-asynchronous-assertions)
+
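+As a quick illustration (a sketch; `isReady` is a hypothetical helper):
+
+```go
+Eventually(func(ctx context.Context, name string) (bool, error) {
+	return isReady(ctx, name)
+}).WithContext(ctx).WithArguments("server-1").Should(BeTrue())
+```
+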
+### Fixes
+
+### Maintenance
+
+## 1.21.1
+
+### Features
+- Eventually and Consistently that are passed a SpecContext can provide reports when an interrupt occurs [0d063c9]
+
+## 1.21.0
+
+### Features
+- Eventually and Consistently can take a context.Context [65c01bc]
+ This enables integration with Ginkgo 2.3.0's interruptible nodes and node timeouts.
+- Introduces Eventually.Within.ProbeEvery with tests and documentation (#591) [f633800]
+- New BeKeyOf matcher with documentation and unit tests (#590) [fb586b3]
+
+### Fixes
+- Cover the entire gmeasure suite with leak detection [8c54344]
+- Fix gmeasure leak [119d4ce]
+- Ignore new Ginkgo ProgressSignal goroutine in gleak [ba548e2]
+
+### Maintenance
+
+- Fixes crashes on newer Ruby 3 installations by upgrading github-pages gem dependency (#596) [12469a0]
+
+
+## 1.20.2
+
+## Fixes
+- label specs that rely on remote access; bump timeout on short-circuit test to make it less flaky [35eeadf]
+- gexec: allow more headroom for SIGABRT-related unit tests (#581) [5b78f40]
+- Enable reading from a closed gbytes.Buffer (#575) [061fd26]
+
+## Maintenance
+- Bump github.com/onsi/ginkgo/v2 from 2.1.5 to 2.1.6 (#583) [55d895b]
+- Bump github.com/onsi/ginkgo/v2 from 2.1.4 to 2.1.5 (#582) [346de7c]
+
+## 1.20.1
+
+## Fixes
+- fix false positive gleaks when using ginkgo -p (#577) [cb46517]
+- Fix typos in gomega_dsl.go (#569) [5f71ed2]
+- don't panic on Eventually(nil), fixing #555 (#567) [9d1186f]
+- vet optional description args in assertions, fixing #560 (#566) [8e37808]
+
+## Maintenance
+- test: add new Go 1.19 to test matrix (#571) [40d7efe]
+- Bump tzinfo from 1.2.9 to 1.2.10 in /docs (#564) [5f26371]
+
+## 1.20.0
+
+## Features
+- New [`gleak`](https://onsi.github.io/gomega/#codegleakcode-finding-leaked-goroutines) experimental goroutine leak detection package! (#538) [85ba7bc]
+- New `BeComparableTo` matcher(#546) that uses `gocmp` to make comparisons [e77ea75]
+- New `HaveExistingField` matcher (#553) [fd130e1]
+- Document how to wrap Gomega (#539) [56714a4]
+
+## Fixes
+- Support pointer receivers in HaveField; fixes #543 (#544) [8dab36e]
+
+## Maintenance
+- Bump various dependencies:
+ - Upgrade to yaml.v3 (#556) [f5a83b1]
+ - Bump github/codeql-action from 1 to 2 (#549) [52f5adf]
+ - Bump github.com/google/go-cmp from 0.5.7 to 0.5.8 (#551) [5f3942d]
+ - Bump nokogiri from 1.13.4 to 1.13.6 in /docs (#554) [eb4b4c2]
+ - Use latest ginkgo (#535) [1c29028]
+ - Bump nokogiri from 1.13.3 to 1.13.4 in /docs (#541) [1ce84d5]
+ - Bump actions/setup-go from 2 to 3 (#540) [755485e]
+ - Bump nokogiri from 1.12.5 to 1.13.3 in /docs (#522) [4fbb0dc]
+ - Bump actions/checkout from 2 to 3 (#526) [ac49202]
+
+## 1.19.0
+
+## Features
+- New [`HaveEach`](https://onsi.github.io/gomega/#haveeachelement-interface) matcher to ensure that each and every element in an `array`, `slice`, or `map` satisfies the passed in matcher. (#523) [9fc2ae2] (#524) [c8ba582]
+- Users can now wrap the `Gomega` interface to implement custom behavior on each assertion. (#521) [1f2e714]
+- [`ContainElement`](https://onsi.github.io/gomega/#containelementelement-interface) now accepts an additional pointer argument. Elements that satisfy the matcher are stored in the pointer enabling developers to easily add subsequent, more detailed, assertions against the matching element. (#527) [1a4e27f]
+
+## Fixes
+- update RELEASING instructions to match ginkgo [0917cde]
+- Bump github.com/onsi/ginkgo/v2 from 2.0.0 to 2.1.3 (#519) [49ab4b0]
+- Fix CVE-2021-38561 (#534) [f1b4456]
+- Fix max number of samples in experiments on non-64-bit systems. (#528) [1c84497]
+- Remove dependency on ginkgo v1.16.4 (#530) [4dea8d5]
+- Fix for Go 1.18 (#532) [56d2a29]
+- Document precedence of timeouts (#533) [b607941]
+
+## 1.18.1
+
+## Fixes
+- Add pointer support to HaveField matcher (#495) [79e41a3]
+
+## 1.18.0
+
+## Features
+- Docs now live on the master branch in the docs folder which will make for easier PRs. The docs also use Ginkgo 2.0's new docs html/css/js. [2570272]
+- New HaveValue matcher can handle actuals that are either values (in which case they are passed on unscathed) or pointers (in which case they are indirected). [Docs here.](https://onsi.github.io/gomega/#working-with-values) (#485) [bdc087c]
+- Gmeasure has been declared GA [360db9d]
+
+## Fixes
+- Gomega now uses ioutil for Go 1.15 and lower (#492) - official support is only for the most recent two major versions of Go but this will unblock users who need to stay on older unsupported versions of Go. [c29c1c0]
+
+## Maintenance
+- Remove Travis workflow (#491) [72e6040]
+- Upgrade to Ginkgo 2.0.0 GA [f383637]
+- chore: fix description of HaveField matcher (#487) [2b4b2c0]
+- use tools.go to ensure Ginkgo cli dependencies are included [f58a52b]
+- remove dockerfile and simplify github actions to match ginkgo's actions [3f8160d]
+
+## 1.17.0
+
+### Features
+- Add HaveField matcher [3a26311]
+- add Error() assertions on the final error value of multi-return values (#480) [2f96943]
+- separate out offsets and timeouts (#478) [18a4723]
+- fix transformation error reporting (#479) [e001fab]
+- allow transform functions to report errors (#472) [bf93408]
+
+### Fixes
+Stop using deprecated ioutil package (#467) [07f405d]
+
+## 1.16.0
+
+### Features
+- feat: HaveHTTPStatus multiple expected values (#465) [aa69f1b]
+- feat: HaveHTTPHeaderWithValue() matcher (#463) [dd83a96]
+- feat: HaveHTTPBody matcher (#462) [504e1f2]
+- feat: formatter for HTTP responses (#461) [e5b3157]
+
+## 1.15.0
+
+### Fixes
+The previous version (1.14.0) introduced a change to allow `Eventually` and `Consistently` to support functions that make assertions. This was accomplished by overriding the global fail handler when running the callbacks passed to `Eventually/Consistently` in order to capture any resulting errors. Issue #457 uncovered a flaw with this approach: when multiple `Eventually`s are running concurrently they race when overriding the singleton global fail handler.
+
+1.15.0 resolves this by requiring users who want to make assertions in `Eventually/Consistently` callbacks to explicitly pass in a function that takes a `Gomega` as an argument. The passed-in `Gomega` instance can be used to make assertions. Any failures will cause `Eventually` to retry the callback. This cleaner interface avoids the issue of swapping out globals but comes at the cost of changing the contract introduced in v1.14.0. As such 1.15.0 introduces a breaking change with respect to 1.14.0 - however we expect that adoption of this feature in 1.14.0 remains limited.
+
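+For example (a sketch; `client` and `serverURL` are hypothetical):
+
+```go
+Eventually(func(g Gomega) {
+	resp, err := client.Get(serverURL)
+	g.Expect(err).NotTo(HaveOccurred())
+	g.Expect(resp.StatusCode).To(Equal(200))
+}).Should(Succeed())
+```
+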
+In addition, 1.15.0 cleans up some of Gomega's internals. Most users shouldn't notice any differences stemming from the refactoring that was made.
+
+## 1.14.0
+
+### Features
+- gmeasure.SamplingConfig now supports a MinSamplingInterval [e94dbca]
+- Eventually and Consistently support functions that make assertions [2f04e6e]
+ - Eventually and Consistently now allow their passed-in functions to make assertions.
+ These assertions must pass or the function is considered to have failed and is retried.
+ - Eventually and Consistently can now take functions with no return values. These implicitly return nil
+ if they contain no failed assertion. Otherwise they return an error wrapping the first assertion failure. This allows
+ these functions to be used with the Succeed() matcher.
+ - Introduce InterceptGomegaFailure - an analogue to InterceptGomegaFailures - that captures the first assertion failure
+ and halts execution in its passed-in callback.
+
+### Fixes
+- Call Verify GHTTPWithGomega receiver funcs (#454) [496e6fd]
+- Build a binary with an expected name (#446) [7356360]
+
+## 1.13.0
+
+### Features
+- gmeasure provides BETA support for benchmarking (#447) [8f2dfbf]
+- Set consistently and eventually defaults on init (#443) [12eb778]
+
+## 1.12.0
+
+### Features
+- Add Satisfy() matcher (#437) [c548f31]
+- tweak truncation message [3360b8c]
+- Add format.GomegaStringer (#427) [cc80b6f]
+- Add Clear() method to gbytes.Buffer [c3c0920]
+
+### Fixes
+- Fix error message in BeNumericallyMatcher (#432) [09c074a]
+- Bump github.com/onsi/ginkgo from 1.12.1 to 1.16.2 (#442) [e5f6ea0]
+- Bump github.com/golang/protobuf from 1.4.3 to 1.5.2 (#431) [adae3bf]
+- Bump golang.org/x/net (#441) [3275b35]
+
+## 1.11.0
+
+### Features
+- feature: add index to gstruct element func (#419) [334e00d]
+- feat(gexec) Add CompileTest functions. Close #410 (#411) [47c613f]
+
+### Fixes
+- Check more carefully for nils in WithTransform (#423) [3c60a15]
+- fix: typo in Makefile [b82522a]
+- Allow WithTransform function to accept a nil value (#422) [b75d2f2]
+- fix: print value type for interface{} containers (#409) [f08e2dc]
+- fix(BeElementOf): consistently flatten expected values [1fa9468]
+
+## 1.10.5
+
+### Fixes
+- fix: collections matchers should display type of expectation (#408) [6b4eb5a]
+- fix(ContainElements): consistently flatten expected values [073b880]
+- fix(ConsistOf): consistently flatten expected values [7266efe]
+
+## 1.10.4
+
+### Fixes
+- update golang net library to more recent version without vulnerability (#406) [817a8b9]
+- Correct spelling: alloted -> allotted (#403) [0bae715]
+- fix a panic in MessageWithDiff with long message (#402) [ea06b9b]
+
+## 1.10.3
+
+### Fixes
+- updates golang/x/net to fix vulnerability detected by snyk (#394) [c479356]
+
+## 1.10.2
+
+### Fixes
+- Add ExpectWithOffset, EventuallyWithOffset and ConsistentlyWithOffset to WithT (#391) [990941a]
+
+## 1.10.1
+
+### Fixes
+- Update dependencies (#389) [9f5eecd]
+
+## 1.10.0
+
+### Features
+- Add HaveHTTPStatusMatcher (#378) [f335c94]
+- Changed matcher for content-type in VerifyJSONRepresenting (#377) [6024f5b]
+- Make ghttp usable with x-unit style tests (#376) [c0be499]
+- Implement PanicWith matcher (#381) [f8032b4]
+
+## 1.9.0
+
+### Features
+- Add ContainElements matcher (#370) [2f57380]
+- Output missing and extra elements in ConsistOf failure message [a31eda7]
+- Document method LargestMatching [7c5a280]
+
+## 1.8.1
+
+### Fixes
+- Fix unexpected MatchError() behaviour (#375) [8ae7b2f]
+
+## 1.8.0
+
+### Features
+- Allow optional description to be lazily evaluated function (#364) [bf64010]
+- Support wrapped errors (#359) [0a981cb]
+
+## 1.7.1
+
+### Fixes
+- Bump go-yaml version to cover fixed ddos heuristic (#362) [95e431e]
+
+## 1.7.0
+
+### Features
+- export format property variables (#347) [642e5ba]
+
+### Fixes
+- minor fix in the documentation of ExpectWithOffset (#358) [beea727]
+
+## 1.6.0
+
+### Features
+
+- Display special chars on error [41e1b26]
+- Add BeElementOf matcher [6a48b48]
+
+### Fixes
+
+- Remove duplication in XML matcher tests [cc1a6cb]
+- Remove unnecessary conversions (#357) [7bf756a]
+- Fixed import order (#353) [2e3b965]
+- Added missing error handling in test (#355) [c98d3eb]
+- Simplify code (#356) [0001ed9]
+- Simplify code (#354) [0d9100e]
+- Fixed typos (#352) [3f647c4]
+- Add failure message tests to BeElementOf matcher [efe19c3]
+- Update go-testcov untested sections [37ee382]
+- Mark all uncovered files so go-testcov ./... works [53b150e]
+- Reenable gotip in travis [5c249dc]
+- Fix the typo of comment (#345) [f0e010e]
+- Optimize contain_element_matcher [abeb93d]
+
+
+## 1.5.0
+
+### Features
+
+- Added MatchKeys matchers [8b909fc]
+
+### Fixes and Minor Improvements
+
+- Add type aliases to remove stuttering [03b0461]
+- Don't run session_test.go on windows (#324) [5533ce8]
+
+## 1.4.3
+
+### Fixes:
+
+- ensure file name and line numbers are correctly reported for XUnit [6fff58f]
+- Fixed matcher for content-type (#305) [69d9b43]
+
+## 1.4.2
+
+### Fixes:
+
+- Add go.mod and go.sum files to define the gomega go module [f3de367, a085d30]
+- Work around go vet issue with Go v1.11 (#300) [40dd6ad]
+- Better output when using with go XUnit-style tests, fixes #255 (#297) [29a4b97]
+- Fix MatchJSON fail to parse json.RawMessage (#298) [ae19f1b]
+- show threshold in failure message of BeNumericallyMatcher (#293) [4bbecc8]
+
+## 1.4.1
+
+### Fixes:
+
+- Update documentation formatting and examples (#289) [9be8410]
+- allow 'Receive' matcher to be used with concrete types (#286) [41673fd]
+- Fix data race in ghttp server (#283) [7ac6b01]
+- Travis badge should only show master [cc102ab]
+
+## 1.4.0
+
+### Features
+- Make string pretty diff user configurable (#273) [eb112ce, 649b44d]
+
+### Fixes
+- Use httputil.DumpRequest to pretty-print unhandled requests (#278) [a4ff0fc, b7d1a52]
+- fix typo floa32 > float32 (#272) [041ae3b, 6e33911]
+- Fix link to documentation on adding your own matchers (#270) [bb2c830, fcebc62]
+- Use setters and getters to avoid race condition (#262) [13057c3, a9c79f1]
+- Avoid sending a signal if the process is not alive (#259) [b8043e5, 4fc1762]
+- Improve message from AssignableToTypeOf when expected value is nil (#281) [9c1fb20]
+
+## 1.3.0
+
+Improvements:
+
+- The `Equal` matcher matches byte slices more performantly.
+- Improved how `MatchError` matches error strings.
+- `MatchXML` ignores the order of xml node attributes.
+- Improve support for XUnit style golang tests. ([#254](https://github.com/onsi/gomega/issues/254))
+
+Bug Fixes:
+
+- Diff generation now handles multi-byte sequences correctly.
+- Multiple goroutines can now call `gexec.Build` concurrently.
+
+## 1.2.0
+
+Improvements:
+
+- Added `BeSent` which attempts to send a value down a channel and fails if the attempt blocks. Can be paired with `Eventually` to safely send a value down a channel with a timeout.
+- `Ω`, `Expect`, `Eventually`, and `Consistently` now immediately `panic` if there is no registered fail handler. This is always a mistake that can hide failing tests.
+- `Receive()` no longer errors when passed a closed channel; it's perfectly fine to attempt to read from a closed channel, so `Ω(c).Should(Receive())` always fails and `Ω(c).ShouldNot(Receive())` always passes with a closed channel.
+- Added `HavePrefix` and `HaveSuffix` matchers.
+- `ghttp` can now handle concurrent requests.
+- Added `Succeed` which allows one to write `Ω(MyFunction()).Should(Succeed())`.
+- Improved `ghttp`'s behavior around failing assertions and panics:
+ - If a registered handler makes a failing assertion `ghttp` will return `500`.
+ - If a registered handler panics, `ghttp` will return `500` *and* fail the test. This is new behavior that may cause existing code to break. This code is almost certainly incorrect and creating a false positive.
+- `ghttp` servers can take an `io.Writer`. `ghttp` will write a line to the writer when each request arrives.
+- Added `WithTransform` matcher to allow munging input data before feeding into the relevant matcher
+- Added boolean `And`, `Or`, and `Not` matchers to allow creating composite matchers
+- Added `gbytes.TimeoutCloser`, `gbytes.TimeoutReader`, and `gbytes.TimeoutWriter` - these are convenience wrappers that timeout if the underlying Closer/Reader/Writer does not return within the allotted time.
+- Added `gbytes.BufferReader` - this constructs a `gbytes.Buffer` that asynchronously reads the passed-in `io.Reader` into its buffer.
+
+Bug Fixes:
+- gexec: `session.Wait` now uses `EventuallyWithOffset` to get the right line number in the failure.
+- `ContainElement` no longer bails if a passed-in matcher errors.
+
+## 1.0 (8/2/2014)
+
+No changes. Dropping "beta" from the version number.
+
+## 1.0.0-beta (7/8/2014)
+Breaking Changes:
+
+- Changed OmegaMatcher interface. Instead of having `Match` return failure messages, two new methods `FailureMessage` and `NegatedFailureMessage` are called instead.
+- Moved and renamed OmegaFailHandler to types.GomegaFailHandler and OmegaMatcher to types.GomegaMatcher. Any references to OmegaMatcher in any custom matchers will need to be changed to point to types.GomegaMatcher
+
+New Test-Support Features:
+
+- `ghttp`: supports testing http clients
+ - Provides a flexible fake http server
+ - Provides a collection of chainable http handlers that perform assertions.
+- `gbytes`: supports making ordered assertions against streams of data
+ - Provides a `gbytes.Buffer`
+ - Provides a `Say` matcher to perform ordered assertions against output data
+- `gexec`: supports testing external processes
+ - Provides support for building Go binaries
+ - Wraps and starts `exec.Cmd` commands
+ - Makes it easy to assert against stdout and stderr
+ - Makes it easy to send signals and wait for processes to exit
+ - Provides an `Exit` matcher to assert against exit code.
+
+DSL Changes:
+
+- `Eventually` and `Consistently` can accept `time.Duration` values for their timeout and polling-interval inputs.
+- The default timeouts for `Eventually` and `Consistently` are now configurable.
+
+New Matchers:
+
+- `ConsistOf`: order-independent assertion against the elements of an array/slice or keys of a map.
+- `BeTemporally`: like `BeNumerically` but for `time.Time`
+- `HaveKeyWithValue`: asserts a map has a given key with the given value.
+
+Updated Matchers:
+
+- `Receive` matcher can take a matcher as an argument and passes only if the channel under test receives an object that satisfies the passed-in matcher.
+- Matchers that implement `MatchMayChangeInTheFuture(actual interface{}) bool` can inform `Eventually` and/or `Consistently` when a match has no chance of changing status in the future. For example, `Receive` returns `false` when a channel is closed.
+
+Misc:
+
+- Start using semantic versioning
+- Start maintaining changelog
+
+Major refactor:
+
+- Pull out Gomega's internals into an `internal` package
diff --git a/vendor/github.com/onsi/gomega/CONTRIBUTING.md b/vendor/github.com/onsi/gomega/CONTRIBUTING.md
new file mode 100644
index 0000000..0d7a099
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/CONTRIBUTING.md
@@ -0,0 +1,14 @@
+# Contributing to Gomega
+
+Your contributions to Gomega are essential for its long-term maintenance and improvement. To make a contribution:
+
+- Please **open an issue first** - describe what problem you are trying to solve and give the community a forum for input and feedback ahead of investing time in writing code!
+- Ensure adequate test coverage:
+ - Make sure to add appropriate unit tests
+ - Please run all tests locally (`ginkgo -r -p`) and make sure they go green before submitting the PR
+ - Please run the following linter locally (`go vet ./...`) and make sure its output does not contain any warnings
+- Update the documentation. In addition to standard `godoc` comments Gomega has extensive documentation on the `gh-pages` branch. If relevant, please submit a docs PR to that branch alongside your code PR.
+
+If you're a committer, check out RELEASING.md to learn how to cut a release.
+
+Thanks for supporting Gomega!
diff --git a/vendor/github.com/onsi/gomega/LICENSE b/vendor/github.com/onsi/gomega/LICENSE
new file mode 100644
index 0000000..9415ee7
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2013-2014 Onsi Fakhouri
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/onsi/gomega/README.md b/vendor/github.com/onsi/gomega/README.md
new file mode 100644
index 0000000..d45a8c4
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/README.md
@@ -0,0 +1,21 @@
+![Gomega: Ginkgo's Preferred Matcher Library](./docs/gomega.png)
+
+[![test](https://github.com/onsi/gomega/actions/workflows/test.yml/badge.svg)](https://github.com/onsi/gomega/actions/workflows/test.yml)
+
+Jump straight to the [docs](http://onsi.github.io/gomega/) to learn about Gomega, including a list of [all available matchers](http://onsi.github.io/gomega/#provided-matchers).
+
+If you have a question, comment, bug report, feature request, etc. please open a GitHub issue.
+
+## [Ginkgo](http://github.com/onsi/ginkgo): a BDD Testing Framework for Golang
+
+Learn more about Ginkgo [here](http://onsi.github.io/ginkgo/)
+
+## Community Matchers
+
+A collection of community matchers is available on the [wiki](https://github.com/onsi/gomega/wiki).
+
+## License
+
+Gomega is MIT-Licensed
+
+The `ConsistOf` matcher uses [goraph](https://github.com/amitkgupta/goraph) which is embedded in the source to simplify distribution. goraph has an MIT license.
diff --git a/vendor/github.com/onsi/gomega/RELEASING.md b/vendor/github.com/onsi/gomega/RELEASING.md
new file mode 100644
index 0000000..9973fff
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/RELEASING.md
@@ -0,0 +1,23 @@
+A Gomega release is a tagged sha and a GitHub release. To cut a release:
+
+1. Ensure CHANGELOG.md is up to date.
+ - Use
+ ```bash
+ LAST_VERSION=$(git tag --sort=version:refname | tail -n1)
+ CHANGES=$(git log --pretty=format:'- %s [%h]' HEAD...$LAST_VERSION)
+ echo -e "## NEXT\n\n$CHANGES\n\n### Features\n\n### Fixes\n\n### Maintenance\n\n$(cat CHANGELOG.md)" > CHANGELOG.md
+ ```
+ to update the changelog
+ - Categorize the changes into
+ - Breaking Changes (requires a major version)
+ - New Features (minor version)
+ - Fixes (fix version)
+ - Maintenance (which in general should not be mentioned in `CHANGELOG.md` as they have no user impact)
+1. Update GOMEGA_VERSION in `gomega_dsl.go`
+1. Commit, push, and release:
+ ```
+ git commit -m "vM.m.p"
+ git push
+ gh release create "vM.m.p"
+ git fetch --tags origin master
+ ```
\ No newline at end of file
diff --git a/vendor/github.com/onsi/gomega/format/format.go b/vendor/github.com/onsi/gomega/format/format.go
new file mode 100644
index 0000000..6c16806
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/format/format.go
@@ -0,0 +1,506 @@
+/*
+Gomega's format package pretty-prints objects. It explores input objects recursively and generates formatted, indented output with type information.
+*/
+
+// untested sections: 4
+
+package format
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Use MaxDepth to set the maximum recursion depth when printing deeply nested objects
+var MaxDepth = uint(10)
+
+// MaxLength is the maximum length of the string representation of an object.
+// If MaxLength is set to 0, the object will not be truncated.
+var MaxLength = 4000
+
+/*
+By default, all objects (even those that implement fmt.Stringer and fmt.GoStringer) are recursively inspected to generate output.
+
+Set UseStringerRepresentation = true to use GoString (for fmt.GoStringers) or String (for fmt.Stringer) instead.
+
+Note that GoString and String don't always have all the information you need to understand why a test failed!
+*/
+var UseStringerRepresentation = false
+
+/*
+Print the content of context objects. By default it will be suppressed.
+
+Set PrintContextObjects = true to enable printing of the context internals.
+*/
+var PrintContextObjects = false
+
+// TruncatedDiff controls whether a truncated pretty diff is displayed or not
+var TruncatedDiff = true
+
+// TruncateThreshold (default 50) specifies the maximum length string to print in string comparison assertion error
+// messages.
+var TruncateThreshold uint = 50
+
+// CharactersAroundMismatchToInclude (default 5) specifies how many contextual characters should be printed before and
+// after the first diff location in a truncated string assertion error message.
+var CharactersAroundMismatchToInclude uint = 5
+
+var contextType = reflect.TypeOf((*context.Context)(nil)).Elem()
+var timeType = reflect.TypeOf(time.Time{})
+
+// The default indentation string emitted by the format package
+var Indent = "    "
+
+var longFormThreshold = 20
+
+// GomegaStringer allows for custom formatting of objects for gomega.
+type GomegaStringer interface {
+ // GomegaString will be used to custom format an object.
+ // It does not follow UseStringerRepresentation value and will always be called regardless.
+ // It also ignores the MaxLength value.
+ GomegaString() string
+}
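+
+// For example (an illustrative sketch; Celsius is a hypothetical user-defined type):
+//
+//	type Celsius float64
+//
+//	func (c Celsius) GomegaString() string {
+//		return fmt.Sprintf("%.1f°C", float64(c))
+//	}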
+
+/*
+CustomFormatters can be registered with Gomega via RegisterCustomFormatter().
+Any value to be rendered by Gomega is passed to each registered CustomFormatter.
+A CustomFormatter signals that it will handle formatting the value by returning (formatted-string, true).
+If a CustomFormatter does not want to handle the object it should return ("", false).
+
+Strings returned by CustomFormatters are not truncated.
+*/
+type CustomFormatter func(value interface{}) (string, bool)
+type CustomFormatterKey uint
+
+var customFormatterKey CustomFormatterKey = 1
+
+type customFormatterKeyPair struct {
+ CustomFormatter
+ CustomFormatterKey
+}
+
+/*
+RegisterCustomFormatter registers a CustomFormatter and returns a CustomFormatterKey
+
+You can call UnregisterCustomFormatter with the returned key to unregister the associated CustomFormatter
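+
+For example (a minimal sketch; Celsius is the hypothetical type from the GomegaStringer example above):
+
+	key := RegisterCustomFormatter(func(value interface{}) (string, bool) {
+		if c, ok := value.(Celsius); ok {
+			return fmt.Sprintf("%.1f°C", float64(c)), true
+		}
+		return "", false
+	})
+	defer UnregisterCustomFormatter(key)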
+*/
+func RegisterCustomFormatter(customFormatter CustomFormatter) CustomFormatterKey {
+ key := customFormatterKey
+ customFormatterKey += 1
+ customFormatters = append(customFormatters, customFormatterKeyPair{customFormatter, key})
+ return key
+}
+
+/*
+UnregisterCustomFormatter unregisters a previously registered CustomFormatter. You should pass in the key returned by RegisterCustomFormatter
+*/
+func UnregisterCustomFormatter(key CustomFormatterKey) {
+ formatters := []customFormatterKeyPair{}
+ for _, f := range customFormatters {
+ if f.CustomFormatterKey == key {
+ continue
+ }
+ formatters = append(formatters, f)
+ }
+ customFormatters = formatters
+}
+
+var customFormatters = []customFormatterKeyPair{}
+
+/*
+Generates a formatted matcher success/failure message of the form:
+
+	Expected
+		<pretty printed actual>
+	<message>
+		<pretty printed expected>
+
+If expected is omitted, then the message looks like:
+
+	Expected
+		<pretty printed actual>
+	<message>
+*/
+func Message(actual interface{}, message string, expected ...interface{}) string {
+ if len(expected) == 0 {
+ return fmt.Sprintf("Expected\n%s\n%s", Object(actual, 1), message)
+ }
+ return fmt.Sprintf("Expected\n%s\n%s\n%s", Object(actual, 1), message, Object(expected[0], 1))
+}
+
+/*
+
+Generates a nicely formatted matcher success/failure message.
+
+Much like Message(...), but it attempts to pretty print diffs in strings:
+
+	Expected
+	    <string>: "...aaaaabaaaaa..."
+	to equal               |
+	    <string>: "...aaaaazaaaaa..."
+
+*/
+
+func MessageWithDiff(actual, message, expected string) string {
+ if TruncatedDiff && len(actual) >= int(TruncateThreshold) && len(expected) >= int(TruncateThreshold) {
+ diffPoint := findFirstMismatch(actual, expected)
+ formattedActual := truncateAndFormat(actual, diffPoint)
+ formattedExpected := truncateAndFormat(expected, diffPoint)
+
+ spacesBeforeFormattedMismatch := findFirstMismatch(formattedActual, formattedExpected)
+
+ tabLength := 4
+ spaceFromMessageToActual := tabLength + len(": ") - len(message)
+
+ paddingCount := spaceFromMessageToActual + spacesBeforeFormattedMismatch
+ if paddingCount < 0 {
+ return Message(formattedActual, message, formattedExpected)
+ }
+
+ padding := strings.Repeat(" ", paddingCount) + "|"
+ return Message(formattedActual, message+padding, formattedExpected)
+ }
+
+ actual = escapedWithGoSyntax(actual)
+ expected = escapedWithGoSyntax(expected)
+
+ return Message(actual, message, expected)
+}
+
+func escapedWithGoSyntax(str string) string {
+ withQuotes := fmt.Sprintf("%q", str)
+ return withQuotes[1 : len(withQuotes)-1]
+}
+
+func truncateAndFormat(str string, index int) string {
+ leftPadding := `...`
+ rightPadding := `...`
+
+ start := index - int(CharactersAroundMismatchToInclude)
+ if start < 0 {
+ start = 0
+ leftPadding = ""
+ }
+
+ // slice index must include the mismatched character
+ lengthOfMismatchedCharacter := 1
+ end := index + int(CharactersAroundMismatchToInclude) + lengthOfMismatchedCharacter
+ if end > len(str) {
+ end = len(str)
+ rightPadding = ""
+ }
+ return fmt.Sprintf("\"%s\"", leftPadding+str[start:end]+rightPadding)
+}
+
+func findFirstMismatch(a, b string) int {
+ aSlice := strings.Split(a, "")
+ bSlice := strings.Split(b, "")
+
+ for index, str := range aSlice {
+ if index > len(bSlice)-1 {
+ return index
+ }
+ if str != bSlice[index] {
+ return index
+ }
+ }
+
+ if len(b) > len(a) {
+ return len(a) + 1
+ }
+
+ return 0
+}
+
+const truncateHelpText = `
+Gomega truncated this representation as it exceeds 'format.MaxLength'.
+Consider having the object provide a custom 'GomegaStringer' representation
+or adjust the parameters in Gomega's 'format' package.
+
+Learn more here: https://onsi.github.io/gomega/#adjusting-output
+`
+
+func truncateLongStrings(s string) string {
+ if MaxLength > 0 && len(s) > MaxLength {
+ var sb strings.Builder
+ for i, r := range s {
+ if i < MaxLength {
+ sb.WriteRune(r)
+ continue
+ }
+ break
+ }
+
+ sb.WriteString("...\n")
+ sb.WriteString(truncateHelpText)
+
+ return sb.String()
+ }
+ return s
+}
+
+/*
+Pretty prints the passed in object at the passed in indentation level.
+
+Object recurses into deeply nested objects emitting pretty-printed representations of their components.
+
+Modify format.MaxDepth to control how deep the recursion is allowed to go
+Set format.UseStringerRepresentation to true to return object.GoString() or object.String() when available instead of
+recursing into the object.
+
+Set PrintContextObjects to true to print the content of objects implementing context.Context
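+
+For example (illustrative; exact spacing depends on the Indent setting):
+
+	Object([]int{1, 2, 3}, 1)
+	// returns a string like: "    <[]int | len:3, cap:3>: [1, 2, 3]"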
+*/
+func Object(object interface{}, indentation uint) string {
+ indent := strings.Repeat(Indent, int(indentation))
+ value := reflect.ValueOf(object)
+ commonRepresentation := ""
+ if err, ok := object.(error); ok && !isNilValue(value) { // isNilValue check needed here to avoid nil deref due to boxed nil
+ commonRepresentation += "\n" + IndentString(err.Error(), indentation) + "\n" + indent
+ }
+ return fmt.Sprintf("%s<%s>: %s%s", indent, formatType(value), commonRepresentation, formatValue(value, indentation))
+}
+
+/*
+IndentString takes a string and indents each line by the specified amount.
+*/
+func IndentString(s string, indentation uint) string {
+ return indentString(s, indentation, true)
+}
+
+func indentString(s string, indentation uint, indentFirstLine bool) string {
+ result := &strings.Builder{}
+ components := strings.Split(s, "\n")
+ indent := strings.Repeat(Indent, int(indentation))
+ for i, component := range components {
+ if i > 0 || indentFirstLine {
+ result.WriteString(indent)
+ }
+ result.WriteString(component)
+ if i < len(components)-1 {
+ result.WriteString("\n")
+ }
+ }
+
+ return result.String()
+}
+
+func formatType(v reflect.Value) string {
+ switch v.Kind() {
+ case reflect.Invalid:
+ return "nil"
+ case reflect.Chan:
+ return fmt.Sprintf("%s | len:%d, cap:%d", v.Type(), v.Len(), v.Cap())
+ case reflect.Ptr:
+ return fmt.Sprintf("%s | 0x%x", v.Type(), v.Pointer())
+ case reflect.Slice:
+ return fmt.Sprintf("%s | len:%d, cap:%d", v.Type(), v.Len(), v.Cap())
+ case reflect.Map:
+ return fmt.Sprintf("%s | len:%d", v.Type(), v.Len())
+ default:
+ return v.Type().String()
+ }
+}
+
+func formatValue(value reflect.Value, indentation uint) string {
+ if indentation > MaxDepth {
+ return "..."
+ }
+
+ if isNilValue(value) {
+ return "nil"
+ }
+
+ if value.CanInterface() {
+ obj := value.Interface()
+
+ // if a CustomFormatter handles this value, we'll go with that
+ for _, customFormatter := range customFormatters {
+ formatted, handled := customFormatter.CustomFormatter(obj)
+ // do not truncate a user-provided CustomFormatter()
+ if handled {
+ return indentString(formatted, indentation+1, false)
+ }
+ }
+
+ // GomegaStringer takes precedence over other representations and disregards UseStringerRepresentation
+ if x, ok := obj.(GomegaStringer); ok {
+ // do not truncate a user-defined GomegaString() value
+ return indentString(x.GomegaString(), indentation+1, false)
+ }
+
+ if UseStringerRepresentation {
+ switch x := obj.(type) {
+ case fmt.GoStringer:
+ return indentString(truncateLongStrings(x.GoString()), indentation+1, false)
+ case fmt.Stringer:
+ return indentString(truncateLongStrings(x.String()), indentation+1, false)
+ }
+ }
+ }
+
+ if !PrintContextObjects {
+ if value.Type().Implements(contextType) && indentation > 1 {
+ return ""
+ }
+ }
+
+ switch value.Kind() {
+ case reflect.Bool:
+ return fmt.Sprintf("%v", value.Bool())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return fmt.Sprintf("%v", value.Int())
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return fmt.Sprintf("%v", value.Uint())
+ case reflect.Uintptr:
+ return fmt.Sprintf("0x%x", value.Uint())
+ case reflect.Float32, reflect.Float64:
+ return fmt.Sprintf("%v", value.Float())
+ case reflect.Complex64, reflect.Complex128:
+ return fmt.Sprintf("%v", value.Complex())
+ case reflect.Chan:
+ return fmt.Sprintf("0x%x", value.Pointer())
+ case reflect.Func:
+ return fmt.Sprintf("0x%x", value.Pointer())
+ case reflect.Ptr:
+ return formatValue(value.Elem(), indentation)
+ case reflect.Slice:
+ return truncateLongStrings(formatSlice(value, indentation))
+ case reflect.String:
+ return truncateLongStrings(formatString(value.String(), indentation))
+ case reflect.Array:
+ return truncateLongStrings(formatSlice(value, indentation))
+ case reflect.Map:
+ return truncateLongStrings(formatMap(value, indentation))
+ case reflect.Struct:
+ if value.Type() == timeType && value.CanInterface() {
+ t, _ := value.Interface().(time.Time)
+ return t.Format(time.RFC3339Nano)
+ }
+ return truncateLongStrings(formatStruct(value, indentation))
+ case reflect.Interface:
+ return formatInterface(value, indentation)
+ default:
+ if value.CanInterface() {
+ return truncateLongStrings(fmt.Sprintf("%#v", value.Interface()))
+ }
+ return truncateLongStrings(fmt.Sprintf("%#v", value))
+ }
+}
+
+func formatString(object interface{}, indentation uint) string {
+ if indentation == 1 {
+ s := fmt.Sprintf("%s", object)
+ components := strings.Split(s, "\n")
+ result := ""
+ for i, component := range components {
+ if i == 0 {
+ result += component
+ } else {
+ result += Indent + component
+ }
+ if i < len(components)-1 {
+ result += "\n"
+ }
+ }
+
+ return result
+ } else {
+ return fmt.Sprintf("%q", object)
+ }
+}
+
+func formatSlice(v reflect.Value, indentation uint) string {
+ if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 && isPrintableString(string(v.Bytes())) {
+ return formatString(v.Bytes(), indentation)
+ }
+
+ l := v.Len()
+ result := make([]string, l)
+ longest := 0
+ for i := 0; i < l; i++ {
+ result[i] = formatValue(v.Index(i), indentation+1)
+ if len(result[i]) > longest {
+ longest = len(result[i])
+ }
+ }
+
+ if longest > longFormThreshold {
+ indenter := strings.Repeat(Indent, int(indentation))
+ return fmt.Sprintf("[\n%s%s,\n%s]", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter)
+ }
+ return fmt.Sprintf("[%s]", strings.Join(result, ", "))
+}
+
+func formatMap(v reflect.Value, indentation uint) string {
+ l := v.Len()
+ result := make([]string, l)
+
+ longest := 0
+ for i, key := range v.MapKeys() {
+ value := v.MapIndex(key)
+ result[i] = fmt.Sprintf("%s: %s", formatValue(key, indentation+1), formatValue(value, indentation+1))
+ if len(result[i]) > longest {
+ longest = len(result[i])
+ }
+ }
+
+ if longest > longFormThreshold {
+ indenter := strings.Repeat(Indent, int(indentation))
+ return fmt.Sprintf("{\n%s%s,\n%s}", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter)
+ }
+ return fmt.Sprintf("{%s}", strings.Join(result, ", "))
+}
+
+func formatStruct(v reflect.Value, indentation uint) string {
+ t := v.Type()
+
+ l := v.NumField()
+ result := []string{}
+ longest := 0
+ for i := 0; i < l; i++ {
+ structField := t.Field(i)
+ fieldEntry := v.Field(i)
+ representation := fmt.Sprintf("%s: %s", structField.Name, formatValue(fieldEntry, indentation+1))
+ result = append(result, representation)
+ if len(representation) > longest {
+ longest = len(representation)
+ }
+ }
+ if longest > longFormThreshold {
+ indenter := strings.Repeat(Indent, int(indentation))
+ return fmt.Sprintf("{\n%s%s,\n%s}", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter)
+ }
+ return fmt.Sprintf("{%s}", strings.Join(result, ", "))
+}
+
+func formatInterface(v reflect.Value, indentation uint) string {
+ return fmt.Sprintf("<%s>%s", formatType(v.Elem()), formatValue(v.Elem(), indentation))
+}
+
+func isNilValue(a reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Invalid:
+ return true
+ case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return a.IsNil()
+ }
+
+ return false
+}
+
+/*
+Returns true when the string is entirely made of printable runes, false otherwise.
+*/
+func isPrintableString(str string) bool {
+ for _, runeValue := range str {
+ if !strconv.IsPrint(runeValue) {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go
new file mode 100644
index 0000000..9697d51
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/gomega_dsl.go
@@ -0,0 +1,537 @@
+/*
+Gomega is the Ginkgo BDD-style testing framework's preferred matcher library.
+
+The godoc documentation describes Gomega's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/gomega/
+
+Gomega on Github: http://github.com/onsi/gomega
+
+Learn more about Ginkgo online: http://onsi.github.io/ginkgo
+
+Ginkgo on Github: http://github.com/onsi/ginkgo
+
+Gomega is MIT-Licensed
+*/
+package gomega
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/onsi/gomega/internal"
+ "github.com/onsi/gomega/types"
+)
+
+const GOMEGA_VERSION = "1.33.1"
+
+const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler.
+If you're using Ginkgo then you probably forgot to put your assertion in an It().
+Alternatively, you may have forgotten to register a fail handler with RegisterFailHandler() or RegisterTestingT().
+Depending on your vendoring solution you may be inadvertently importing gomega and subpackages (e.g. ghttp, gexec,...) from different locations.
+`
+
+// Gomega describes the essential Gomega DSL. This interface allows libraries
+// to abstract between the standard package-level function implementations
+// and alternatives like *WithT.
+//
+// The types in the top-level DSL have gotten a bit messy due to earlier deprecations that avoid stuttering
+// and due to an accidental use of a concrete type (*WithT) in an earlier release.
+//
+// As of 1.15 both the WithT and Ginkgo variants of Gomega are implemented by the same underlying object
+// however one (the Ginkgo variant) is exported as an interface (types.Gomega) whereas the other (the withT variant)
+// is shared as a concrete type (*WithT, which is aliased to *internal.Gomega). 1.15 did not clean this mess up to ensure
+// that declarations of *WithT in existing code are not broken by the upgrade to 1.15.
+type Gomega = types.Gomega
+
+// Default supplies the standard package-level implementation
+var Default = Gomega(internal.NewGomega(internal.FetchDefaultDurationBundle()))
+
+// NewGomega returns an instance of Gomega wired into the passed-in fail handler.
+// You generally don't need to use this when using Ginkgo - RegisterFailHandler will wire up the global gomega
+// However creating a NewGomega with a custom fail handler can be useful in contexts where you want to use Gomega's
+// rich ecosystem of matchers without causing a test to fail. For example, to aggregate a series of potential failures
+// or for use in a non-test setting.
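+//
+// For example (a minimal sketch; port is a hypothetical value under inspection):
+//
+//	var problems []string
+//	g := NewGomega(func(message string, callerSkip ...int) {
+//		problems = append(problems, message)
+//	})
+//	g.Expect(port).To(BeNumerically(">", 0))
+//	// problems now holds any failure messages instead of failing a test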
+func NewGomega(fail types.GomegaFailHandler) Gomega {
+ return internal.NewGomega(internalGomega(Default).DurationBundle).ConfigureWithFailHandler(fail)
+}
+
+// WithT wraps a *testing.T and provides `Expect`, `Eventually`, and `Consistently` methods. This allows you to leverage
+// Gomega's rich ecosystem of matchers in standard `testing` test suites.
+//
+// Use `NewWithT` to instantiate a `WithT`
+//
+// As of 1.15 both the WithT and Ginkgo variants of Gomega are implemented by the same underlying object
+// however one (the Ginkgo variant) is exported as an interface (types.Gomega) whereas the other (the withT variant)
+// is shared as a concrete type (*WithT, which is aliased to *internal.Gomega). 1.15 did not clean this mess up to ensure
+// that declarations of *WithT in existing code are not broken by the upgrade to 1.15.
+type WithT = internal.Gomega
+
+// GomegaWithT is deprecated in favor of gomega.WithT, which does not stutter.
+type GomegaWithT = WithT
+
+// inner is an interface that allows users to provide a wrapper around Default. The wrapper
+// must implement the inner interface and return either the original Default or the result of
+// a call to NewGomega().
+type inner interface {
+ Inner() Gomega
+}
+
+func internalGomega(g Gomega) *internal.Gomega {
+ if v, ok := g.(inner); ok {
+ return v.Inner().(*internal.Gomega)
+ }
+ return g.(*internal.Gomega)
+}
+
+// NewWithT takes a *testing.T and returns a `gomega.WithT` allowing you to use `Expect`, `Eventually`, and `Consistently` along with
+// Gomega's rich ecosystem of matchers in standard `testing` test suites.
+//
+// func TestFarmHasCow(t *testing.T) {
+// g := gomega.NewWithT(t)
+//
+// f := farm.New([]string{"Cow", "Horse"})
+// g.Expect(f.HasCow()).To(BeTrue(), "Farm should have cow")
+// }
+func NewWithT(t types.GomegaTestingT) *WithT {
+ return internal.NewGomega(internalGomega(Default).DurationBundle).ConfigureWithT(t)
+}
+
+// NewGomegaWithT is deprecated in favor of gomega.NewWithT, which does not stutter.
+var NewGomegaWithT = NewWithT
+
+// RegisterFailHandler connects Ginkgo to Gomega. When a matcher fails
+// the fail handler passed into RegisterFailHandler is called.
+func RegisterFailHandler(fail types.GomegaFailHandler) {
+ internalGomega(Default).ConfigureWithFailHandler(fail)
+}
+
+// RegisterFailHandlerWithT is deprecated and will be removed in a future release.
+// Users should use RegisterFailHandler or RegisterTestingT instead.
+func RegisterFailHandlerWithT(_ types.GomegaTestingT, fail types.GomegaFailHandler) {
+ fmt.Println("RegisterFailHandlerWithT is deprecated. Please use RegisterFailHandler or RegisterTestingT instead.")
+ internalGomega(Default).ConfigureWithFailHandler(fail)
+}
+
+// RegisterTestingT connects Gomega to Golang's XUnit-style
+// testing.T tests. It is now deprecated and you should use NewWithT() instead to get a fresh instance of Gomega for each test.
+func RegisterTestingT(t types.GomegaTestingT) {
+ internalGomega(Default).ConfigureWithT(t)
+}
+
+// InterceptGomegaFailures runs a given callback and returns an array of
+// failure messages generated by any Gomega assertions within the callback.
+// Execution continues after the first failure allowing users to collect all failures
+// in the callback.
+//
+// This is most useful when testing custom matchers, but can also be used to check
+// on a value using a Gomega assertion without causing a test failure.
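+//
+// For example (a sketch):
+//
+//	failures := InterceptGomegaFailures(func() {
+//		Expect(1).To(Equal(2))
+//		Expect("a").To(HavePrefix("b"))
+//	})
+//	// failures now holds both failure messages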
+func InterceptGomegaFailures(f func()) []string {
+ originalHandler := internalGomega(Default).Fail
+ failures := []string{}
+ internalGomega(Default).Fail = func(message string, callerSkip ...int) {
+ failures = append(failures, message)
+ }
+ defer func() {
+ internalGomega(Default).Fail = originalHandler
+ }()
+ f()
+ return failures
+}
+
+// InterceptGomegaFailure runs a given callback and returns the first
+// failure message generated by any Gomega assertions within the callback, wrapped in an error.
+//
+// The callback ceases execution as soon as the first failed assertion occurs, however Gomega
+// does not register a failure with the FailHandler registered via RegisterFailHandler - it is up
+// to the user to decide what to do with the returned error.
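+//
+// For example (a sketch; loadConfig is a hypothetical helper):
+//
+//	if err := InterceptGomegaFailure(func() {
+//		Expect(loadConfig()).To(Succeed())
+//	}); err != nil {
+//		// handle the first failure without failing the test
+//	}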
+func InterceptGomegaFailure(f func()) (err error) {
+ originalHandler := internalGomega(Default).Fail
+ internalGomega(Default).Fail = func(message string, callerSkip ...int) {
+ err = errors.New(message)
+ panic("stop execution")
+ }
+
+ defer func() {
+ internalGomega(Default).Fail = originalHandler
+ if e := recover(); e != nil {
+ if err == nil {
+ panic(e)
+ }
+ }
+ }()
+
+ f()
+ return err
+}
+
+func ensureDefaultGomegaIsConfigured() {
+ if !internalGomega(Default).IsConfigured() {
+ panic(nilGomegaPanic)
+ }
+}
+
+// Ω wraps an actual value allowing assertions to be made on it:
+//
+// Ω("foo").Should(Equal("foo"))
+//
+// If Ω is passed more than one argument it will pass the *first* argument to the matcher.
+// All subsequent arguments will be required to be nil/zero.
+//
+// This is convenient if you want to make an assertion on a method/function that returns
+// a value and an error - a common pattern in Go.
+//
+// For example, given a function with signature:
+//
+// func MyAmazingThing() (int, error)
+//
+// Then:
+//
+// Ω(MyAmazingThing()).Should(Equal(3))
+//
+// Will succeed only if `MyAmazingThing()` returns `(3, nil)`
+//
+// Ω and Expect are identical
+func Ω(actual interface{}, extra ...interface{}) Assertion {
+ ensureDefaultGomegaIsConfigured()
+ return Default.Ω(actual, extra...)
+}
+
+// Expect wraps an actual value allowing assertions to be made on it:
+//
+// Expect("foo").To(Equal("foo"))
+//
+// If Expect is passed more than one argument it will pass the *first* argument to the matcher.
+// All subsequent arguments will be required to be nil/zero.
+//
+// This is convenient if you want to make an assertion on a method/function that returns
+// a value and an error - a common pattern in Go.
+//
+// For example, given a function with signature:
+//
+// func MyAmazingThing() (int, error)
+//
+// Then:
+//
+// Expect(MyAmazingThing()).Should(Equal(3))
+//
+// Will succeed only if `MyAmazingThing()` returns `(3, nil)`
+//
+// Expect and Ω are identical
+func Expect(actual interface{}, extra ...interface{}) Assertion {
+ ensureDefaultGomegaIsConfigured()
+ return Default.Expect(actual, extra...)
+}
+
+// ExpectWithOffset wraps an actual value allowing assertions to be made on it:
+//
+// ExpectWithOffset(1, "foo").To(Equal("foo"))
+//
+// Unlike `Expect` and `Ω`, `ExpectWithOffset` takes an additional integer argument
+// that is used to modify the call-stack offset when computing line numbers. It is
+// the same as `Expect(...).WithOffset`.
+//
+// This is most useful in helper functions that make assertions. If you want Gomega's
+// error message to refer to the calling line in the test (as opposed to the line in the helper function)
+// set the first argument of `ExpectWithOffset` appropriately.
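+//
+// For example (a sketch; ExpectHealthy and Widget are hypothetical):
+//
+//	func ExpectHealthy(w Widget) {
+//		// offset 1 attributes failures to ExpectHealthy's caller
+//		ExpectWithOffset(1, w.Status()).To(Equal("healthy"))
+//	}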
+func ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion {
+ ensureDefaultGomegaIsConfigured()
+ return Default.ExpectWithOffset(offset, actual, extra...)
+}
+
+/*
+Eventually enables making assertions on asynchronous behavior.
+
+Eventually checks that an assertion *eventually* passes. Eventually blocks when called and attempts an assertion periodically until it passes or a timeout occurs. Both the timeout and polling interval are configurable as optional arguments.
+The first optional argument is the timeout (which defaults to 1s), the second is the polling interval (which defaults to 10ms). Both intervals can be specified as time.Duration, parsable duration strings or floats/integers (in which case they are interpreted as seconds). In addition an optional context.Context can be passed in - Eventually will keep trying until either the timeout expires or the context is cancelled, whichever comes first.
+
+Eventually works with any Gomega compatible matcher and supports making assertions against three categories of actual value:
+
+**Category 1: Making Eventually assertions on values**
+
+There are several examples of values that can change over time. These can be passed in to Eventually and will be passed to the matcher repeatedly until a match occurs. For example:
+
+ c := make(chan bool)
+ go DoStuff(c)
+ Eventually(c, "50ms").Should(BeClosed())
+
+will poll the channel repeatedly until it is closed. In this example `Eventually` will block until either the specified timeout of 50ms has elapsed or the channel is closed, whichever comes first.
+
+Several Gomega libraries allow you to use Eventually in this way. For example, the gomega/gexec package allows you to block until a *gexec.Session exits successfully via:
+
+ Eventually(session).Should(gexec.Exit(0))
+
+And the gomega/gbytes package allows you to monitor a streaming *gbytes.Buffer until a given string is seen:
+
+ Eventually(buffer).Should(gbytes.Say("hello there"))
+
+In these examples, both `session` and `buffer` are designed to be thread-safe when polled by the `Exit` and `Say` matchers. This is not true in general of most raw values, so while it is tempting to do something like:
+
+ // THIS IS NOT THREAD-SAFE
+ var s *string
+ go mutateStringEventually(s)
+ Eventually(s).Should(Equal("I've changed"))
+
+this will trigger Go's race detector as the goroutine polling via Eventually will race over the value of s with the goroutine mutating the string. For cases like this you can use channels or introduce your own locking around s by passing Eventually a function.
+
+**Category 2: Making Eventually assertions on functions**
+
+Eventually can be passed functions that **return at least one value**. When configured this way, Eventually will poll the function repeatedly and pass the first returned value to the matcher.
+
+For example:
+
+ Eventually(func() int {
+ return client.FetchCount()
+ }).Should(BeNumerically(">=", 17))
+
+will repeatedly poll client.FetchCount until the BeNumerically matcher is satisfied. (Note that this example could have been written as Eventually(client.FetchCount).Should(BeNumerically(">=", 17)))
+
+If multiple values are returned by the function, Eventually will pass the first value to the matcher and require that all others are zero-valued. This allows you to pass Eventually a function that returns a value and an error - a common pattern in Go.
+
+For example, consider a method that returns a value and an error:
+
+ func FetchFromDB() (string, error)
+
+Then
+
+ Eventually(FetchFromDB).Should(Equal("got it"))
+
+will pass only if and when the returned error is nil *and* the returned string satisfies the matcher.
+
+Eventually can also accept functions that take arguments, however you must provide those arguments using .WithArguments(). For example, consider a function that takes a user-id and makes a network request to fetch a full name:
+
+ func FetchFullName(userId int) (string, error)
+
+You can poll this function like so:
+
+ Eventually(FetchFullName).WithArguments(1138).Should(Equal("Wookie"))
+
+It is important to note that the function passed into Eventually is invoked *synchronously* when polled. Eventually does not (in fact, it cannot) kill the function if it takes longer to return than Eventually's configured timeout. A common practice here is to use a context. Here's an example that combines Ginkgo's spec timeout support with Eventually:
+
+ It("fetches the correct count", func(ctx SpecContext) {
+ Eventually(ctx, func() int {
+ return client.FetchCount(ctx, "/users")
+ }).Should(BeNumerically(">=", 17))
+ }, SpecTimeout(time.Second))
+
+You can also use Eventually().WithContext(ctx) to pass in the context. Passed-in contexts play nicely with passed-in arguments as long as the context appears first. You can rewrite the above example as:
+
+ It("fetches the correct count", func(ctx SpecContext) {
+ Eventually(client.FetchCount).WithContext(ctx).WithArguments("/users").Should(BeNumerically(">=", 17))
+ }, SpecTimeout(time.Second))
+
+Either way the context passed to Eventually is also passed to the underlying function. Now, when Ginkgo cancels the context both the FetchCount client and Gomega will be informed and can exit.
+
+**Category 3: Making assertions _in_ the function passed into Eventually**
+
+When testing complex systems it can be valuable to assert that a _set_ of assertions passes Eventually. Eventually supports this by accepting functions that take a single Gomega argument and return zero or more values.
+
+Here's an example that makes some assertions and returns a value and error:
+
+ Eventually(func(g Gomega) (Widget, error) {
+ ids, err := client.FetchIDs()
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(ids).To(ContainElement(1138))
+ return client.FetchWidget(1138)
+ }).Should(Equal(expectedWidget))
+
+will pass only if all the assertions in the polled function pass and the return value satisfies the matcher.
+
+Eventually also supports a special case polling function that takes a single Gomega argument and returns no values. Eventually assumes such a function is making assertions and is designed to work with the Succeed matcher to validate that all assertions have passed.
+For example:
+
+ Eventually(func(g Gomega) {
+ model, err := client.Find(1138)
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(model.Reticulate()).To(Succeed())
+ g.Expect(model.IsReticulated()).To(BeTrue())
+ g.Expect(model.Save()).To(Succeed())
+ }).Should(Succeed())
+
+will rerun the function until all assertions pass.
+
+You can also pass additional arguments to functions that take a Gomega. The only rule is that the Gomega argument must be first. If you also want to pass the context attached to Eventually you must ensure that it is the second argument. For example:
+
+ Eventually(func(g Gomega, ctx context.Context, path string, expected ...string){
+ tok, err := client.GetToken(ctx)
+ g.Expect(err).NotTo(HaveOccurred())
+
+ elements, err := client.Fetch(ctx, tok, path)
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(elements).To(ConsistOf(expected))
+ }).WithContext(ctx).WithArguments("/names", "Joe", "Jane", "Sam").Should(Succeed())
+
+You can ensure that you get a number of consecutive successful tries before succeeding using `MustPassRepeatedly(int)`. For example:
+
+ count := 0
+ Eventually(func() bool {
+ count++
+ return count > 2
+ }).MustPassRepeatedly(2).Should(BeTrue())
+ // Because we had to wait for 2 calls that returned true
+ Expect(count).To(Equal(3))
+
+Finally, in addition to passing timeouts and a context to Eventually you can be more explicit with Eventually's chaining configuration methods:
+
+ Eventually(..., "10s", "2s", ctx).Should(...)
+
+is equivalent to
+
+ Eventually(...).WithTimeout(10*time.Second).WithPolling(2*time.Second).WithContext(ctx).Should(...)
+*/
+func Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
+ ensureDefaultGomegaIsConfigured()
+ return Default.Eventually(actualOrCtx, args...)
+}
+
+// EventuallyWithOffset operates like Eventually but takes an additional
+// initial argument to indicate an offset in the call stack. This is useful when building helper
+// functions that contain matchers. To learn more, read about `ExpectWithOffset`.
+//
+// `EventuallyWithOffset` is the same as `Eventually(...).WithOffset`.
+//
+// `EventuallyWithOffset` specifying a timeout interval (and an optional polling interval) are
+// the same as `Eventually(...).WithOffset(...).WithTimeout` or
+// `Eventually(...).WithOffset(...).WithTimeout(...).WithPolling`.
+func EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
+ ensureDefaultGomegaIsConfigured()
+ return Default.EventuallyWithOffset(offset, actualOrCtx, args...)
+}
+
+/*
+Consistently, like Eventually, enables making assertions on asynchronous behavior.
+
+Consistently blocks when called for a specified duration. During that duration Consistently repeatedly polls its matcher and ensures that it is satisfied. If the matcher is consistently satisfied, then Consistently will pass. Otherwise Consistently will fail.
+
+Both the total waiting duration and the polling interval are configurable as optional arguments. The first optional argument is the duration that Consistently will run for (defaults to 100ms), and the second argument is the polling interval (defaults to 10ms). As with Eventually, these intervals can be passed in as time.Duration, parsable duration strings or an integer or float number of seconds. You can also pass in an optional context.Context - Consistently will exit early (with a failure) if the context is cancelled before the waiting duration expires.
+
+Consistently accepts the same three categories of actual as Eventually, check the Eventually docs to learn more.
+
+Consistently is useful in cases where you want to assert that something *does not happen* for a period of time. For example, you may want to assert that a goroutine does *not* send data down a channel. In this case you could write:
+
+ Consistently(channel, "200ms").ShouldNot(Receive())
+
+This will block for 200 milliseconds and repeatedly check the channel and ensure nothing has been received.
+*/
+func Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
+ ensureDefaultGomegaIsConfigured()
+ return Default.Consistently(actualOrCtx, args...)
+}
+
+// ConsistentlyWithOffset operates like Consistently but takes an additional
+// initial argument to indicate an offset in the call stack. This is useful when building helper
+// functions that contain matchers. To learn more, read about `ExpectWithOffset`.
+//
+// `ConsistentlyWithOffset` is the same as `Consistently(...).WithOffset` and
+// optional `WithTimeout` and `WithPolling`.
+func ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
+ ensureDefaultGomegaIsConfigured()
+ return Default.ConsistentlyWithOffset(offset, actualOrCtx, args...)
+}
+
+/*
+StopTrying can be used to signal to Eventually and Consistently that they should abort and stop trying. This always results in a failure of the assertion - and the failure message is the content of the StopTrying signal.
+
+You can send the StopTrying signal by either returning StopTrying("message") as an error from your passed-in function _or_ by calling StopTrying("message").Now() to trigger a panic and end execution.
+
+You can also wrap StopTrying around an error with `StopTrying("message").Wrap(err)` and can attach additional objects via `StopTrying("message").Attach("description", object)`. When rendered, the signal will include the wrapped error and any attached objects rendered using Gomega's default formatting.
+
+Here are a couple of examples. This is how you might use StopTrying() as an error to signal that Eventually should stop:
+
+ playerIndex, numPlayers := 0, 11
+ Eventually(func() (string, error) {
+ if playerIndex == numPlayers {
+ return "", StopTrying("no more players left")
+ }
+ name := client.FetchPlayer(playerIndex)
+ playerIndex += 1
+ return name, nil
+ }).Should(Equal("Patrick Mahomes"))
+
+And here's an example where `StopTrying().Now()` is called to halt execution immediately:
+
+ Eventually(func() []string {
+ names, err := client.FetchAllPlayers()
+ if err == client.IRRECOVERABLE_ERROR {
+ StopTrying("Irrecoverable error occurred").Wrap(err).Now()
+ }
+ return names
+ }).Should(ContainElement("Patrick Mahomes"))
+*/
+var StopTrying = internal.StopTrying
+
+/*
+TryAgainAfter() allows you to adjust the polling interval for the _next_ iteration of `Eventually` or `Consistently`. Like `StopTrying` you can either return `TryAgainAfter` as an error or trigger it immediately with `.Now()`.
+
+When `TryAgainAfter()` is triggered `Eventually` and `Consistently` will wait for that duration. If a timeout occurs before the next poll is triggered both `Eventually` and `Consistently` will always fail with the content of the TryAgainAfter message. As with StopTrying you can `.Wrap()` an error and `.Attach()` additional objects to `TryAgainAfter`.
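+
+For example (a sketch reusing the hypothetical client from the StopTrying examples):
+
+	Eventually(func() ([]string, error) {
+		names, err := client.FetchAllPlayers()
+		if err == client.RATE_LIMITED {
+			// back off, then resume polling
+			return nil, TryAgainAfter(500 * time.Millisecond).Wrap(err)
+		}
+		return names, err
+	}).Should(ContainElement("Patrick Mahomes"))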
+*/
+var TryAgainAfter = internal.TryAgainAfter
+
+/*
+PollingSignalError is the error returned by StopTrying() and TryAgainAfter()
+*/
+type PollingSignalError = internal.PollingSignalError
+
+// SetDefaultEventuallyTimeout sets the default timeout duration for Eventually. Eventually will repeatedly poll your condition until it succeeds, or until this timeout elapses.
+func SetDefaultEventuallyTimeout(t time.Duration) {
+ Default.SetDefaultEventuallyTimeout(t)
+}
+
+// SetDefaultEventuallyPollingInterval sets the default polling interval for Eventually.
+func SetDefaultEventuallyPollingInterval(t time.Duration) {
+ Default.SetDefaultEventuallyPollingInterval(t)
+}
+
+// SetDefaultConsistentlyDuration sets the default duration for Consistently. Consistently will verify that your condition is satisfied for this long.
+func SetDefaultConsistentlyDuration(t time.Duration) {
+ Default.SetDefaultConsistentlyDuration(t)
+}
+
+// SetDefaultConsistentlyPollingInterval sets the default polling interval for Consistently.
+func SetDefaultConsistentlyPollingInterval(t time.Duration) {
+ Default.SetDefaultConsistentlyPollingInterval(t)
+}
+
+// AsyncAssertion is returned by Eventually and Consistently and polls the actual value passed into Eventually against
+// the matcher passed to the Should and ShouldNot methods.
+//
+// Both Should and ShouldNot take a variadic optionalDescription argument.
+// This argument allows you to make your failure messages more descriptive.
+// If a single argument of type `func() string` is passed, this function will be lazily evaluated if a failure occurs
+// and the returned string is used to annotate the failure message.
+// Otherwise, this argument is passed on to fmt.Sprintf() and then used to annotate the failure message.
+//
+// Both Should and ShouldNot return a boolean that is true if the assertion passed and false if it failed.
+//
+// Example:
+//
+// Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.")
+// Consistently(myChannel).ShouldNot(Receive(), func() string { return "Nothing should have come down the pipe." })
+type AsyncAssertion = types.AsyncAssertion
+
+// GomegaAsyncAssertion is deprecated in favor of AsyncAssertion, which does not stutter.
+type GomegaAsyncAssertion = types.AsyncAssertion
+
+// Assertion is returned by Ω and Expect and compares the actual value to the matcher
+// passed to the Should/ShouldNot and To/ToNot/NotTo methods.
+//
+// Typically Should/ShouldNot are used with Ω and To/ToNot/NotTo are used with Expect
+// though this is not enforced.
+//
+// All methods take a variadic optionalDescription argument.
+// This argument allows you to make your failure messages more descriptive.
+// If a single argument of type `func() string` is passed, this function will be lazily evaluated if a failure occurs
+// and the returned string is used to annotate the failure message.
+// Otherwise, this argument is passed on to fmt.Sprintf() and then used to annotate the failure message.
+//
+// All methods return a bool that is true if the assertion passed and false if it failed.
+//
+// Example:
+//
+// Ω(farm.HasCow()).Should(BeTrue(), "Farm %v should have a cow", farm)
+type Assertion = types.Assertion
+
+// GomegaAssertion is deprecated in favor of Assertion, which does not stutter.
+type GomegaAssertion = types.Assertion
+
+// OmegaMatcher is deprecated in favor of the better-named and better-organized types.GomegaMatcher but sticks around to support existing code that uses it
+type OmegaMatcher = types.GomegaMatcher
diff --git a/vendor/github.com/onsi/gomega/internal/assertion.go b/vendor/github.com/onsi/gomega/internal/assertion.go
new file mode 100644
index 0000000..08356a6
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/internal/assertion.go
@@ -0,0 +1,161 @@
+package internal
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/types"
+)
+
+type Assertion struct {
+ actuals []interface{} // actual value plus all extra values
+ actualIndex int // value to pass to the matcher
+ vet vetinari // the vet to call before calling the Gomega matcher
+ offset int
+ g *Gomega
+}
+
+// ...obligatory discworld reference, as "vetineer" doesn't sound ... quite right.
+type vetinari func(assertion *Assertion, optionalDescription ...interface{}) bool
+
+func NewAssertion(actualInput interface{}, g *Gomega, offset int, extra ...interface{}) *Assertion {
+ return &Assertion{
+ actuals: append([]interface{}{actualInput}, extra...),
+ actualIndex: 0,
+ vet: (*Assertion).vetActuals,
+ offset: offset,
+ g: g,
+ }
+}
+
+func (assertion *Assertion) WithOffset(offset int) types.Assertion {
+ assertion.offset = offset
+ return assertion
+}
+
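+// Error retargets the assertion at the last actual value - idiomatically the
+// trailing error return - and only vets the remaining values when that error
+// is non-nil. For example (an illustrative sketch, not from this file):
+//
+//	Expect(DoSomething()).Error().To(HaveOccurred())
+//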
+func (assertion *Assertion) Error() types.Assertion {
+ return &Assertion{
+ actuals: assertion.actuals,
+ actualIndex: len(assertion.actuals) - 1,
+ vet: (*Assertion).vetError,
+ offset: assertion.offset,
+ g: assertion.g,
+ }
+}
+
+func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+ assertion.g.THelper()
+ vetOptionalDescription("Assertion", optionalDescription...)
+ return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
+}
+
+func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+ assertion.g.THelper()
+ vetOptionalDescription("Assertion", optionalDescription...)
+ return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
+}
+
+func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+ assertion.g.THelper()
+ vetOptionalDescription("Assertion", optionalDescription...)
+ return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
+}
+
+func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+ assertion.g.THelper()
+ vetOptionalDescription("Assertion", optionalDescription...)
+ return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
+}
+
+func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+ assertion.g.THelper()
+ vetOptionalDescription("Assertion", optionalDescription...)
+ return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
+}
+
+func (assertion *Assertion) buildDescription(optionalDescription ...interface{}) string {
+ switch len(optionalDescription) {
+ case 0:
+ return ""
+ case 1:
+ if describe, ok := optionalDescription[0].(func() string); ok {
+ return describe() + "\n"
+ }
+ }
+ return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n"
+}
+
+func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool {
+ actualInput := assertion.actuals[assertion.actualIndex]
+ matches, err := matcher.Match(actualInput)
+ assertion.g.THelper()
+ if err != nil {
+ description := assertion.buildDescription(optionalDescription...)
+ assertion.g.Fail(description+err.Error(), 2+assertion.offset)
+ return false
+ }
+ if matches != desiredMatch {
+ var message string
+ if desiredMatch {
+ message = matcher.FailureMessage(actualInput)
+ } else {
+ message = matcher.NegatedFailureMessage(actualInput)
+ }
+ description := assertion.buildDescription(optionalDescription...)
+ assertion.g.Fail(description+message, 2+assertion.offset)
+ return false
+ }
+
+ return true
+}
+
+// vetActuals vets the actual values, with the (optional) exception of a
+// specific value, such as the first value in case of non-error assertions, or the
+// last value in case of Error()-based assertions.
+func (assertion *Assertion) vetActuals(optionalDescription ...interface{}) bool {
+ success, message := vetActuals(assertion.actuals, assertion.actualIndex)
+ if success {
+ return true
+ }
+
+ description := assertion.buildDescription(optionalDescription...)
+ assertion.g.THelper()
+ assertion.g.Fail(description+message, 2+assertion.offset)
+ return false
+}
+
+// vetError vets the actual values when the final error value is non-nil: in
+// that case, following the Go error-result idiom, all other actual values must
+// be zero values. If the final error value is nil, the other actual values are
+// not vetted, as they are then allowed to take on any values.
+func (assertion *Assertion) vetError(optionalDescription ...interface{}) bool {
+ if err := assertion.actuals[assertion.actualIndex]; err != nil {
+ // Go error result idiom: all other actual values must be zero values.
+ return assertion.vetActuals(optionalDescription...)
+ }
+ return true
+}
+
+// vetActuals vets a slice of actual values, optionally skipping a particular
+// value slice element, such as the first or last value slice element.
+func vetActuals(actuals []interface{}, skipIndex int) (bool, string) {
+ for i, actual := range actuals {
+ if i == skipIndex {
+ continue
+ }
+ if actual != nil {
+ zeroValue := reflect.Zero(reflect.TypeOf(actual)).Interface()
+ if !reflect.DeepEqual(zeroValue, actual) {
+ var message string
+ if err, ok := actual.(error); ok {
+ message = fmt.Sprintf("Unexpected error: %s\n%s", err, format.Object(err, 1))
+ } else {
+ message = fmt.Sprintf("Unexpected non-nil/non-zero argument at index %d:\n\t<%T>: %#v", i, actual, actual)
+ }
+ return false, message
+ }
+ }
+ }
+ return true, ""
+}
diff --git a/vendor/github.com/onsi/gomega/internal/async_assertion.go b/vendor/github.com/onsi/gomega/internal/async_assertion.go
new file mode 100644
index 0000000..cde9e2e
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/internal/async_assertion.go
@@ -0,0 +1,576 @@
+package internal
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "reflect"
+ "runtime"
+ "sync"
+ "time"
+
+ "github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/types"
+)
+
+var errInterface = reflect.TypeOf((*error)(nil)).Elem()
+var gomegaType = reflect.TypeOf((*types.Gomega)(nil)).Elem()
+var contextType = reflect.TypeOf(new(context.Context)).Elem()
+
+type formattedGomegaError interface {
+ FormattedGomegaError() string
+}
+
+type asyncPolledActualError struct {
+ message string
+}
+
+func (err *asyncPolledActualError) Error() string {
+ return err.message
+}
+
+func (err *asyncPolledActualError) FormattedGomegaError() string {
+ return err.message
+}
+
+type contextWithAttachProgressReporter interface {
+ AttachProgressReporter(func() string) func()
+}
+
+type asyncGomegaHaltExecutionError struct{}
+
+func (a asyncGomegaHaltExecutionError) GinkgoRecoverShouldIgnoreThisPanic() {}
+func (a asyncGomegaHaltExecutionError) Error() string {
+ return `An assertion has failed in a goroutine. You should call
+
+ defer GinkgoRecover()
+
+at the top of the goroutine that caused this panic. This will allow Ginkgo and Gomega to correctly capture and manage this panic.`
+}
+
+type AsyncAssertionType uint
+
+const (
+ AsyncAssertionTypeEventually AsyncAssertionType = iota
+ AsyncAssertionTypeConsistently
+)
+
+func (at AsyncAssertionType) String() string {
+ switch at {
+ case AsyncAssertionTypeEventually:
+ return "Eventually"
+ case AsyncAssertionTypeConsistently:
+ return "Consistently"
+ }
+ return "INVALID ASYNC ASSERTION TYPE"
+}
+
+type AsyncAssertion struct {
+ asyncType AsyncAssertionType
+
+ actualIsFunc bool
+ actual interface{}
+ argsToForward []interface{}
+
+ timeoutInterval time.Duration
+ pollingInterval time.Duration
+ mustPassRepeatedly int
+ ctx context.Context
+ offset int
+ g *Gomega
+}
+
+func NewAsyncAssertion(asyncType AsyncAssertionType, actualInput interface{}, g *Gomega, timeoutInterval time.Duration, pollingInterval time.Duration, mustPassRepeatedly int, ctx context.Context, offset int) *AsyncAssertion {
+ out := &AsyncAssertion{
+ asyncType: asyncType,
+ timeoutInterval: timeoutInterval,
+ pollingInterval: pollingInterval,
+ mustPassRepeatedly: mustPassRepeatedly,
+ offset: offset,
+ ctx: ctx,
+ g: g,
+ }
+
+ out.actual = actualInput
+ if actualInput != nil && reflect.TypeOf(actualInput).Kind() == reflect.Func {
+ out.actualIsFunc = true
+ }
+
+ return out
+}
+
+func (assertion *AsyncAssertion) WithOffset(offset int) types.AsyncAssertion {
+ assertion.offset = offset
+ return assertion
+}
+
+func (assertion *AsyncAssertion) WithTimeout(interval time.Duration) types.AsyncAssertion {
+ assertion.timeoutInterval = interval
+ return assertion
+}
+
+func (assertion *AsyncAssertion) WithPolling(interval time.Duration) types.AsyncAssertion {
+ assertion.pollingInterval = interval
+ return assertion
+}
+
+func (assertion *AsyncAssertion) Within(timeout time.Duration) types.AsyncAssertion {
+ assertion.timeoutInterval = timeout
+ return assertion
+}
+
+func (assertion *AsyncAssertion) ProbeEvery(interval time.Duration) types.AsyncAssertion {
+ assertion.pollingInterval = interval
+ return assertion
+}
+
+func (assertion *AsyncAssertion) WithContext(ctx context.Context) types.AsyncAssertion {
+ assertion.ctx = ctx
+ return assertion
+}
+
+func (assertion *AsyncAssertion) WithArguments(argsToForward ...interface{}) types.AsyncAssertion {
+ assertion.argsToForward = argsToForward
+ return assertion
+}
+
+func (assertion *AsyncAssertion) MustPassRepeatedly(count int) types.AsyncAssertion {
+ assertion.mustPassRepeatedly = count
+ return assertion
+}
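+
+// The builder methods above chain fluently. A hedged usage sketch (the
+// intervals and the polled function are illustrative, not part of this package):
+//
+// Eventually(fetchReplicaCount).
+// WithTimeout(10 * time.Second).
+// WithPolling(250 * time.Millisecond).
+// Should(BeNumerically(">=", 3))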
+
+func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+ assertion.g.THelper()
+ vetOptionalDescription("Asynchronous assertion", optionalDescription...)
+ return assertion.match(matcher, true, optionalDescription...)
+}
+
+func (assertion *AsyncAssertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+ assertion.g.THelper()
+ vetOptionalDescription("Asynchronous assertion", optionalDescription...)
+ return assertion.match(matcher, false, optionalDescription...)
+}
+
+func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interface{}) string {
+ switch len(optionalDescription) {
+ case 0:
+ return ""
+ case 1:
+ if describe, ok := optionalDescription[0].(func() string); ok {
+ return describe() + "\n"
+ }
+ }
+ return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n"
+}
+
+func (assertion *AsyncAssertion) processReturnValues(values []reflect.Value) (interface{}, error) {
+ if len(values) == 0 {
+ return nil, &asyncPolledActualError{
+ message: fmt.Sprintf("The function passed to %s did not return any values", assertion.asyncType),
+ }
+ }
+
+ actual := values[0].Interface()
+ if _, ok := AsPollingSignalError(actual); ok {
+ return actual, actual.(error)
+ }
+
+ var err error
+ for i, extraValue := range values[1:] {
+ extra := extraValue.Interface()
+ if extra == nil {
+ continue
+ }
+ if _, ok := AsPollingSignalError(extra); ok {
+ return actual, extra.(error)
+ }
+ extraType := reflect.TypeOf(extra)
+ zero := reflect.Zero(extraType).Interface()
+ if reflect.DeepEqual(extra, zero) {
+ continue
+ }
+ if i == len(values)-2 && extraType.Implements(errInterface) {
+ err = extra.(error)
+ }
+ if err == nil {
+ err = &asyncPolledActualError{
+ message: fmt.Sprintf("The function passed to %s had an unexpected non-nil/non-zero return value at index %d:\n%s", assertion.asyncType, i+1, format.Object(extra, 1)),
+ }
+ }
+ }
+
+ return actual, err
+}
+
+func (assertion *AsyncAssertion) invalidFunctionError(t reflect.Type) error {
+ return fmt.Errorf(`The function passed to %s had an invalid signature of %s. Functions passed to %s must either:
+
+ (a) have return values or
+ (b) take a Gomega interface as their first argument and use that Gomega instance to make assertions.
+
+You can learn more at https://onsi.github.io/gomega/#eventually
+`, assertion.asyncType, t, assertion.asyncType)
+}
+
+func (assertion *AsyncAssertion) noConfiguredContextForFunctionError() error {
+ return fmt.Errorf(`The function passed to %s requested a context.Context, but no context has been provided. Please pass one in using %s().WithContext().
+
+You can learn more at https://onsi.github.io/gomega/#eventually
+`, assertion.asyncType, assertion.asyncType)
+}
+
+func (assertion *AsyncAssertion) argumentMismatchError(t reflect.Type, numProvided int) error {
+ have := "have"
+ if numProvided == 1 {
+ have = "has"
+ }
+ return fmt.Errorf(`The function passed to %s has signature %s, which takes %d arguments, but %d %s been provided. Please use %s().WithArguments() to pass the correct set of arguments.
+
+You can learn more at https://onsi.github.io/gomega/#eventually
+`, assertion.asyncType, t, t.NumIn(), numProvided, have, assertion.asyncType)
+}
+
+func (assertion *AsyncAssertion) invalidMustPassRepeatedlyError(reason string) error {
+ return fmt.Errorf(`Invalid use of MustPassRepeatedly with %s %s
+
+You can learn more at https://onsi.github.io/gomega/#eventually
+`, assertion.asyncType, reason)
+}
+
+func (assertion *AsyncAssertion) buildActualPoller() (func() (interface{}, error), error) {
+ if !assertion.actualIsFunc {
+ return func() (interface{}, error) { return assertion.actual, nil }, nil
+ }
+ actualValue := reflect.ValueOf(assertion.actual)
+ actualType := reflect.TypeOf(assertion.actual)
+ numIn, numOut, isVariadic := actualType.NumIn(), actualType.NumOut(), actualType.IsVariadic()
+
+ if numIn == 0 && numOut == 0 {
+ return nil, assertion.invalidFunctionError(actualType)
+ }
+ takesGomega, takesContext := false, false
+ if numIn > 0 {
+ takesGomega, takesContext = actualType.In(0).Implements(gomegaType), actualType.In(0).Implements(contextType)
+ }
+ if takesGomega && numIn > 1 && actualType.In(1).Implements(contextType) {
+ takesContext = true
+ }
+ if takesContext && len(assertion.argsToForward) > 0 && reflect.TypeOf(assertion.argsToForward[0]).Implements(contextType) {
+ takesContext = false
+ }
+ if !takesGomega && numOut == 0 {
+ return nil, assertion.invalidFunctionError(actualType)
+ }
+ if takesContext && assertion.ctx == nil {
+ return nil, assertion.noConfiguredContextForFunctionError()
+ }
+
+ var assertionFailure error
+ inValues := []reflect.Value{}
+ if takesGomega {
+ inValues = append(inValues, reflect.ValueOf(NewGomega(assertion.g.DurationBundle).ConfigureWithFailHandler(func(message string, callerSkip ...int) {
+ skip := 0
+ if len(callerSkip) > 0 {
+ skip = callerSkip[0]
+ }
+ _, file, line, _ := runtime.Caller(skip + 1)
+ assertionFailure = &asyncPolledActualError{
+ message: fmt.Sprintf("The function passed to %s failed at %s:%d with:\n%s", assertion.asyncType, file, line, message),
+ }
+ // we throw an asyncGomegaHaltExecutionError so that defer GinkgoRecover() can catch this error if the user makes an assertion in a goroutine
+ panic(asyncGomegaHaltExecutionError{})
+ })))
+ }
+ if takesContext {
+ inValues = append(inValues, reflect.ValueOf(assertion.ctx))
+ }
+ for _, arg := range assertion.argsToForward {
+ inValues = append(inValues, reflect.ValueOf(arg))
+ }
+
+ if !isVariadic && numIn != len(inValues) {
+ return nil, assertion.argumentMismatchError(actualType, len(inValues))
+ } else if isVariadic && len(inValues) < numIn-1 {
+ return nil, assertion.argumentMismatchError(actualType, len(inValues))
+ }
+
+ if assertion.mustPassRepeatedly != 1 && assertion.asyncType != AsyncAssertionTypeEventually {
+ return nil, assertion.invalidMustPassRepeatedlyError("it can only be used with Eventually")
+ }
+ if assertion.mustPassRepeatedly < 1 {
+ return nil, assertion.invalidMustPassRepeatedlyError("parameter can't be < 1")
+ }
+
+ return func() (actual interface{}, err error) {
+ var values []reflect.Value
+ assertionFailure = nil
+ defer func() {
+ if numOut == 0 && takesGomega {
+ actual = assertionFailure
+ } else {
+ actual, err = assertion.processReturnValues(values)
+ _, isAsyncError := AsPollingSignalError(err)
+ if assertionFailure != nil && !isAsyncError {
+ err = assertionFailure
+ }
+ }
+ if e := recover(); e != nil {
+ if _, isAsyncError := AsPollingSignalError(e); isAsyncError {
+ err = e.(error)
+ } else if assertionFailure == nil {
+ panic(e)
+ }
+ }
+ }()
+ values = actualValue.Call(inValues)
+ return
+ }, nil
+}
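+
+// For reference, the two broad shapes of polled function supported above,
+// as hedged sketches (fetchStatus is illustrative):
+//
+// // (a) return values: the first is matched; a non-zero trailing value fails the poll
+// Eventually(func() (string, error) { return fetchStatus() }).Should(Equal("ready"))
+//
+// // (b) no return values: take a Gomega and make assertions with it
+// Eventually(func(g Gomega) {
+// g.Expect(fetchStatus()).To(Equal("ready"))
+// }).Should(Succeed())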
+
+func (assertion *AsyncAssertion) afterTimeout() <-chan time.Time {
+ if assertion.timeoutInterval >= 0 {
+ return time.After(assertion.timeoutInterval)
+ }
+
+ if assertion.asyncType == AsyncAssertionTypeConsistently {
+ return time.After(assertion.g.DurationBundle.ConsistentlyDuration)
+ } else {
+ if assertion.ctx == nil {
+ return time.After(assertion.g.DurationBundle.EventuallyTimeout)
+ } else {
+ return nil
+ }
+ }
+}
+
+func (assertion *AsyncAssertion) afterPolling() <-chan time.Time {
+ if assertion.pollingInterval >= 0 {
+ return time.After(assertion.pollingInterval)
+ }
+ if assertion.asyncType == AsyncAssertionTypeConsistently {
+ return time.After(assertion.g.DurationBundle.ConsistentlyPollingInterval)
+ } else {
+ return time.After(assertion.g.DurationBundle.EventuallyPollingInterval)
+ }
+}
+
+func (assertion *AsyncAssertion) matcherSaysStopTrying(matcher types.GomegaMatcher, value interface{}) bool {
+ if assertion.actualIsFunc || types.MatchMayChangeInTheFuture(matcher, value) {
+ return false
+ }
+ return true
+}
+
+func (assertion *AsyncAssertion) pollMatcher(matcher types.GomegaMatcher, value interface{}) (matches bool, err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ if _, isAsyncError := AsPollingSignalError(e); isAsyncError {
+ err = e.(error)
+ } else {
+ panic(e)
+ }
+ }
+ }()
+
+ matches, err = matcher.Match(value)
+
+ return
+}
+
+func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool {
+ timer := time.Now()
+ timeout := assertion.afterTimeout()
+ lock := sync.Mutex{}
+
+ var matches, hasLastValidActual bool
+ var actual, lastValidActual interface{}
+ var actualErr, matcherErr error
+ var oracleMatcherSaysStop bool
+
+ assertion.g.THelper()
+
+ pollActual, buildActualPollerErr := assertion.buildActualPoller()
+ if buildActualPollerErr != nil {
+ assertion.g.Fail(buildActualPollerErr.Error(), 2+assertion.offset)
+ return false
+ }
+
+ actual, actualErr = pollActual()
+ if actualErr == nil {
+ lastValidActual = actual
+ hasLastValidActual = true
+ oracleMatcherSaysStop = assertion.matcherSaysStopTrying(matcher, actual)
+ matches, matcherErr = assertion.pollMatcher(matcher, actual)
+ }
+
+ renderError := func(preamble string, err error) string {
+ message := ""
+ if pollingSignalErr, ok := AsPollingSignalError(err); ok {
+ message = err.Error()
+ for _, attachment := range pollingSignalErr.Attachments {
+ message += fmt.Sprintf("\n%s:\n", attachment.Description)
+ message += format.Object(attachment.Object, 1)
+ }
+ } else {
+ message = preamble + "\n" + format.Object(err, 1)
+ }
+ return message
+ }
+
+ messageGenerator := func() string {
+ // can be called out of band by Ginkgo if the user requests a progress report
+ lock.Lock()
+ defer lock.Unlock()
+ message := ""
+
+ if actualErr == nil {
+ if matcherErr == nil {
+ if desiredMatch != matches {
+ if desiredMatch {
+ message += matcher.FailureMessage(actual)
+ } else {
+ message += matcher.NegatedFailureMessage(actual)
+ }
+ } else {
+ if assertion.asyncType == AsyncAssertionTypeConsistently {
+ message += "There is no failure as the matcher passed to Consistently has not yet failed"
+ } else {
+ message += "There is no failure as the matcher passed to Eventually succeeded on its most recent iteration"
+ }
+ }
+ } else {
+ var fgErr formattedGomegaError
+ if errors.As(matcherErr, &fgErr) {
+ message += fgErr.FormattedGomegaError() + "\n"
+ } else {
+ message += renderError(fmt.Sprintf("The matcher passed to %s returned the following error:", assertion.asyncType), matcherErr)
+ }
+ }
+ } else {
+ var fgErr formattedGomegaError
+ if errors.As(actualErr, &fgErr) {
+ message += fgErr.FormattedGomegaError() + "\n"
+ } else {
+ message += renderError(fmt.Sprintf("The function passed to %s returned the following error:", assertion.asyncType), actualErr)
+ }
+ if hasLastValidActual {
+ message += fmt.Sprintf("\nAt one point, however, the function did return successfully.\nYet, %s failed because", assertion.asyncType)
+ _, e := matcher.Match(lastValidActual)
+ if e != nil {
+ message += renderError(" the matcher returned the following error:", e)
+ } else {
+ message += " the matcher was not satisfied:\n"
+ if desiredMatch {
+ message += matcher.FailureMessage(lastValidActual)
+ } else {
+ message += matcher.NegatedFailureMessage(lastValidActual)
+ }
+ }
+ }
+ }
+
+ description := assertion.buildDescription(optionalDescription...)
+ return fmt.Sprintf("%s%s", description, message)
+ }
+
+ fail := func(preamble string) {
+ assertion.g.THelper()
+ assertion.g.Fail(fmt.Sprintf("%s after %.3fs.\n%s", preamble, time.Since(timer).Seconds(), messageGenerator()), 3+assertion.offset)
+ }
+
+ var contextDone <-chan struct{}
+ if assertion.ctx != nil {
+ contextDone = assertion.ctx.Done()
+ if v, ok := assertion.ctx.Value("GINKGO_SPEC_CONTEXT").(contextWithAttachProgressReporter); ok {
+ detach := v.AttachProgressReporter(messageGenerator)
+ defer detach()
+ }
+ }
+
+ // Used to count the number of times in a row a step passed
+ passedRepeatedlyCount := 0
+ for {
+ var nextPoll <-chan time.Time = nil
+ var isTryAgainAfterError = false
+
+ for _, err := range []error{actualErr, matcherErr} {
+ if pollingSignalErr, ok := AsPollingSignalError(err); ok {
+ if pollingSignalErr.IsStopTrying() {
+ fail("Told to stop trying")
+ return false
+ }
+ if pollingSignalErr.IsTryAgainAfter() {
+ nextPoll = time.After(pollingSignalErr.TryAgainDuration())
+ isTryAgainAfterError = true
+ }
+ }
+ }
+
+ if actualErr == nil && matcherErr == nil && matches == desiredMatch {
+ if assertion.asyncType == AsyncAssertionTypeEventually {
+ passedRepeatedlyCount += 1
+ if passedRepeatedlyCount == assertion.mustPassRepeatedly {
+ return true
+ }
+ }
+ } else if !isTryAgainAfterError {
+ if assertion.asyncType == AsyncAssertionTypeConsistently {
+ fail("Failed")
+ return false
+ }
+ // Reset the consecutive pass count
+ passedRepeatedlyCount = 0
+ }
+
+ if oracleMatcherSaysStop {
+ if assertion.asyncType == AsyncAssertionTypeEventually {
+ fail("No future change is possible. Bailing out early")
+ return false
+ } else {
+ return true
+ }
+ }
+
+ if nextPoll == nil {
+ nextPoll = assertion.afterPolling()
+ }
+
+ select {
+ case <-nextPoll:
+ a, e := pollActual()
+ lock.Lock()
+ actual, actualErr = a, e
+ lock.Unlock()
+ if actualErr == nil {
+ lock.Lock()
+ lastValidActual = actual
+ hasLastValidActual = true
+ lock.Unlock()
+ oracleMatcherSaysStop = assertion.matcherSaysStopTrying(matcher, actual)
+ m, e := assertion.pollMatcher(matcher, actual)
+ lock.Lock()
+ matches, matcherErr = m, e
+ lock.Unlock()
+ }
+ case <-contextDone:
+ err := context.Cause(assertion.ctx)
+ if err != nil && err != context.Canceled {
+ fail(fmt.Sprintf("Context was cancelled (cause: %s)", err))
+ } else {
+ fail("Context was cancelled")
+ }
+ return false
+ case <-timeout:
+ if assertion.asyncType == AsyncAssertionTypeEventually {
+ fail("Timed out")
+ return false
+ } else {
+ if isTryAgainAfterError {
+ fail("Timed out while waiting on TryAgainAfter")
+ return false
+ }
+ return true
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/onsi/gomega/internal/duration_bundle.go b/vendor/github.com/onsi/gomega/internal/duration_bundle.go
new file mode 100644
index 0000000..6e0d90d
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/internal/duration_bundle.go
@@ -0,0 +1,71 @@
+package internal
+
+import (
+ "fmt"
+ "os"
+ "reflect"
+ "time"
+)
+
+type DurationBundle struct {
+ EventuallyTimeout time.Duration
+ EventuallyPollingInterval time.Duration
+ ConsistentlyDuration time.Duration
+ ConsistentlyPollingInterval time.Duration
+}
+
+const (
+ EventuallyTimeoutEnvVarName = "GOMEGA_DEFAULT_EVENTUALLY_TIMEOUT"
+ EventuallyPollingIntervalEnvVarName = "GOMEGA_DEFAULT_EVENTUALLY_POLLING_INTERVAL"
+
+ ConsistentlyDurationEnvVarName = "GOMEGA_DEFAULT_CONSISTENTLY_DURATION"
+ ConsistentlyPollingIntervalEnvVarName = "GOMEGA_DEFAULT_CONSISTENTLY_POLLING_INTERVAL"
+)
+
+func FetchDefaultDurationBundle() DurationBundle {
+ return DurationBundle{
+ EventuallyTimeout: durationFromEnv(EventuallyTimeoutEnvVarName, time.Second),
+ EventuallyPollingInterval: durationFromEnv(EventuallyPollingIntervalEnvVarName, 10*time.Millisecond),
+
+ ConsistentlyDuration: durationFromEnv(ConsistentlyDurationEnvVarName, 100*time.Millisecond),
+ ConsistentlyPollingInterval: durationFromEnv(ConsistentlyPollingIntervalEnvVarName, 10*time.Millisecond),
+ }
+}
+
+func durationFromEnv(key string, defaultDuration time.Duration) time.Duration {
+ value := os.Getenv(key)
+ if value == "" {
+ return defaultDuration
+ }
+ duration, err := time.ParseDuration(value)
+ if err != nil {
+ panic(fmt.Sprintf("Expected a duration when using %s! Parse error %v", key, err))
+ }
+ return duration
+}
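+
+// A hedged example of overriding one of these defaults from the shell
+// (the value is illustrative):
+//
+// GOMEGA_DEFAULT_EVENTUALLY_TIMEOUT=30s go test ./...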
+
+func toDuration(input interface{}) (time.Duration, error) {
+ duration, ok := input.(time.Duration)
+ if ok {
+ return duration, nil
+ }
+
+ value := reflect.ValueOf(input)
+ kind := reflect.TypeOf(input).Kind()
+
+ if reflect.Int <= kind && kind <= reflect.Int64 {
+ return time.Duration(value.Int()) * time.Second, nil
+ } else if reflect.Uint <= kind && kind <= reflect.Uint64 {
+ return time.Duration(value.Uint()) * time.Second, nil
+ } else if reflect.Float32 <= kind && kind <= reflect.Float64 {
+ return time.Duration(value.Float() * float64(time.Second)), nil
+ } else if reflect.String == kind {
+ duration, err := time.ParseDuration(value.String())
+ if err != nil {
+ return 0, fmt.Errorf("%#v is not a valid parsable duration string: %w", input, err)
+ }
+ return duration, nil
+ }
+
+ return 0, fmt.Errorf("%#v is not a valid interval. Must be a time.Duration, a parsable duration string, or a number.", input)
+}
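+
+// Consequently, all of the following spell the same one-second timeout
+// (a sketch; f is an illustrative polled function):
+//
+// Eventually(f, time.Second).Should(Succeed())
+// Eventually(f, "1s").Should(Succeed())
+// Eventually(f, 1).Should(Succeed())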
diff --git a/vendor/github.com/onsi/gomega/internal/gomega.go b/vendor/github.com/onsi/gomega/internal/gomega.go
new file mode 100644
index 0000000..de1f4f3
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/internal/gomega.go
@@ -0,0 +1,129 @@
+package internal
+
+import (
+ "context"
+ "time"
+
+ "github.com/onsi/gomega/types"
+)
+
+type Gomega struct {
+ Fail types.GomegaFailHandler
+ THelper func()
+ DurationBundle DurationBundle
+}
+
+func NewGomega(bundle DurationBundle) *Gomega {
+ return &Gomega{
+ Fail: nil,
+ THelper: nil,
+ DurationBundle: bundle,
+ }
+}
+
+func (g *Gomega) IsConfigured() bool {
+ return g.Fail != nil && g.THelper != nil
+}
+
+func (g *Gomega) ConfigureWithFailHandler(fail types.GomegaFailHandler) *Gomega {
+ g.Fail = fail
+ g.THelper = func() {}
+ return g
+}
+
+func (g *Gomega) ConfigureWithT(t types.GomegaTestingT) *Gomega {
+ g.Fail = func(message string, _ ...int) {
+ t.Helper()
+ t.Fatalf("\n%s", message)
+ }
+ g.THelper = t.Helper
+ return g
+}
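+
+// A minimal sketch of the testing-package wiring this enables; at the
+// public level, gomega.NewWithT(t) performs this configuration:
+//
+// func TestSomething(t *testing.T) {
+// g := gomega.NewWithT(t)
+// g.Expect(1 + 1).To(gomega.Equal(2))
+// }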
+
+func (g *Gomega) Ω(actual interface{}, extra ...interface{}) types.Assertion {
+ return g.ExpectWithOffset(0, actual, extra...)
+}
+
+func (g *Gomega) Expect(actual interface{}, extra ...interface{}) types.Assertion {
+ return g.ExpectWithOffset(0, actual, extra...)
+}
+
+func (g *Gomega) ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) types.Assertion {
+ return NewAssertion(actual, g, offset, extra...)
+}
+
+func (g *Gomega) Eventually(actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+ return g.makeAsyncAssertion(AsyncAssertionTypeEventually, 0, actualOrCtx, args...)
+}
+
+func (g *Gomega) EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+ return g.makeAsyncAssertion(AsyncAssertionTypeEventually, offset, actualOrCtx, args...)
+}
+
+func (g *Gomega) Consistently(actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+ return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, 0, actualOrCtx, args...)
+}
+
+func (g *Gomega) ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+ return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, offset, actualOrCtx, args...)
+}
+
+func (g *Gomega) makeAsyncAssertion(asyncAssertionType AsyncAssertionType, offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+ baseOffset := 3
+ timeoutInterval := -time.Duration(1)
+ pollingInterval := -time.Duration(1)
+ intervals := []interface{}{}
+ var ctx context.Context
+
+ actual := actualOrCtx
+ startingIndex := 0
+ if _, isCtx := actualOrCtx.(context.Context); isCtx && len(args) > 0 {
+ // the first argument is a context; we should accept it as the context _only if_ it is **not** the only argument **and** the second argument is not a parseable duration
+ // this is due to an unfortunate ambiguity in early versions of Gomega in which multi-type durations are allowed after the actual
+ if _, err := toDuration(args[0]); err != nil {
+ ctx = actualOrCtx.(context.Context)
+ actual = args[0]
+ startingIndex = 1
+ }
+ }
+
+ for _, arg := range args[startingIndex:] {
+ switch v := arg.(type) {
+ case context.Context:
+ ctx = v
+ default:
+ intervals = append(intervals, arg)
+ }
+ }
+ var err error
+ if len(intervals) > 0 {
+ timeoutInterval, err = toDuration(intervals[0])
+ if err != nil {
+ g.Fail(err.Error(), offset+baseOffset)
+ }
+ }
+ if len(intervals) > 1 {
+ pollingInterval, err = toDuration(intervals[1])
+ if err != nil {
+ g.Fail(err.Error(), offset+baseOffset)
+ }
+ }
+
+ return NewAsyncAssertion(asyncAssertionType, actual, g, timeoutInterval, pollingInterval, 1, ctx, offset)
+}
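+
+// The disambiguation above means each of the following is accepted (a
+// sketch; ctx and f are illustrative):
+//
+// Eventually(ctx) // ctx itself is the actual value
+// Eventually(ctx, "2s") // ctx is the actual; "2s" is the timeout
+// Eventually(ctx, f) // ctx bounds polling; f is the polled actual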
+
+func (g *Gomega) SetDefaultEventuallyTimeout(t time.Duration) {
+ g.DurationBundle.EventuallyTimeout = t
+}
+
+func (g *Gomega) SetDefaultEventuallyPollingInterval(t time.Duration) {
+ g.DurationBundle.EventuallyPollingInterval = t
+}
+
+func (g *Gomega) SetDefaultConsistentlyDuration(t time.Duration) {
+ g.DurationBundle.ConsistentlyDuration = t
+}
+
+func (g *Gomega) SetDefaultConsistentlyPollingInterval(t time.Duration) {
+ g.DurationBundle.ConsistentlyPollingInterval = t
+}
diff --git a/vendor/github.com/onsi/gomega/internal/gutil/post_ioutil.go b/vendor/github.com/onsi/gomega/internal/gutil/post_ioutil.go
new file mode 100644
index 0000000..6864055
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/internal/gutil/post_ioutil.go
@@ -0,0 +1,48 @@
+//go:build go1.16
+// +build go1.16
+
+// Package gutil is a replacement for ioutil, which should not be used in new
+// code as of Go 1.16. With Go 1.16 and higher, this implementation
+// uses the ioutil replacement functions in "io" and "os" with some
+// Gomega specifics. This means that we should not get deprecation warnings
+// for ioutil when they are added.
+package gutil
+
+import (
+ "io"
+ "os"
+)
+
+func NopCloser(r io.Reader) io.ReadCloser {
+ return io.NopCloser(r)
+}
+
+func ReadAll(r io.Reader) ([]byte, error) {
+ return io.ReadAll(r)
+}
+
+func ReadDir(dirname string) ([]string, error) {
+ entries, err := os.ReadDir(dirname)
+ if err != nil {
+ return nil, err
+ }
+
+ var names []string
+ for _, entry := range entries {
+ names = append(names, entry.Name())
+ }
+
+ return names, nil
+}
+
+func ReadFile(filename string) ([]byte, error) {
+ return os.ReadFile(filename)
+}
+
+func MkdirTemp(dir, pattern string) (string, error) {
+ return os.MkdirTemp(dir, pattern)
+}
+
+func WriteFile(filename string, data []byte) error {
+ return os.WriteFile(filename, data, 0644)
+}
diff --git a/vendor/github.com/onsi/gomega/internal/gutil/using_ioutil.go b/vendor/github.com/onsi/gomega/internal/gutil/using_ioutil.go
new file mode 100644
index 0000000..5c0ce1e
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/internal/gutil/using_ioutil.go
@@ -0,0 +1,47 @@
+//go:build !go1.16
+// +build !go1.16
+
+// Package gutil is a replacement for ioutil, which should not be used in new
+// code as of Go 1.16. With Go 1.15 and lower, this implementation
+// uses the ioutil functions, meaning that although Gomega is not officially
+// supported on these versions, it is still likely to work.
+package gutil
+
+import (
+ "io"
+ "io/ioutil"
+)
+
+func NopCloser(r io.Reader) io.ReadCloser {
+ return ioutil.NopCloser(r)
+}
+
+func ReadAll(r io.Reader) ([]byte, error) {
+ return ioutil.ReadAll(r)
+}
+
+func ReadDir(dirname string) ([]string, error) {
+ files, err := ioutil.ReadDir(dirname)
+ if err != nil {
+ return nil, err
+ }
+
+ var names []string
+ for _, file := range files {
+ names = append(names, file.Name())
+ }
+
+ return names, nil
+}
+
+func ReadFile(filename string) ([]byte, error) {
+ return ioutil.ReadFile(filename)
+}
+
+func MkdirTemp(dir, pattern string) (string, error) {
+ return ioutil.TempDir(dir, pattern)
+}
+
+func WriteFile(filename string, data []byte) error {
+ return ioutil.WriteFile(filename, data, 0644)
+}
diff --git a/vendor/github.com/onsi/gomega/internal/polling_signal_error.go b/vendor/github.com/onsi/gomega/internal/polling_signal_error.go
new file mode 100644
index 0000000..83b04b1
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/internal/polling_signal_error.go
@@ -0,0 +1,106 @@
+package internal
+
+import (
+ "errors"
+ "fmt"
+ "time"
+)
+
+type PollingSignalErrorType int
+
+const (
+ PollingSignalErrorTypeStopTrying PollingSignalErrorType = iota
+ PollingSignalErrorTypeTryAgainAfter
+)
+
+type PollingSignalError interface {
+ error
+ Wrap(err error) PollingSignalError
+ Attach(description string, obj any) PollingSignalError
+ Now()
+}
+
+var StopTrying = func(message string) PollingSignalError {
+ return &PollingSignalErrorImpl{
+ message: message,
+ pollingSignalErrorType: PollingSignalErrorTypeStopTrying,
+ }
+}
+
+var TryAgainAfter = func(duration time.Duration) PollingSignalError {
+ return &PollingSignalErrorImpl{
+ message: fmt.Sprintf("told to try again after %s", duration),
+ duration: duration,
+ pollingSignalErrorType: PollingSignalErrorTypeTryAgainAfter,
+ }
+}
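+
+// A hedged sketch of signalling from a polled function (fetch and ErrGone
+// are illustrative):
+//
+// Eventually(func() (string, error) {
+// s, err := fetch()
+// if errors.Is(err, ErrGone) {
+// return "", StopTrying("resource is gone").Wrap(err)
+// }
+// return s, err
+// }).Should(Equal("ready"))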
+
+type PollingSignalErrorAttachment struct {
+ Description string
+ Object any
+}
+
+type PollingSignalErrorImpl struct {
+ message string
+ wrappedErr error
+ pollingSignalErrorType PollingSignalErrorType
+ duration time.Duration
+ Attachments []PollingSignalErrorAttachment
+}
+
+func (s *PollingSignalErrorImpl) Wrap(err error) PollingSignalError {
+ s.wrappedErr = err
+ return s
+}
+
+func (s *PollingSignalErrorImpl) Attach(description string, obj any) PollingSignalError {
+ s.Attachments = append(s.Attachments, PollingSignalErrorAttachment{description, obj})
+ return s
+}
+
+func (s *PollingSignalErrorImpl) Error() string {
+ if s.wrappedErr == nil {
+ return s.message
+ } else {
+ return s.message + ": " + s.wrappedErr.Error()
+ }
+}
+
+func (s *PollingSignalErrorImpl) Unwrap() error {
+ if s == nil {
+ return nil
+ }
+ return s.wrappedErr
+}
+
+func (s *PollingSignalErrorImpl) Now() {
+ panic(s)
+}
+
+func (s *PollingSignalErrorImpl) IsStopTrying() bool {
+ return s.pollingSignalErrorType == PollingSignalErrorTypeStopTrying
+}
+
+func (s *PollingSignalErrorImpl) IsTryAgainAfter() bool {
+ return s.pollingSignalErrorType == PollingSignalErrorTypeTryAgainAfter
+}
+
+func (s *PollingSignalErrorImpl) TryAgainDuration() time.Duration {
+ return s.duration
+}
+
+func AsPollingSignalError(actual interface{}) (*PollingSignalErrorImpl, bool) {
+ if actual == nil {
+ return nil, false
+ }
+ if actualErr, ok := actual.(error); ok {
+ var target *PollingSignalErrorImpl
+ if errors.As(actualErr, &target) {
+ return target, true
+ } else {
+ return nil, false
+ }
+ }
+
+ return nil, false
+}
diff --git a/vendor/github.com/onsi/gomega/internal/vetoptdesc.go b/vendor/github.com/onsi/gomega/internal/vetoptdesc.go
new file mode 100644
index 0000000..f295876
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/internal/vetoptdesc.go
@@ -0,0 +1,22 @@
+package internal
+
+import (
+ "fmt"
+
+ "github.com/onsi/gomega/types"
+)
+
+// vetOptionalDescription vets the optional description args: if it finds any
+// Gomega matcher at the beginning it panics. This allows for rendering Gomega
+// matchers as part of an optional Description, as long as they're not in the
+// first slot.
+func vetOptionalDescription(assertion string, optionalDescription ...interface{}) {
+ if len(optionalDescription) == 0 {
+ return
+ }
+ if _, isGomegaMatcher := optionalDescription[0].(types.GomegaMatcher); isGomegaMatcher {
+ panic(fmt.Sprintf("%s has a GomegaMatcher as the first element of optionalDescription.\n\t"+
+ "Do you mean to use And/Or/SatisfyAll/SatisfyAny to combine multiple matchers?",
+ assertion))
+ }
+}
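+
+// For reference, the two description forms this permits, as sketches
+// (resp, userID, and dumpResponse are illustrative):
+//
+// Expect(resp.Code).To(Equal(200), "unexpected status for user %d", userID)
+// Expect(resp.Code).To(Equal(200), func() string { return dumpResponse(resp) })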
diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go
new file mode 100644
index 0000000..7ef27dc
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers.go
@@ -0,0 +1,701 @@
+package gomega
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/onsi/gomega/matchers"
+ "github.com/onsi/gomega/types"
+)
+
+// Equal uses reflect.DeepEqual to compare actual with expected. Equal is strict about
+// types when performing comparisons.
+// It is an error for both actual and expected to be nil. Use BeNil() instead.
+func Equal(expected interface{}) types.GomegaMatcher {
+ return &matchers.EqualMatcher{
+ Expected: expected,
+ }
+}
+
+// BeEquivalentTo is more lax than Equal, allowing equality between different types.
+// This is done by converting actual to have the type of expected before
+// attempting equality with reflect.DeepEqual.
+// It is an error for actual and expected to be nil. Use BeNil() instead.
+func BeEquivalentTo(expected interface{}) types.GomegaMatcher {
+ return &matchers.BeEquivalentToMatcher{
+ Expected: expected,
+ }
+}
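+
+// A hedged illustration of the laxness described above (Equal would reject
+// both, since the types differ):
+//
+// Expect(uint32(5)).To(BeEquivalentTo(5)) // uint32(5) converts to int 5
+// Expect(5.1).To(BeEquivalentTo(5)) // float64(5.1) truncates to int 5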
+
+// BeComparableTo uses gocmp.Equal from github.com/google/go-cmp (instead of reflect.DeepEqual) to perform a deep comparison.
+// You can pass cmp.Option as options.
+// It is an error for actual and expected to be nil. Use BeNil() instead.
+func BeComparableTo(expected interface{}, opts ...cmp.Option) types.GomegaMatcher {
+ return &matchers.BeComparableToMatcher{
+ Expected: expected,
+ Options: opts,
+ }
+}
+
+// BeIdenticalTo uses the == operator to compare actual with expected.
+// BeIdenticalTo is strict about types when performing comparisons.
+// It is an error for both actual and expected to be nil. Use BeNil() instead.
+func BeIdenticalTo(expected interface{}) types.GomegaMatcher {
+ return &matchers.BeIdenticalToMatcher{
+ Expected: expected,
+ }
+}
+
+// BeNil succeeds if actual is nil
+func BeNil() types.GomegaMatcher {
+ return &matchers.BeNilMatcher{}
+}
+
+// BeTrue succeeds if actual is true
+//
+// In general, it's better to use `BeTrueBecause(reason)` to provide a more useful error message if a true check fails.
+func BeTrue() types.GomegaMatcher {
+ return &matchers.BeTrueMatcher{}
+}
+
+// BeFalse succeeds if actual is false
+//
+// In general, it's better to use `BeFalseBecause(reason)` to provide a more useful error message if a false check fails.
+func BeFalse() types.GomegaMatcher {
+ return &matchers.BeFalseMatcher{}
+}
+
+// BeTrueBecause succeeds if actual is true and displays the provided reason if it is false
+// fmt.Sprintf is used to render the reason
+func BeTrueBecause(format string, args ...any) types.GomegaMatcher {
+ return &matchers.BeTrueMatcher{Reason: fmt.Sprintf(format, args...)}
+}
+
+// BeFalseBecause succeeds if actual is false and displays the provided reason if it is true.
+// fmt.Sprintf is used to render the reason
+func BeFalseBecause(format string, args ...any) types.GomegaMatcher {
+ return &matchers.BeFalseMatcher{Reason: fmt.Sprintf(format, args...)}
+}
+
+// HaveOccurred succeeds if actual is a non-nil error
+// The typical Go error checking pattern looks like:
+//
+// err := SomethingThatMightFail()
+// Expect(err).ShouldNot(HaveOccurred())
+func HaveOccurred() types.GomegaMatcher {
+ return &matchers.HaveOccurredMatcher{}
+}
+
+// Succeed passes if actual is a nil error
+// Succeed is intended to be used with functions that return a single error value. Instead of
+//
+// err := SomethingThatMightFail()
+// Expect(err).ShouldNot(HaveOccurred())
+//
+// You can write:
+//
+// Expect(SomethingThatMightFail()).Should(Succeed())
+//
+// It is a mistake to use Succeed with a function that has multiple return values. Gomega's Ω and Expect
+// functions automatically trigger failure if any return values after the first return value are non-zero/non-nil.
+// This means that Ω(MultiReturnFunc()).ShouldNot(Succeed()) can never pass.
+func Succeed() types.GomegaMatcher {
+ return &matchers.SucceedMatcher{}
+}
+
+// MatchError succeeds if actual is a non-nil error that matches the passed in
+// string, error, function, or matcher.
+//
+// These are valid use-cases:
+//
+// When passed a string:
+//
+// Expect(err).To(MatchError("an error"))
+//
+// asserts that err.Error() == "an error"
+//
+// When passed an error:
+//
+// Expect(err).To(MatchError(SomeError))
+//
+// First checks if errors.Is(err, SomeError).
+// If that fails, it then checks whether reflect.DeepEqual(err, SomeError) holds for err or for any error wrapped by err
+//
+// When passed a matcher:
+//
+// Expect(err).To(MatchError(ContainSubstring("sprocket not found")))
+//
+// the matcher is passed err.Error(). In this case it asserts that err.Error() contains substring "sprocket not found"
+//
+// When passed a func(err) bool and a description:
+//
+// Expect(err).To(MatchError(os.IsNotExist, "IsNotExist"))
+//
+// the function is passed err and matches if the return value is true. The description is required to allow Gomega
+// to print a useful error message.
+//
+// It is an error for err to be nil or an object that does not implement the
+// Error interface
+//
+// The optional second argument is a description of the error function, if used. This is required when passing a function but is ignored in all other cases.
+func MatchError(expected interface{}, functionErrorDescription ...any) types.GomegaMatcher {
+ return &matchers.MatchErrorMatcher{
+ Expected: expected,
+ FuncErrDescription: functionErrorDescription,
+ }
+}
+
+// BeClosed succeeds if actual is a closed channel.
+// It is an error to pass a non-channel to BeClosed; it is also an error to pass nil
+//
+// In order to check whether or not the channel is closed, Gomega must try to read from the channel
+// (even in the `ShouldNot(BeClosed())` case). You should keep this in mind if you wish to make subsequent assertions about
+// values coming down the channel.
+//
+// Also, if you are testing that a *buffered* channel is closed you must first read all values out of the channel before
+// asserting that it is closed (it is not possible to detect that a buffered-channel has been closed until all its buffered values are read).
+//
+// Finally, as a corollary: it is an error to check whether or not a send-only channel is closed.
+func BeClosed() types.GomegaMatcher {
+ return &matchers.BeClosedMatcher{}
+}
+
+// Receive succeeds if there is a value to be received on actual.
+// Actual must be a channel (and cannot be a send-only channel) -- anything else is an error.
+//
+// Receive returns immediately and never blocks:
+//
+// - If there is nothing on the channel `c` then Expect(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass.
+//
+// - If the channel `c` is closed then Expect(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass.
+//
+// - If there is something on the channel `c` ready to be read, then Expect(c).Should(Receive()) will pass and Ω(c).ShouldNot(Receive()) will fail.
+//
+// If you have a go-routine running in the background that will write to channel `c` you can:
+//
+// Eventually(c).Should(Receive())
+//
+// This will timeout if nothing gets sent to `c` (you can modify the timeout interval as you normally do with `Eventually`)
+//
+// A similar use-case is to assert that no go-routine writes to a channel (for a period of time). You can do this with `Consistently`:
+//
+// Consistently(c).ShouldNot(Receive())
+//
+// You can pass `Receive` a matcher. If you do so, it will match the received object against the matcher. For example:
+//
+// Expect(c).Should(Receive(Equal("foo")))
+//
+// When given a matcher, `Receive` will always fail if there is nothing to be received on the channel.
+//
+// Passing Receive a matcher is especially useful when paired with Eventually:
+//
+// Eventually(c).Should(Receive(ContainSubstring("bar")))
+//
+// will repeatedly attempt to pull values out of `c` until a value matching "bar" is received.
+//
+// Furthermore, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type:
+//
+// var myThing thing
+// Eventually(thingChan).Should(Receive(&myThing))
+// Expect(myThing.Sprocket).Should(Equal("foo"))
+// Expect(myThing.IsValid()).Should(BeTrue())
+//
+// Finally, if you want to match the received object as well as get the actual received value into a variable, so you can reason further about the value received,
+// you can pass a pointer to a variable of the appropriate type first, and a matcher second:
+//
+// var myThing thing
+// Eventually(thingChan).Should(Receive(&myThing, ContainSubstring("bar")))
+func Receive(args ...interface{}) types.GomegaMatcher {
+ return &matchers.ReceiveMatcher{
+ Args: args,
+ }
+}
+
+// BeSent succeeds if a value can be sent to actual.
+// Actual must be a channel (and cannot be a receive-only channel) that can send the type of the value passed into BeSent -- anything else is an error.
+// In addition, actual must not be closed.
+//
+// BeSent never blocks:
+//
+// - If the channel `c` is not ready to receive then Expect(c).Should(BeSent("foo")) will fail immediately
+// - If the channel `c` is eventually ready to receive then Eventually(c).Should(BeSent("foo")) will succeed, presuming the channel becomes ready to receive before Eventually's timeout
+// - If the channel `c` is closed then Expect(c).Should(BeSent("foo")) and Ω(c).ShouldNot(BeSent("foo")) will both fail immediately
+//
+// Of course, the value is actually sent to the channel. The point of `BeSent` is less to make an assertion about the availability of the channel (which is typically an implementation detail that your test should not be concerned with).
+// Rather, the point of `BeSent` is to make it possible to easily and expressively write tests that can timeout on blocked channel sends.
+func BeSent(arg interface{}) types.GomegaMatcher {
+ return &matchers.BeSentMatcher{
+ Arg: arg,
+ }
+}
+
+// MatchRegexp succeeds if actual is a string or stringer that matches the
+// passed-in regexp. Optional arguments can be provided to construct a regexp
+// via fmt.Sprintf().
+func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher {
+ return &matchers.MatchRegexpMatcher{
+ Regexp: regexp,
+ Args: args,
+ }
+}
+
+// ContainSubstring succeeds if actual is a string or stringer that contains the
+// passed-in substring. Optional arguments can be provided to construct the substring
+// via fmt.Sprintf().
+func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher {
+ return &matchers.ContainSubstringMatcher{
+ Substr: substr,
+ Args: args,
+ }
+}
+
+// HavePrefix succeeds if actual is a string or stringer that contains the
+// passed-in string as a prefix. Optional arguments can be provided to construct
+// via fmt.Sprintf().
+func HavePrefix(prefix string, args ...interface{}) types.GomegaMatcher {
+ return &matchers.HavePrefixMatcher{
+ Prefix: prefix,
+ Args: args,
+ }
+}
+
+// HaveSuffix succeeds if actual is a string or stringer that contains the
+// passed-in string as a suffix. Optional arguments can be provided to construct
+// via fmt.Sprintf().
+func HaveSuffix(suffix string, args ...interface{}) types.GomegaMatcher {
+ return &matchers.HaveSuffixMatcher{
+ Suffix: suffix,
+ Args: args,
+ }
+}
+
+// MatchJSON succeeds if actual is a string or stringer of JSON that matches
+// the expected JSON. The JSONs are decoded and the resulting objects are compared via
+// reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter.
+func MatchJSON(json interface{}) types.GomegaMatcher {
+ return &matchers.MatchJSONMatcher{
+ JSONToMatch: json,
+ }
+}
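+
+// For instance, key order and whitespace are ignored:
+//
+// Expect(`{"b": 2, "a": 1}`).To(MatchJSON(`{"a":1,"b":2}`))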
+
+// MatchXML succeeds if actual is a string or stringer of XML that matches
+// the expected XML. The XMLs are decoded and the resulting objects are compared via
+// reflect.DeepEqual so things like whitespaces shouldn't matter.
+func MatchXML(xml interface{}) types.GomegaMatcher {
+ return &matchers.MatchXMLMatcher{
+ XMLToMatch: xml,
+ }
+}
+
+// MatchYAML succeeds if actual is a string or stringer of YAML that matches
+// the expected YAML. The YAML's are decoded and the resulting objects are compared via
+// reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter.
+func MatchYAML(yaml interface{}) types.GomegaMatcher {
+ return &matchers.MatchYAMLMatcher{
+ YAMLToMatch: yaml,
+ }
+}
+
+// BeEmpty succeeds if actual is empty. Actual must be of type string, array, map, chan, or slice.
+func BeEmpty() types.GomegaMatcher {
+ return &matchers.BeEmptyMatcher{}
+}
+
+// HaveLen succeeds if actual has the passed-in length. Actual must be of type string, array, map, chan, or slice.
+func HaveLen(count int) types.GomegaMatcher {
+ return &matchers.HaveLenMatcher{
+ Count: count,
+ }
+}
+
+// HaveCap succeeds if actual has the passed-in capacity. Actual must be of type array, chan, or slice.
+func HaveCap(count int) types.GomegaMatcher {
+ return &matchers.HaveCapMatcher{
+ Count: count,
+ }
+}
+
+// BeZero succeeds if actual is the zero value for its type or if actual is nil.
+func BeZero() types.GomegaMatcher {
+ return &matchers.BeZeroMatcher{}
+}
+
+// ContainElement succeeds if actual contains the passed in element. By default
+// ContainElement() uses Equal() to perform the match, however a matcher can be
+// passed in instead:
+//
+// Expect([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubstring("Bar")))
+//
+// Actual must be an array, slice or map. For maps, ContainElement searches
+// through the map's values.
+//
+// If you want to have a copy of the matching element(s) found you can pass a
+// pointer to a variable of the appropriate type. If the variable isn't a slice
+// or map, then exactly one match will be expected and returned. If the variable
+// is a slice or map, then at least one match is expected and all matches will be
+// stored in the variable.
+//
+// var findings []string
+// Expect([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubString("Bar", &findings)))
+func ContainElement(element interface{}, result ...interface{}) types.GomegaMatcher {
+ return &matchers.ContainElementMatcher{
+ Element: element,
+ Result: result,
+ }
+}
+
+// BeElementOf succeeds if actual is contained in the passed in elements.
+// BeElementOf() always uses Equal() to perform the match.
+// When the passed in elements are comprised of a single element that is either an Array or Slice, BeElementOf() behaves
+// as the reverse of ContainElement(), using Equal() to perform the match.
+//
+// Expect(2).Should(BeElementOf([]int{1, 2}))
+// Expect(2).Should(BeElementOf([2]int{1, 2}))
+//
+// Otherwise, BeElementOf() provides syntactic sugar for Or(Equal(_), Equal(_), ...):
+//
+// Expect(2).Should(BeElementOf(1, 2))
+//
+// Actual must be typed.
+func BeElementOf(elements ...interface{}) types.GomegaMatcher {
+ return &matchers.BeElementOfMatcher{
+ Elements: elements,
+ }
+}
+
+// BeKeyOf succeeds if actual is contained in the keys of the passed in map.
+// BeKeyOf() always uses Equal() to perform the match between actual and the map keys.
+//
+// Expect("foo").Should(BeKeyOf(map[string]bool{"foo": true, "bar": false}))
+func BeKeyOf(element interface{}) types.GomegaMatcher {
+ return &matchers.BeKeyOfMatcher{
+ Map: element,
+ }
+}
+
+// ConsistOf succeeds if actual contains precisely the elements passed into the matcher. The ordering of the elements does not matter.
+// By default ConsistOf() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
+//
+// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf("FooBar", "Foo"))
+// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Bar"), "Foo"))
+// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Foo"), ContainSubstring("Foo")))
+//
+// Actual must be an array, slice or map. For maps, ConsistOf matches against the map's values.
+//
+// You typically pass variadic arguments to ConsistOf (as in the examples above). However, if you need to pass in a slice you can do so, provided that it
+// is the only element passed in to ConsistOf:
+//
+// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf([]string{"FooBar", "Foo"}))
+//
+// Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []interface{} are different types - hence the need for this special rule.
+func ConsistOf(elements ...interface{}) types.GomegaMatcher {
+ return &matchers.ConsistOfMatcher{
+ Elements: elements,
+ }
+}
+
+// HaveExactElements succeeds if actual contains elements that precisely match the elements passed into the matcher. The ordering of the elements does matter.
+// By default HaveExactElements() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
+//
+// Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements("Foo", "FooBar"))
+// Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements("Foo", ContainSubstring("Bar")))
+// Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements(ContainSubstring("Foo"), ContainSubstring("Foo")))
+//
+// Actual must be an array or slice.
+func HaveExactElements(elements ...interface{}) types.GomegaMatcher {
+ return &matchers.HaveExactElementsMatcher{
+ Elements: elements,
+ }
+}
+
+// ContainElements succeeds if actual contains the passed in elements. The ordering of the elements does not matter.
+// By default ContainElements() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
+//
+// Expect([]string{"Foo", "FooBar"}).Should(ContainElements("FooBar"))
+// Expect([]string{"Foo", "FooBar"}).Should(ContainElements(ContainSubstring("Bar"), "Foo"))
+//
+// Actual must be an array, slice or map.
+// For maps, ContainElements searches through the map's values.
+func ContainElements(elements ...interface{}) types.GomegaMatcher {
+ return &matchers.ContainElementsMatcher{
+ Elements: elements,
+ }
+}
+
+// HaveEach succeeds if actual solely contains elements that match the passed in element.
+// Please note that if actual is empty, HaveEach will always fail.
+// By default HaveEach() uses Equal() to perform the match, however a
+// matcher can be passed in instead:
+//
+// Expect([]string{"Foo", "FooBar"}).Should(HaveEach(ContainSubstring("Foo")))
+//
+// Actual must be an array, slice or map.
+// For maps, HaveEach searches through the map's values.
+func HaveEach(element interface{}) types.GomegaMatcher {
+ return &matchers.HaveEachMatcher{
+ Element: element,
+ }
+}
+
+// HaveKey succeeds if actual is a map with the passed in key.
+// By default HaveKey uses Equal() to perform the match, however a
+// matcher can be passed in instead:
+//
+// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKey(MatchRegexp(`.+Foo$`)))
+func HaveKey(key interface{}) types.GomegaMatcher {
+ return &matchers.HaveKeyMatcher{
+ Key: key,
+ }
+}
+
+// HaveKeyWithValue succeeds if actual is a map with the passed in key and value.
+// By default HaveKeyWithValue uses Equal() to perform the match, however a
+// matcher can be passed in instead:
+//
+// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue("Foo", "Bar"))
+// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue(MatchRegexp(`.+Foo$`), "Bar"))
+func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher {
+ return &matchers.HaveKeyWithValueMatcher{
+ Key: key,
+ Value: value,
+ }
+}
+
+// HaveField succeeds if actual is a struct and the value at the passed in field
+// matches the passed in matcher. By default HaveField uses Equal() to perform the match,
+// however a matcher can be passed in instead.
+//
+// The field must be a string that resolves to the name of a field in the struct. Structs can be traversed
+// using the '.' delimiter. If the field ends with '()' a method named field is assumed to exist on the struct and is invoked.
+// Such methods must take no arguments and return a single value:
+//
+// type Book struct {
+// Title string
+// Author Person
+// }
+// type Person struct {
+// FirstName string
+// LastName string
+// DOB time.Time
+// }
+// Expect(book).To(HaveField("Title", "Les Miserables"))
+// Expect(book).To(HaveField("Title", ContainSubstring("Les"))
+// Expect(book).To(HaveField("Author.FirstName", Equal("Victor"))
+// Expect(book).To(HaveField("Author.DOB.Year()", BeNumerically("<", 1900))
+func HaveField(field string, expected interface{}) types.GomegaMatcher {
+ return &matchers.HaveFieldMatcher{
+ Field: field,
+ Expected: expected,
+ }
+}
+
+// HaveExistingField succeeds if actual is a struct and the specified field
+// exists.
+//
+// HaveExistingField can be combined with HaveField in order to cover use cases
+// with optional fields. HaveField alone would trigger an error in such situations.
+//
+// Expect(MrHarmless).NotTo(And(HaveExistingField("Title"), HaveField("Title", "Supervillain")))
+func HaveExistingField(field string) types.GomegaMatcher {
+ return &matchers.HaveExistingFieldMatcher{
+ Field: field,
+ }
+}
+
+// HaveValue applies the given matcher to the value of actual, optionally and
+// repeatedly dereferencing pointers or taking the concrete value of interfaces.
+// Thus, the matcher will always be applied to non-pointer and non-interface
+// values only. HaveValue will fail with an error if a pointer or interface is
+// nil. It will also fail for more than 31 pointer or interface dereferences to
+// guard against mistakenly applying it to arbitrarily deep linked pointers.
+//
+// HaveValue differs from gstruct.PointTo in that it does not expect actual to
+// be a pointer (as gstruct.PointTo does) but instead also accepts non-pointer
+// and even interface values.
+//
+// actual := 42
+// Expect(actual).To(HaveValue(Equal(42)))
+// Expect(&actual).To(HaveValue(Equal(42)))
+func HaveValue(matcher types.GomegaMatcher) types.GomegaMatcher {
+ return &matchers.HaveValueMatcher{
+ Matcher: matcher,
+ }
+}
+
+// BeNumerically performs numerical assertions in a type-agnostic way.
+// Actual and expected should be numbers, though the specific type of
+// number is irrelevant (float32, float64, uint8, etc...).
+//
+// There are six, self-explanatory, supported comparators:
+//
+// Expect(1.0).Should(BeNumerically("==", 1))
+// Expect(1.0).Should(BeNumerically("~", 0.999, 0.01))
+// Expect(1.0).Should(BeNumerically(">", 0.9))
+// Expect(1.0).Should(BeNumerically(">=", 1.0))
+// Expect(1.0).Should(BeNumerically("<", 3))
+// Expect(1.0).Should(BeNumerically("<=", 1.0))
+func BeNumerically(comparator string, compareTo ...interface{}) types.GomegaMatcher {
+ return &matchers.BeNumericallyMatcher{
+ Comparator: comparator,
+ CompareTo: compareTo,
+ }
+}
+
+// BeTemporally compares time.Time's like BeNumerically
+// Actual and expected must be time.Time. The comparators are the same as for BeNumerically
+//
+// Expect(time.Now()).Should(BeTemporally(">", time.Time{}))
+// Expect(time.Now()).Should(BeTemporally("~", time.Now(), time.Second))
+func BeTemporally(comparator string, compareTo time.Time, threshold ...time.Duration) types.GomegaMatcher {
+ return &matchers.BeTemporallyMatcher{
+ Comparator: comparator,
+ CompareTo: compareTo,
+ Threshold: threshold,
+ }
+}
+
+// BeAssignableToTypeOf succeeds if actual is assignable to the type of expected.
+// It will return an error when one of the values is nil.
+//
+// Expect(0).Should(BeAssignableToTypeOf(0)) // Same values
+// Expect(5).Should(BeAssignableToTypeOf(-1)) // different values same type
+// Expect("foo").Should(BeAssignableToTypeOf("bar")) // different values same type
+// Expect(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{}))
+func BeAssignableToTypeOf(expected interface{}) types.GomegaMatcher {
+ return &matchers.AssignableToTypeOfMatcher{
+ Expected: expected,
+ }
+}
+
+// Panic succeeds if actual is a function that, when invoked, panics.
+// Actual must be a function that takes no arguments and returns no results.
+func Panic() types.GomegaMatcher {
+ return &matchers.PanicMatcher{}
+}
+
+// PanicWith succeeds if actual is a function that, when invoked, panics with a specific value.
+// Actual must be a function that takes no arguments and returns no results.
+//
+// By default PanicWith uses Equal() to perform the match, however a
+// matcher can be passed in instead:
+//
+// Expect(fn).Should(PanicWith(MatchRegexp(`.+Foo$`)))
+func PanicWith(expected interface{}) types.GomegaMatcher {
+ return &matchers.PanicMatcher{Expected: expected}
+}
+
+// BeAnExistingFile succeeds if a file exists.
+// Actual must be a string representing the abs path to the file being checked.
+func BeAnExistingFile() types.GomegaMatcher {
+ return &matchers.BeAnExistingFileMatcher{}
+}
+
+// BeARegularFile succeeds if a file exists and is a regular file.
+// Actual must be a string representing the abs path to the file being checked.
+func BeARegularFile() types.GomegaMatcher {
+ return &matchers.BeARegularFileMatcher{}
+}
+
+// BeADirectory succeeds if a file exists and is a directory.
+// Actual must be a string representing the abs path to the file being checked.
+func BeADirectory() types.GomegaMatcher {
+ return &matchers.BeADirectoryMatcher{}
+}
+
+// HaveHTTPStatus succeeds if the Status or StatusCode field of an HTTP response matches.
+// Actual must be either a *http.Response or *httptest.ResponseRecorder.
+// Expected must be either an int or a string.
+//
+// Expect(resp).Should(HaveHTTPStatus(http.StatusOK)) // asserts that resp.StatusCode == 200
+// Expect(resp).Should(HaveHTTPStatus("404 Not Found")) // asserts that resp.Status == "404 Not Found"
+// Expect(resp).Should(HaveHTTPStatus(http.StatusOK, http.StatusNoContent)) // asserts that resp.StatusCode == 200 || resp.StatusCode == 204
+func HaveHTTPStatus(expected ...interface{}) types.GomegaMatcher {
+ return &matchers.HaveHTTPStatusMatcher{Expected: expected}
+}
+
+// HaveHTTPHeaderWithValue succeeds if the header is found and the value matches.
+// Actual must be either a *http.Response or *httptest.ResponseRecorder.
+// Expected must be a string header name, followed by a header value which
+// can be a string, or another matcher.
+func HaveHTTPHeaderWithValue(header string, value interface{}) types.GomegaMatcher {
+ return &matchers.HaveHTTPHeaderWithValueMatcher{
+ Header: header,
+ Value: value,
+ }
+}
+
+// HaveHTTPBody matches if the body matches.
+// Actual must be either a *http.Response or *httptest.ResponseRecorder.
+// Expected must be either a string, []byte, or other matcher
+func HaveHTTPBody(expected interface{}) types.GomegaMatcher {
+ return &matchers.HaveHTTPBodyMatcher{Expected: expected}
+}
+
+// And succeeds only if all of the given matchers succeed.
+// The matchers are tried in order, and will fail-fast if one doesn't succeed.
+//
+// Expect("hi").To(And(HaveLen(2), Equal("hi"))
+//
+// And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
+func And(ms ...types.GomegaMatcher) types.GomegaMatcher {
+ return &matchers.AndMatcher{Matchers: ms}
+}
+
+// SatisfyAll is an alias for And().
+//
+// Expect("hi").Should(SatisfyAll(HaveLen(2), Equal("hi")))
+func SatisfyAll(matchers ...types.GomegaMatcher) types.GomegaMatcher {
+ return And(matchers...)
+}
+
+// Or succeeds if any of the given matchers succeed.
+// The matchers are tried in order and will return immediately upon the first successful match.
+//
+// Expect("hi").To(Or(HaveLen(3), HaveLen(2))
+//
+// And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
+func Or(ms ...types.GomegaMatcher) types.GomegaMatcher {
+ return &matchers.OrMatcher{Matchers: ms}
+}
+
+// SatisfyAny is an alias for Or().
+//
+// Expect("hi").SatisfyAny(Or(HaveLen(3), HaveLen(2))
+func SatisfyAny(matchers ...types.GomegaMatcher) types.GomegaMatcher {
+ return Or(matchers...)
+}
+
+// Not negates the given matcher; it succeeds if the given matcher fails.
+//
+// Expect(1).To(Not(Equal(2)))
+//
+// And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
+func Not(matcher types.GomegaMatcher) types.GomegaMatcher {
+ return &matchers.NotMatcher{Matcher: matcher}
+}
+
+// WithTransform applies the `transform` to the actual value and matches it against `matcher`.
+// The given transform must be either a function of one parameter that returns one value or a
+// function of one parameter that returns two values, where the second value must be of the
+// error type.
+//
+// var plus1 = func(i int) int { return i + 1 }
+// Expect(1).To(WithTransform(plus1, Equal(2)))
+//
+// var failingplus1 = func(i int) (int, error) { return 42, fmt.Errorf("this does not compute") }
+// Expect(1).To(WithTransform(failingplus1, Equal(2)))
+//
+// And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
+func WithTransform(transform interface{}, matcher types.GomegaMatcher) types.GomegaMatcher {
+ return matchers.NewWithTransformMatcher(transform, matcher)
+}
+
+// Satisfy matches the actual value against the `predicate` function.
+// The given predicate must be a function of one parameter that returns bool.
+//
+// var isEven = func(i int) bool { return i%2 == 0 }
+// Expect(2).To(Satisfy(isEven))
+func Satisfy(predicate interface{}) types.GomegaMatcher {
+ return matchers.NewSatisfyMatcher(predicate)
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/and.go b/vendor/github.com/onsi/gomega/matchers/and.go
new file mode 100644
index 0000000..6bd826a
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/and.go
@@ -0,0 +1,62 @@
+package matchers
+
+import (
+ "fmt"
+
+ "github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/types"
+)
+
+type AndMatcher struct {
+ Matchers []types.GomegaMatcher
+
+ // state
+ firstFailedMatcher types.GomegaMatcher
+}
+
+func (m *AndMatcher) Match(actual interface{}) (success bool, err error) {
+ m.firstFailedMatcher = nil
+ for _, matcher := range m.Matchers {
+ success, err := matcher.Match(actual)
+ if !success || err != nil {
+ m.firstFailedMatcher = matcher
+ return false, err
+ }
+ }
+ return true, nil
+}
+
+func (m *AndMatcher) FailureMessage(actual interface{}) (message string) {
+ return m.firstFailedMatcher.FailureMessage(actual)
+}
+
+func (m *AndMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ // not the most beautiful list of matchers, but not bad either...
+ return format.Message(actual, fmt.Sprintf("To not satisfy all of these matchers: %s", m.Matchers))
+}
+
+func (m *AndMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+ /*
+ Example with 3 matchers: A, B, C
+
+ Match evaluates them: T, F, <?> => F
+ So match is currently F, what should MatchMayChangeInTheFuture() return?
+ Seems like it only depends on B, since currently B MUST change to allow the result to become T
+
+ Match eval: T, T, T => T
+ So match is currently T, what should MatchMayChangeInTheFuture() return?
+ Seems to depend on ANY of them being able to change to F.
+ */
+
+ if m.firstFailedMatcher == nil {
+ // so all matchers succeeded.. Any one of them changing would change the result.
+ for _, matcher := range m.Matchers {
+ if types.MatchMayChangeInTheFuture(matcher, actual) {
+ return true
+ }
+ }
+ return false // none of them were going to change
+ }
+ // one of the matchers failed.. it must be able to change in order to affect the result
+ return types.MatchMayChangeInTheFuture(m.firstFailedMatcher, actual)
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go
new file mode 100644
index 0000000..be48395
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go
@@ -0,0 +1,37 @@
+// untested sections: 2
+
+package matchers
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/onsi/gomega/format"
+)
+
+type AssignableToTypeOfMatcher struct {
+ Expected interface{}
+}
+
+func (matcher *AssignableToTypeOfMatcher) Match(actual interface{}) (success bool, err error) {
+ if actual == nil && matcher.Expected == nil {
+ return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
+ } else if matcher.Expected == nil {
+ return false, fmt.Errorf("Refusing to compare type to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
+ } else if actual == nil {
+ return false, nil
+ }
+
+ actualType := reflect.TypeOf(actual)
+ expectedType := reflect.TypeOf(matcher.Expected)
+
+ return actualType.AssignableTo(expectedType), nil
+}
+
+func (matcher *AssignableToTypeOfMatcher) FailureMessage(actual interface{}) string {
+ return format.Message(actual, fmt.Sprintf("to be assignable to the type: %T", matcher.Expected))
+}
+
+func (matcher *AssignableToTypeOfMatcher) NegatedFailureMessage(actual interface{}) string {
+ return format.Message(actual, fmt.Sprintf("not to be assignable to the type: %T", matcher.Expected))
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/attributes_slice.go b/vendor/github.com/onsi/gomega/matchers/attributes_slice.go
new file mode 100644
index 0000000..355b362
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/attributes_slice.go
@@ -0,0 +1,14 @@
+package matchers
+
+import (
+ "encoding/xml"
+ "strings"
+)
+
+type attributesSlice []xml.Attr
+
+func (attrs attributesSlice) Len() int { return len(attrs) }
+func (attrs attributesSlice) Less(i, j int) bool {
+ return strings.Compare(attrs[i].Name.Local, attrs[j].Name.Local) == -1
+}
+func (attrs attributesSlice) Swap(i, j int) { attrs[i], attrs[j] = attrs[j], attrs[i] }
diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_directory.go b/vendor/github.com/onsi/gomega/matchers/be_a_directory.go
new file mode 100644
index 0000000..93d4497
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_a_directory.go
@@ -0,0 +1,56 @@
+// untested sections: 5
+
+package matchers
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/onsi/gomega/format"
+)
+
+type notADirectoryError struct {
+ os.FileInfo
+}
+
+func (t notADirectoryError) Error() string {
+ fileInfo := os.FileInfo(t)
+ switch {
+ case fileInfo.Mode().IsRegular():
+ return "file is a regular file"
+ default:
+ return fmt.Sprintf("file mode is: %s", fileInfo.Mode().String())
+ }
+}
+
+type BeADirectoryMatcher struct {
+ expected interface{}
+ err error
+}
+
+func (matcher *BeADirectoryMatcher) Match(actual interface{}) (success bool, err error) {
+ actualFilename, ok := actual.(string)
+ if !ok {
+ return false, fmt.Errorf("BeADirectoryMatcher matcher expects a file path")
+ }
+
+ fileInfo, err := os.Stat(actualFilename)
+ if err != nil {
+ matcher.err = err
+ return false, nil
+ }
+
+ if !fileInfo.Mode().IsDir() {
+ matcher.err = notADirectoryError{fileInfo}
+ return false, nil
+ }
+ return true, nil
+}
+
+func (matcher *BeADirectoryMatcher) FailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, fmt.Sprintf("to be a directory: %s", matcher.err))
+}
+
+func (matcher *BeADirectoryMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not be a directory")
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go
new file mode 100644
index 0000000..8fefc4d
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go
@@ -0,0 +1,56 @@
+// untested sections: 5
+
+package matchers
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/onsi/gomega/format"
+)
+
+type notARegularFileError struct {
+ os.FileInfo
+}
+
+func (t notARegularFileError) Error() string {
+ fileInfo := os.FileInfo(t)
+ switch {
+ case fileInfo.IsDir():
+ return "file is a directory"
+ default:
+ return fmt.Sprintf("file mode is: %s", fileInfo.Mode().String())
+ }
+}
+
+type BeARegularFileMatcher struct {
+ expected interface{}
+ err error
+}
+
+func (matcher *BeARegularFileMatcher) Match(actual interface{}) (success bool, err error) {
+ actualFilename, ok := actual.(string)
+ if !ok {
+ return false, fmt.Errorf("BeARegularFileMatcher matcher expects a file path")
+ }
+
+ fileInfo, err := os.Stat(actualFilename)
+ if err != nil {
+ matcher.err = err
+ return false, nil
+ }
+
+ if !fileInfo.Mode().IsRegular() {
+ matcher.err = notARegularFileError{fileInfo}
+ return false, nil
+ }
+ return true, nil
+}
+
+func (matcher *BeARegularFileMatcher) FailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, fmt.Sprintf("to be a regular file: %s", matcher.err))
+}
+
+func (matcher *BeARegularFileMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not be a regular file")
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go
new file mode 100644
index 0000000..e2bdd28
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go
@@ -0,0 +1,40 @@
+// untested sections: 3
+
+package matchers
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/onsi/gomega/format"
+)
+
+type BeAnExistingFileMatcher struct {
+ expected interface{}
+}
+
+func (matcher *BeAnExistingFileMatcher) Match(actual interface{}) (success bool, err error) {
+ actualFilename, ok := actual.(string)
+ if !ok {
+ return false, fmt.Errorf("BeAnExistingFileMatcher matcher expects a file path")
+ }
+
+ if _, err = os.Stat(actualFilename); err != nil {
+ switch {
+ case os.IsNotExist(err):
+ return false, nil
+ default:
+ return false, err
+ }
+ }
+
+ return true, nil
+}
+
+func (matcher *BeAnExistingFileMatcher) FailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "to exist")
+}
+
+func (matcher *BeAnExistingFileMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to exist")
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go
new file mode 100644
index 0000000..f13c244
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go
@@ -0,0 +1,48 @@
+// untested sections: 2
+
+package matchers
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/onsi/gomega/format"
+)
+
+type BeClosedMatcher struct {
+}
+
+func (matcher *BeClosedMatcher) Match(actual interface{}) (success bool, err error) {
+ if !isChan(actual) {
+ return false, fmt.Errorf("BeClosed matcher expects a channel. Got:\n%s", format.Object(actual, 1))
+ }
+
+ channelType := reflect.TypeOf(actual)
+ channelValue := reflect.ValueOf(actual)
+
+ if channelType.ChanDir() == reflect.SendDir {
+ return false, fmt.Errorf("BeClosed matcher cannot determine if a send-only channel is closed or open. Got:\n%s", format.Object(actual, 1))
+ }
+
+ winnerIndex, _, open := reflect.Select([]reflect.SelectCase{
+ {Dir: reflect.SelectRecv, Chan: channelValue},
+ {Dir: reflect.SelectDefault},
+ })
+
+ var closed bool
+ if winnerIndex == 0 {
+ closed = !open
+ } else if winnerIndex == 1 {
+ closed = false
+ }
+
+ return closed, nil
+}
+
+func (matcher *BeClosedMatcher) FailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "to be closed")
+}
+
+func (matcher *BeClosedMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "to be open")
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go
new file mode 100644
index 0000000..4e38978
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go
@@ -0,0 +1,49 @@
+package matchers
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/onsi/gomega/format"
+)
+
+type BeComparableToMatcher struct {
+ Expected interface{}
+ Options cmp.Options
+}
+
+func (matcher *BeComparableToMatcher) Match(actual interface{}) (success bool, matchErr error) {
+ if actual == nil && matcher.Expected == nil {
+ return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
+ }
+ // Shortcut for byte slices.
+ // Comparing long byte slices with reflect.DeepEqual is very slow,
+ // so use bytes.Equal if actual and expected are both byte slices.
+ if actualByteSlice, ok := actual.([]byte); ok {
+ if expectedByteSlice, ok := matcher.Expected.([]byte); ok {
+ return bytes.Equal(actualByteSlice, expectedByteSlice), nil
+ }
+ }
+
+ defer func() {
+ if r := recover(); r != nil {
+ success = false
+ if err, ok := r.(error); ok {
+ matchErr = err
+ } else if errMsg, ok := r.(string); ok {
+ matchErr = fmt.Errorf("%s", errMsg)
+ }
+ }
+ }()
+
+ return cmp.Equal(actual, matcher.Expected, matcher.Options...), nil
+}
+
+func (matcher *BeComparableToMatcher) FailureMessage(actual interface{}) (message string) {
+ return fmt.Sprint("Expected object to be comparable, diff: ", cmp.Diff(actual, matcher.Expected, matcher.Options...))
+}
+
+func (matcher *BeComparableToMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to be comparable to", matcher.Expected)
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go
new file mode 100644
index 0000000..9ee75a5
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go
@@ -0,0 +1,43 @@
+// untested sections: 1
+
+package matchers
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/onsi/gomega/format"
+)
+
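+// Illustrative usage via the public BeElementOf constructor (an editorial
+// sketch, not part of the upstream source):
+//
+// Expect(2).Should(BeElementOf(1, 2, 3))
+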
+type BeElementOfMatcher struct {
+ Elements []interface{}
+}
+
+func (matcher *BeElementOfMatcher) Match(actual interface{}) (success bool, err error) {
+ if reflect.TypeOf(actual) == nil {
+ return false, fmt.Errorf("BeElement matcher expects actual to be typed")
+ }
+
+ var lastError error
+ for _, m := range flatten(matcher.Elements) {
+ matcher := &EqualMatcher{Expected: m}
+ success, err := matcher.Match(actual)
+ if err != nil {
+ lastError = err
+ continue
+ }
+ if success {
+ return true, nil
+ }
+ }
+
+ return false, lastError
+}
+
+func (matcher *BeElementOfMatcher) FailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "to be an element of", presentable(matcher.Elements))
+}
+
+func (matcher *BeElementOfMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to be an element of", presentable(matcher.Elements))
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go
new file mode 100644
index 0000000..527c1a1
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go
@@ -0,0 +1,29 @@
+// untested sections: 2
+
+package matchers
+
+import (
+ "fmt"
+
+ "github.com/onsi/gomega/format"
+)
+
+type BeEmptyMatcher struct {
+}
+
+func (matcher *BeEmptyMatcher) Match(actual interface{}) (success bool, err error) {
+ length, ok := lengthOf(actual)
+ if !ok {
+ return false, fmt.Errorf("BeEmpty matcher expects a string/array/map/channel/slice. Got:\n%s", format.Object(actual, 1))
+ }
+
+ return length == 0, nil
+}
+
+func (matcher *BeEmptyMatcher) FailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "to be empty")
+}
+
+func (matcher *BeEmptyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to be empty")
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go
new file mode 100644
index 0000000..263627f
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go
@@ -0,0 +1,36 @@
+// untested sections: 2
+
+package matchers
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/onsi/gomega/format"
+)
+
+type BeEquivalentToMatcher struct {
+ Expected interface{}
+}
+
+func (matcher *BeEquivalentToMatcher) Match(actual interface{}) (success bool, err error) {
+ if actual == nil && matcher.Expected == nil {
+ return false, fmt.Errorf("Both actual and expected must not be nil.")
+ }
+
+ convertedActual := actual
+
+ if actual != nil && matcher.Expected != nil && reflect.TypeOf(actual).ConvertibleTo(reflect.TypeOf(matcher.Expected)) {
+ convertedActual = reflect.ValueOf(actual).Convert(reflect.TypeOf(matcher.Expected)).Interface()
+ }
+
+ return reflect.DeepEqual(convertedActual, matcher.Expected), nil
+}
+
+func (matcher *BeEquivalentToMatcher) FailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "to be equivalent to", matcher.Expected)
+}
+
+func (matcher *BeEquivalentToMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to be equivalent to", matcher.Expected)
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go
new file mode 100644
index 0000000..8ee2b1c
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go
@@ -0,0 +1,37 @@
+// untested sections: 2
+
+package matchers
+
+import (
+ "fmt"
+
+ "github.com/onsi/gomega/format"
+)
+
+type BeFalseMatcher struct {
+ Reason string
+}
+
+func (matcher *BeFalseMatcher) Match(actual interface{}) (success bool, err error) {
+ if !isBool(actual) {
+ return false, fmt.Errorf("Expected a boolean. Got:\n%s", format.Object(actual, 1))
+ }
+
+ return actual == false, nil
+}
+
+func (matcher *BeFalseMatcher) FailureMessage(actual interface{}) (message string) {
+ if matcher.Reason == "" {
+ return format.Message(actual, "to be false")
+ } else {
+ return matcher.Reason
+ }
+}
+
+func (matcher *BeFalseMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ if matcher.Reason == "" {
+ return format.Message(actual, "not to be false")
+ } else {
+ return fmt.Sprintf("Expected not false but got false\nNegation of %q failed", matcher.Reason)
+ }
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_identical_to.go b/vendor/github.com/onsi/gomega/matchers/be_identical_to.go
new file mode 100644
index 0000000..631ce11
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_identical_to.go
@@ -0,0 +1,39 @@
+// untested sections: 2
+
+package matchers
+
+import (
+ "fmt"
+ "runtime"
+
+ "github.com/onsi/gomega/format"
+)
+
+type BeIdenticalToMatcher struct {
+ Expected interface{}
+}
+
+func (matcher *BeIdenticalToMatcher) Match(actual interface{}) (success bool, matchErr error) {
+ if actual == nil && matcher.Expected == nil {
+ return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
+ }
+
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ success = false
+ matchErr = nil
+ }
+ }
+ }()
+
+ return actual == matcher.Expected, nil
+}
+
+func (matcher *BeIdenticalToMatcher) FailureMessage(actual interface{}) string {
+ return format.Message(actual, "to be identical to", matcher.Expected)
+}
+
+func (matcher *BeIdenticalToMatcher) NegatedFailureMessage(actual interface{}) string {
+ return format.Message(actual, "not to be identical to", matcher.Expected)
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go
new file mode 100644
index 0000000..449a291
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go
@@ -0,0 +1,45 @@
+package matchers
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/onsi/gomega/format"
+)
+
+type BeKeyOfMatcher struct {
+ Map interface{}
+}
+
+func (matcher *BeKeyOfMatcher) Match(actual interface{}) (success bool, err error) {
+ if !isMap(matcher.Map) {
+ return false, fmt.Errorf("BeKeyOf matcher needs expected to be a map type")
+ }
+
+ if reflect.TypeOf(actual) == nil {
+ return false, fmt.Errorf("BeKeyOf matcher expects actual to be typed")
+ }
+
+ var lastError error
+ for _, key := range reflect.ValueOf(matcher.Map).MapKeys() {
+ matcher := &EqualMatcher{Expected: key.Interface()}
+ success, err := matcher.Match(actual)
+ if err != nil {
+ lastError = err
+ continue
+ }
+ if success {
+ return true, nil
+ }
+ }
+
+ return false, lastError
+}
+
+func (matcher *BeKeyOfMatcher) FailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "to be a key of", presentable(valuesOf(matcher.Map)))
+}
+
+func (matcher *BeKeyOfMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to be a key of", presentable(valuesOf(matcher.Map)))
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go
new file mode 100644
index 0000000..551d99d
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go
@@ -0,0 +1,20 @@
+// untested sections: 2
+
+package matchers
+
+import "github.com/onsi/gomega/format"
+
+type BeNilMatcher struct {
+}
+
+func (matcher *BeNilMatcher) Match(actual interface{}) (success bool, err error) {
+ return isNil(actual), nil
+}
+
+func (matcher *BeNilMatcher) FailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "to be nil")
+}
+
+func (matcher *BeNilMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to be nil")
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go
new file mode 100644
index 0000000..100735d
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go
@@ -0,0 +1,134 @@
+// untested sections: 4
+
+package matchers
+
+import (
+ "fmt"
+ "math"
+
+ "github.com/onsi/gomega/format"
+)
+
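+// Illustrative usage via the public BeNumerically constructor (an editorial
+// sketch, not part of the upstream source; for "~" the optional second
+// argument is the threshold, as matchFloats below implements):
+//
+// Expect(5).Should(BeNumerically(">=", 5))
+// Expect(1.01).Should(BeNumerically("~", 1.0, 0.02))
+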
+type BeNumericallyMatcher struct {
+ Comparator string
+ CompareTo []interface{}
+}
+
+func (matcher *BeNumericallyMatcher) FailureMessage(actual interface{}) (message string) {
+ return matcher.FormatFailureMessage(actual, false)
+}
+
+func (matcher *BeNumericallyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return matcher.FormatFailureMessage(actual, true)
+}
+
+func (matcher *BeNumericallyMatcher) FormatFailureMessage(actual interface{}, negated bool) (message string) {
+ if len(matcher.CompareTo) == 1 {
+ message = fmt.Sprintf("to be %s", matcher.Comparator)
+ } else {
+ message = fmt.Sprintf("to be within %v of %s", matcher.CompareTo[1], matcher.Comparator)
+ }
+ if negated {
+ message = "not " + message
+ }
+ return format.Message(actual, message, matcher.CompareTo[0])
+}
+
+func (matcher *BeNumericallyMatcher) Match(actual interface{}) (success bool, err error) {
+ if len(matcher.CompareTo) == 0 || len(matcher.CompareTo) > 2 {
+ return false, fmt.Errorf("BeNumerically requires 1 or 2 CompareTo arguments. Got:\n%s", format.Object(matcher.CompareTo, 1))
+ }
+ if !isNumber(actual) {
+ return false, fmt.Errorf("Expected a number. Got:\n%s", format.Object(actual, 1))
+ }
+ if !isNumber(matcher.CompareTo[0]) {
+ return false, fmt.Errorf("Expected a number. Got:\n%s", format.Object(matcher.CompareTo[0], 1))
+ }
+ if len(matcher.CompareTo) == 2 && !isNumber(matcher.CompareTo[1]) {
+ return false, fmt.Errorf("Expected a number. Got:\n%s", format.Object(matcher.CompareTo[1], 1))
+ }
+
+ switch matcher.Comparator {
+ case "==", "~", ">", ">=", "<", "<=":
+ default:
+ return false, fmt.Errorf("Unknown comparator: %s", matcher.Comparator)
+ }
+
+ if isFloat(actual) || isFloat(matcher.CompareTo[0]) {
+ var secondOperand float64 = 1e-8
+ if len(matcher.CompareTo) == 2 {
+ secondOperand = toFloat(matcher.CompareTo[1])
+ }
+ success = matcher.matchFloats(toFloat(actual), toFloat(matcher.CompareTo[0]), secondOperand)
+ } else if isInteger(actual) {
+ var secondOperand int64 = 0
+ if len(matcher.CompareTo) == 2 {
+ secondOperand = toInteger(matcher.CompareTo[1])
+ }
+ success = matcher.matchIntegers(toInteger(actual), toInteger(matcher.CompareTo[0]), secondOperand)
+ } else if isUnsignedInteger(actual) {
+ var secondOperand uint64 = 0
+ if len(matcher.CompareTo) == 2 {
+ secondOperand = toUnsignedInteger(matcher.CompareTo[1])
+ }
+ success = matcher.matchUnsignedIntegers(toUnsignedInteger(actual), toUnsignedInteger(matcher.CompareTo[0]), secondOperand)
+ } else {
+ return false, fmt.Errorf("Failed to compare:\n%s\n%s:\n%s", format.Object(actual, 1), matcher.Comparator, format.Object(matcher.CompareTo[0], 1))
+ }
+
+ return success, nil
+}
+
+func (matcher *BeNumericallyMatcher) matchIntegers(actual, compareTo, threshold int64) (success bool) {
+ switch matcher.Comparator {
+ case "==", "~":
+ diff := actual - compareTo
+ return -threshold <= diff && diff <= threshold
+ case ">":
+ return (actual > compareTo)
+ case ">=":
+ return (actual >= compareTo)
+ case "<":
+ return (actual < compareTo)
+ case "<=":
+ return (actual <= compareTo)
+ }
+ return false
+}
+
+func (matcher *BeNumericallyMatcher) matchUnsignedIntegers(actual, compareTo, threshold uint64) (success bool) {
+ switch matcher.Comparator {
+ case "==", "~":
+ if actual < compareTo {
+ actual, compareTo = compareTo, actual
+ }
+ return actual-compareTo <= threshold
+ case ">":
+ return (actual > compareTo)
+ case ">=":
+ return (actual >= compareTo)
+ case "<":
+ return (actual < compareTo)
+ case "<=":
+ return (actual <= compareTo)
+ }
+ return false
+}
+
+func (matcher *BeNumericallyMatcher) matchFloats(actual, compareTo, threshold float64) (success bool) {
+ switch matcher.Comparator {
+ case "~":
+ return math.Abs(actual-compareTo) <= threshold
+ case "==":
+ return (actual == compareTo)
+ case ">":
+ return (actual > compareTo)
+ case ">=":
+ return (actual >= compareTo)
+ case "<":
+ return (actual < compareTo)
+ case "<=":
+ return (actual <= compareTo)
+ }
+ return false
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go
new file mode 100644
index 0000000..cf582a3
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go
@@ -0,0 +1,73 @@
+// untested sections: 3
+
+package matchers
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/onsi/gomega/format"
+)
+
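+// Illustrative usage via the public BeSent constructor (an editorial sketch,
+// not part of the upstream source). The send is non-blocking, so BeSent is
+// typically paired with Eventually:
+//
+// Eventually(ch).Should(BeSent("hello"))
+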
+type BeSentMatcher struct {
+ Arg interface{}
+ channelClosed bool
+}
+
+func (matcher *BeSentMatcher) Match(actual interface{}) (success bool, err error) {
+ if !isChan(actual) {
+ return false, fmt.Errorf("BeSent expects a channel. Got:\n%s", format.Object(actual, 1))
+ }
+
+ channelType := reflect.TypeOf(actual)
+ channelValue := reflect.ValueOf(actual)
+
+ if channelType.ChanDir() == reflect.RecvDir {
+ return false, fmt.Errorf("BeSent matcher cannot be passed a receive-only channel. Got:\n%s", format.Object(actual, 1))
+ }
+
+ argType := reflect.TypeOf(matcher.Arg)
+ assignable := argType.AssignableTo(channelType.Elem())
+
+ if !assignable {
+ return false, fmt.Errorf("Cannot pass:\n%s to the channel:\n%s\nThe types don't match.", format.Object(matcher.Arg, 1), format.Object(actual, 1))
+ }
+
+ argValue := reflect.ValueOf(matcher.Arg)
+
+ defer func() {
+ if e := recover(); e != nil {
+ success = false
+ err = fmt.Errorf("Cannot send to a closed channel")
+ matcher.channelClosed = true
+ }
+ }()
+
+ winnerIndex, _, _ := reflect.Select([]reflect.SelectCase{
+ {Dir: reflect.SelectSend, Chan: channelValue, Send: argValue},
+ {Dir: reflect.SelectDefault},
+ })
+
+ var didSend bool
+ if winnerIndex == 0 {
+ didSend = true
+ }
+
+ return didSend, nil
+}
+
+func (matcher *BeSentMatcher) FailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "to send:", matcher.Arg)
+}
+
+func (matcher *BeSentMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to send:", matcher.Arg)
+}
+
+func (matcher *BeSentMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+ if !isChan(actual) {
+ return false
+ }
+
+ return !matcher.channelClosed
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go
new file mode 100644
index 0000000..dec4db0
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go
@@ -0,0 +1,68 @@
+// untested sections: 3
+
+package matchers
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/onsi/gomega/format"
+)
+
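+// Illustrative usage via the public BeTemporally constructor (an editorial
+// sketch, not part of the upstream source; for "~" the threshold defaults to
+// time.Millisecond, as Match below implements):
+//
+// Expect(t1).Should(BeTemporally("~", t2, time.Second))
+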
+type BeTemporallyMatcher struct {
+ Comparator string
+ CompareTo time.Time
+ Threshold []time.Duration
+}
+
+func (matcher *BeTemporallyMatcher) FailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, fmt.Sprintf("to be %s", matcher.Comparator), matcher.CompareTo)
+}
+
+func (matcher *BeTemporallyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, fmt.Sprintf("not to be %s", matcher.Comparator), matcher.CompareTo)
+}
+
+func (matcher *BeTemporallyMatcher) Match(actual interface{}) (bool, error) {
+ // predicate to test for time.Time type
+ isTime := func(t interface{}) bool {
+ _, ok := t.(time.Time)
+ return ok
+ }
+
+ if !isTime(actual) {
+ return false, fmt.Errorf("Expected a time.Time. Got:\n%s", format.Object(actual, 1))
+ }
+
+ switch matcher.Comparator {
+ case "==", "~", ">", ">=", "<", "<=":
+ default:
+ return false, fmt.Errorf("Unknown comparator: %s", matcher.Comparator)
+ }
+
+ var threshold = time.Millisecond
+ if len(matcher.Threshold) == 1 {
+ threshold = matcher.Threshold[0]
+ }
+
+ return matcher.matchTimes(actual.(time.Time), matcher.CompareTo, threshold), nil
+}
+
+func (matcher *BeTemporallyMatcher) matchTimes(actual, compareTo time.Time, threshold time.Duration) (success bool) {
+ switch matcher.Comparator {
+ case "==":
+ return actual.Equal(compareTo)
+ case "~":
+ diff := actual.Sub(compareTo)
+ return -threshold <= diff && diff <= threshold
+ case ">":
+ return actual.After(compareTo)
+ case ">=":
+ return !actual.Before(compareTo)
+ case "<":
+ return actual.Before(compareTo)
+ case "<=":
+ return !actual.After(compareTo)
+ }
+ return false
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go
new file mode 100644
index 0000000..3576aac
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go
@@ -0,0 +1,37 @@
+// untested sections: 2
+
+package matchers
+
+import (
+ "fmt"
+
+ "github.com/onsi/gomega/format"
+)
+
+type BeTrueMatcher struct {
+ Reason string
+}
+
+func (matcher *BeTrueMatcher) Match(actual interface{}) (success bool, err error) {
+ if !isBool(actual) {
+ return false, fmt.Errorf("Expected a boolean. Got:\n%s", format.Object(actual, 1))
+ }
+
+ return actual.(bool), nil
+}
+
+func (matcher *BeTrueMatcher) FailureMessage(actual interface{}) (message string) {
+ if matcher.Reason == "" {
+ return format.Message(actual, "to be true")
+ } else {
+ return matcher.Reason
+ }
+}
+
+func (matcher *BeTrueMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ if matcher.Reason == "" {
+ return format.Message(actual, "not to be true")
+ } else {
+ return fmt.Sprintf("Expected not true but got true\nNegation of %q failed", matcher.Reason)
+ }
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go
new file mode 100644
index 0000000..26196f1
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go
@@ -0,0 +1,28 @@
+package matchers
+
+import (
+ "reflect"
+
+ "github.com/onsi/gomega/format"
+)
+
+type BeZeroMatcher struct {
+}
+
+func (matcher *BeZeroMatcher) Match(actual interface{}) (success bool, err error) {
+ if actual == nil {
+ return true, nil
+ }
+ zeroValue := reflect.Zero(reflect.TypeOf(actual)).Interface()
+
+ return reflect.DeepEqual(zeroValue, actual), nil
+
+}
+
+func (matcher *BeZeroMatcher) FailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "to be zero-valued")
+}
+
+func (matcher *BeZeroMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to be zero-valued")
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/consist_of.go b/vendor/github.com/onsi/gomega/matchers/consist_of.go
new file mode 100644
index 0000000..f69037a
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/consist_of.go
@@ -0,0 +1,153 @@
+// untested sections: 3
+
+package matchers
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/matchers/support/goraph/bipartitegraph"
+)
+
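+// Illustrative usage via the public ConsistOf constructor (an editorial
+// sketch, not part of the upstream source; order is ignored, with matching
+// done through the bipartite graph below):
+//
+// Expect([]string{"b", "a"}).Should(ConsistOf("a", "b"))
+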
+type ConsistOfMatcher struct {
+ Elements []interface{}
+ missingElements []interface{}
+ extraElements []interface{}
+}
+
+func (matcher *ConsistOfMatcher) Match(actual interface{}) (success bool, err error) {
+ if !isArrayOrSlice(actual) && !isMap(actual) {
+ return false, fmt.Errorf("ConsistOf matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1))
+ }
+
+ matchers := matchers(matcher.Elements)
+ values := valuesOf(actual)
+
+ bipartiteGraph, err := bipartitegraph.NewBipartiteGraph(values, matchers, neighbours)
+ if err != nil {
+ return false, err
+ }
+
+ edges := bipartiteGraph.LargestMatching()
+ if len(edges) == len(values) && len(edges) == len(matchers) {
+ return true, nil
+ }
+
+ var missingMatchers []interface{}
+ matcher.extraElements, missingMatchers = bipartiteGraph.FreeLeftRight(edges)
+ matcher.missingElements = equalMatchersToElements(missingMatchers)
+
+ return false, nil
+}
+
+func neighbours(value, matcher interface{}) (bool, error) {
+ match, err := matcher.(omegaMatcher).Match(value)
+ return match && err == nil, nil
+}
+
+func equalMatchersToElements(matchers []interface{}) (elements []interface{}) {
+ for _, matcher := range matchers {
+ if equalMatcher, ok := matcher.(*EqualMatcher); ok {
+ elements = append(elements, equalMatcher.Expected)
+ } else if _, ok := matcher.(*BeNilMatcher); ok {
+ elements = append(elements, nil)
+ } else {
+ elements = append(elements, matcher)
+ }
+ }
+ return
+}
+
+func flatten(elems []interface{}) []interface{} {
+ if len(elems) != 1 || !isArrayOrSlice(elems[0]) {
+ return elems
+ }
+
+ value := reflect.ValueOf(elems[0])
+ flattened := make([]interface{}, value.Len())
+ for i := 0; i < value.Len(); i++ {
+ flattened[i] = value.Index(i).Interface()
+ }
+ return flattened
+}
+
+func matchers(expectedElems []interface{}) (matchers []interface{}) {
+ for _, e := range flatten(expectedElems) {
+ if e == nil {
+ matchers = append(matchers, &BeNilMatcher{})
+ } else if matcher, isMatcher := e.(omegaMatcher); isMatcher {
+ matchers = append(matchers, matcher)
+ } else {
+ matchers = append(matchers, &EqualMatcher{Expected: e})
+ }
+ }
+ return
+}
+
+func presentable(elems []interface{}) interface{} {
+ elems = flatten(elems)
+
+ if len(elems) == 0 {
+ return []interface{}{}
+ }
+
+ sv := reflect.ValueOf(elems)
+ firstEl := sv.Index(0)
+ if firstEl.IsNil() {
+ return elems
+ }
+ tt := firstEl.Elem().Type()
+ for i := 1; i < sv.Len(); i++ {
+ el := sv.Index(i)
+ if el.IsNil() || (sv.Index(i).Elem().Type() != tt) {
+ return elems
+ }
+ }
+
+ ss := reflect.MakeSlice(reflect.SliceOf(tt), sv.Len(), sv.Len())
+ for i := 0; i < sv.Len(); i++ {
+ ss.Index(i).Set(sv.Index(i).Elem())
+ }
+
+ return ss.Interface()
+}
+
+func valuesOf(actual interface{}) []interface{} {
+ value := reflect.ValueOf(actual)
+ values := []interface{}{}
+ if isMap(actual) {
+ keys := value.MapKeys()
+ for i := 0; i < value.Len(); i++ {
+ values = append(values, value.MapIndex(keys[i]).Interface())
+ }
+ } else {
+ for i := 0; i < value.Len(); i++ {
+ values = append(values, value.Index(i).Interface())
+ }
+ }
+
+ return values
+}
+
+func (matcher *ConsistOfMatcher) FailureMessage(actual interface{}) (message string) {
+ message = format.Message(actual, "to consist of", presentable(matcher.Elements))
+ message = appendMissingElements(message, matcher.missingElements)
+ if len(matcher.extraElements) > 0 {
+ message = fmt.Sprintf("%s\nthe extra elements were\n%s", message,
+ format.Object(presentable(matcher.extraElements), 1))
+ }
+ return
+}
+
+func appendMissingElements(message string, missingElements []interface{}) string {
+ if len(missingElements) == 0 {
+ return message
+ }
+ return fmt.Sprintf("%s\nthe missing elements were\n%s", message,
+ format.Object(presentable(missingElements), 1))
+}
+
+func (matcher *ConsistOfMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to consist of", presentable(matcher.Elements))
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go
new file mode 100644
index 0000000..3d45c9e
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go
@@ -0,0 +1,174 @@
+// untested sections: 2
+
+package matchers
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+
+ "github.com/onsi/gomega/format"
+)
+
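+// Illustrative usage via the public ContainElement constructor (an editorial
+// sketch, not part of the upstream source; the optional second argument
+// receives the findings, as Match below implements):
+//
+// var found []string
+// Expect([]string{"a", "ab"}).Should(ContainElement(HavePrefix("a"), &found))
+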
+type ContainElementMatcher struct {
+ Element interface{}
+ Result []interface{}
+}
+
+func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, err error) {
+ if !isArrayOrSlice(actual) && !isMap(actual) {
+ return false, fmt.Errorf("ContainElement matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1))
+ }
+
+ var actualT reflect.Type
+ var result reflect.Value
+ switch l := len(matcher.Result); {
+ case l > 1:
+ return false, errors.New("ContainElement matcher expects at most a single optional pointer to store its findings at")
+ case l == 1:
+ if reflect.ValueOf(matcher.Result[0]).Kind() != reflect.Ptr {
+ return false, fmt.Errorf("ContainElement matcher expects a non-nil pointer to store its findings at. Got\n%s",
+ format.Object(matcher.Result[0], 1))
+ }
+ actualT = reflect.TypeOf(actual)
+ resultReference := matcher.Result[0]
+ result = reflect.ValueOf(resultReference).Elem() // what ResultReference points to, to stash away our findings
+ switch result.Kind() {
+ case reflect.Array:
+ return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
+ reflect.SliceOf(actualT.Elem()).String(), result.Type().String())
+ case reflect.Slice:
+ if !isArrayOrSlice(actual) {
+ return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
+ reflect.MapOf(actualT.Key(), actualT.Elem()).String(), result.Type().String())
+ }
+ if !actualT.Elem().AssignableTo(result.Type().Elem()) {
+ return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
+ actualT.String(), result.Type().String())
+ }
+ case reflect.Map:
+ if !isMap(actual) {
+ return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
+ actualT.String(), result.Type().String())
+ }
+ if !actualT.AssignableTo(result.Type()) {
+ return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
+ actualT.String(), result.Type().String())
+ }
+ default:
+ if !actualT.Elem().AssignableTo(result.Type()) {
+ return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
+ actualT.Elem().String(), result.Type().String())
+ }
+ }
+ }
+
+ elemMatcher, elementIsMatcher := matcher.Element.(omegaMatcher)
+ if !elementIsMatcher {
+ elemMatcher = &EqualMatcher{Expected: matcher.Element}
+ }
+
+ value := reflect.ValueOf(actual)
+ var valueAt func(int) interface{}
+
+ var getFindings func() reflect.Value
+ var foundAt func(int)
+
+ if isMap(actual) {
+ keys := value.MapKeys()
+ valueAt = func(i int) interface{} {
+ return value.MapIndex(keys[i]).Interface()
+ }
+ if result.Kind() != reflect.Invalid {
+ fm := reflect.MakeMap(actualT)
+ getFindings = func() reflect.Value {
+ return fm
+ }
+ foundAt = func(i int) {
+ fm.SetMapIndex(keys[i], value.MapIndex(keys[i]))
+ }
+ }
+ } else {
+ valueAt = func(i int) interface{} {
+ return value.Index(i).Interface()
+ }
+ if result.Kind() != reflect.Invalid {
+ var f reflect.Value
+ if result.Kind() == reflect.Slice {
+ f = reflect.MakeSlice(result.Type(), 0, 0)
+ } else {
+ f = reflect.MakeSlice(reflect.SliceOf(result.Type()), 0, 0)
+ }
+ getFindings = func() reflect.Value {
+ return f
+ }
+ foundAt = func(i int) {
+ f = reflect.Append(f, value.Index(i))
+ }
+ }
+ }
+
+ var lastError error
+ for i := 0; i < value.Len(); i++ {
+ elem := valueAt(i)
+ success, err := elemMatcher.Match(elem)
+ if err != nil {
+ lastError = err
+ continue
+ }
+ if success {
+ if result.Kind() == reflect.Invalid {
+ return true, nil
+ }
+ foundAt(i)
+ }
+ }
+
+ // when the expectation isn't interested in the findings except for success
+ // or non-success, then we're done here and return the last matcher error
+ // seen, if any, as well as non-success.
+ if result.Kind() == reflect.Invalid {
+ return false, lastError
+ }
+
+ // pick up any findings the test is interested in, as it specified a non-nil
+ // result reference. However, the expectation is that there is at least one
+ // finding. So, if a result is expected but we had no findings, then this is
+ // an error.
+ findings := getFindings()
+ if findings.Len() == 0 {
+ return false, lastError
+ }
+
+ // there's just a single finding and the result is neither a slice nor a map
+ // (so it's a scalar): pick the one and only finding and return it in the
+ // place the reference points to.
+ if findings.Len() == 1 && !isArrayOrSlice(result.Interface()) && !isMap(result.Interface()) {
+ if isMap(actual) {
+ miter := findings.MapRange()
+ miter.Next()
+ result.Set(miter.Value())
+ } else {
+ result.Set(findings.Index(0))
+ }
+ return true, nil
+ }
+
+ // at least one or even multiple findings, and the result references a
+ // slice or a map, so all we need to do is to store our findings where the
+ // reference points to.
+ if !findings.Type().AssignableTo(result.Type()) {
+ return false, fmt.Errorf("ContainElement cannot return multiple findings. Need *%s, got *%s",
+ findings.Type().String(), result.Type().String())
+ }
+ result.Set(findings)
+ return true, nil
+}
+
+func (matcher *ContainElementMatcher) FailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "to contain element matching", matcher.Element)
+}
+
+func (matcher *ContainElementMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to contain element matching", matcher.Element)
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go
new file mode 100644
index 0000000..946cd8b
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go
@@ -0,0 +1,44 @@
+package matchers
+
+import (
+ "fmt"
+
+ "github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/matchers/support/goraph/bipartitegraph"
+)
+
+type ContainElementsMatcher struct {
+ Elements []interface{}
+ missingElements []interface{}
+}
+
+func (matcher *ContainElementsMatcher) Match(actual interface{}) (success bool, err error) {
+ if !isArrayOrSlice(actual) && !isMap(actual) {
+ return false, fmt.Errorf("ContainElements matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1))
+ }
+
+ matchers := matchers(matcher.Elements)
+ bipartiteGraph, err := bipartitegraph.NewBipartiteGraph(valuesOf(actual), matchers, neighbours)
+ if err != nil {
+ return false, err
+ }
+
+ edges := bipartiteGraph.LargestMatching()
+ if len(edges) == len(matchers) {
+ return true, nil
+ }
+
+ _, missingMatchers := bipartiteGraph.FreeLeftRight(edges)
+ matcher.missingElements = equalMatchersToElements(missingMatchers)
+
+ return false, nil
+}
+
+func (matcher *ContainElementsMatcher) FailureMessage(actual interface{}) (message string) {
+ message = format.Message(actual, "to contain elements", presentable(matcher.Elements))
+ return appendMissingElements(message, matcher.missingElements)
+}
+
+func (matcher *ContainElementsMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to contain elements", presentable(matcher.Elements))
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go
new file mode 100644
index 0000000..e725f8c
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go
@@ -0,0 +1,40 @@
+// untested sections: 2
+
+package matchers
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/onsi/gomega/format"
+)
+
+type ContainSubstringMatcher struct {
+ Substr string
+ Args []interface{}
+}
+
+func (matcher *ContainSubstringMatcher) Match(actual interface{}) (success bool, err error) {
+ actualString, ok := toString(actual)
+ if !ok {
+ return false, fmt.Errorf("ContainSubstring matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1))
+ }
+
+ return strings.Contains(actualString, matcher.stringToMatch()), nil
+}
+
+func (matcher *ContainSubstringMatcher) stringToMatch() string {
+ stringToMatch := matcher.Substr
+ if len(matcher.Args) > 0 {
+ stringToMatch = fmt.Sprintf(matcher.Substr, matcher.Args...)
+ }
+ return stringToMatch
+}
+
+func (matcher *ContainSubstringMatcher) FailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "to contain substring", matcher.stringToMatch())
+}
+
+func (matcher *ContainSubstringMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to contain substring", matcher.stringToMatch())
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/equal_matcher.go b/vendor/github.com/onsi/gomega/matchers/equal_matcher.go
new file mode 100644
index 0000000..befb7bd
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/equal_matcher.go
@@ -0,0 +1,42 @@
+package matchers
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+
+ "github.com/onsi/gomega/format"
+)
+
+type EqualMatcher struct {
+ Expected interface{}
+}
+
+func (matcher *EqualMatcher) Match(actual interface{}) (success bool, err error) {
+ if actual == nil && matcher.Expected == nil {
+ return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
+ }
+ // Shortcut for byte slices.
+ // Comparing long byte slices with reflect.DeepEqual is very slow,
+ // so use bytes.Equal if actual and expected are both byte slices.
+ if actualByteSlice, ok := actual.([]byte); ok {
+ if expectedByteSlice, ok := matcher.Expected.([]byte); ok {
+ return bytes.Equal(actualByteSlice, expectedByteSlice), nil
+ }
+ }
+ return reflect.DeepEqual(actual, matcher.Expected), nil
+}
+
+func (matcher *EqualMatcher) FailureMessage(actual interface{}) (message string) {
+ actualString, actualOK := actual.(string)
+ expectedString, expectedOK := matcher.Expected.(string)
+ if actualOK && expectedOK {
+ return format.MessageWithDiff(actualString, "to equal", expectedString)
+ }
+
+ return format.Message(actual, "to equal", matcher.Expected)
+}
+
+func (matcher *EqualMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to equal", matcher.Expected)
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go
new file mode 100644
index 0000000..9856752
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go
@@ -0,0 +1,30 @@
+// untested sections: 2
+
+package matchers
+
+import (
+ "fmt"
+
+ "github.com/onsi/gomega/format"
+)
+
+type HaveCapMatcher struct {
+ Count int
+}
+
+func (matcher *HaveCapMatcher) Match(actual interface{}) (success bool, err error) {
+ length, ok := capOf(actual)
+ if !ok {
+ return false, fmt.Errorf("HaveCap matcher expects a array/channel/slice. Got:\n%s", format.Object(actual, 1))
+ }
+
+ return length == matcher.Count, nil
+}
+
+func (matcher *HaveCapMatcher) FailureMessage(actual interface{}) (message string) {
+ return fmt.Sprintf("Expected\n%s\nto have capacity %d", format.Object(actual, 1), matcher.Count)
+}
+
+func (matcher *HaveCapMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return fmt.Sprintf("Expected\n%s\nnot to have capacity %d", format.Object(actual, 1), matcher.Count)
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go
new file mode 100644
index 0000000..025b6e1
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go
@@ -0,0 +1,65 @@
+package matchers
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/onsi/gomega/format"
+)
+
+type HaveEachMatcher struct {
+ Element interface{}
+}
+
+func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err error) {
+ if !isArrayOrSlice(actual) && !isMap(actual) {
+ return false, fmt.Errorf("HaveEach matcher expects an array/slice/map. Got:\n%s",
+ format.Object(actual, 1))
+ }
+
+ elemMatcher, elementIsMatcher := matcher.Element.(omegaMatcher)
+ if !elementIsMatcher {
+ elemMatcher = &EqualMatcher{Expected: matcher.Element}
+ }
+
+ value := reflect.ValueOf(actual)
+ if value.Len() == 0 {
+ return false, fmt.Errorf("HaveEach matcher expects a non-empty array/slice/map. Got:\n%s",
+ format.Object(actual, 1))
+ }
+
+ var valueAt func(int) interface{}
+ if isMap(actual) {
+ keys := value.MapKeys()
+ valueAt = func(i int) interface{} {
+ return value.MapIndex(keys[i]).Interface()
+ }
+ } else {
+ valueAt = func(i int) interface{} {
+ return value.Index(i).Interface()
+ }
+ }
+
+ // every element must match; the empty case was already rejected above.
+ for i := 0; i < value.Len(); i++ {
+ success, err := elemMatcher.Match(valueAt(i))
+ if err != nil {
+ return false, err
+ }
+ if !success {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
+
+// FailureMessage returns a suitable failure message.
+func (matcher *HaveEachMatcher) FailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "to contain element matching", matcher.Element)
+}
+
+// NegatedFailureMessage returns a suitable negated failure message.
+func (matcher *HaveEachMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to contain element matching", matcher.Element)
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go
new file mode 100644
index 0000000..dca5b94
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go
@@ -0,0 +1,88 @@
+package matchers
+
+import (
+ "fmt"
+
+ "github.com/onsi/gomega/format"
+)
+
+type mismatchFailure struct {
+ failure string
+ index int
+}
+
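+// Illustrative usage via the public HaveExactElements constructor (an
+// editorial sketch, not part of the upstream source; unlike ConsistOf,
+// element order matters):
+//
+// Expect([]string{"a", "b"}).Should(HaveExactElements("a", "b"))
+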
+type HaveExactElementsMatcher struct {
+ Elements []interface{}
+ mismatchFailures []mismatchFailure
+ missingIndex int
+ extraIndex int
+}
+
+func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool, err error) {
+ matcher.resetState()
+
+ if isMap(actual) {
+ return false, fmt.Errorf("error")
+ }
+
+ matchers := matchers(matcher.Elements)
+ values := valuesOf(actual)
+
+ lenMatchers := len(matchers)
+ lenValues := len(values)
+
+ for i := 0; i < lenMatchers || i < lenValues; i++ {
+ if i >= lenMatchers {
+ matcher.extraIndex = i
+ continue
+ }
+
+ if i >= lenValues {
+ matcher.missingIndex = i
+ return
+ }
+
+ elemMatcher := matchers[i].(omegaMatcher)
+ match, err := elemMatcher.Match(values[i])
+ if err != nil {
+ matcher.mismatchFailures = append(matcher.mismatchFailures, mismatchFailure{
+ index: i,
+ failure: err.Error(),
+ })
+ } else if !match {
+ matcher.mismatchFailures = append(matcher.mismatchFailures, mismatchFailure{
+ index: i,
+ failure: elemMatcher.FailureMessage(values[i]),
+ })
+ }
+ }
+
+ return matcher.missingIndex+matcher.extraIndex+len(matcher.mismatchFailures) == 0, nil
+}
+
+func (matcher *HaveExactElementsMatcher) FailureMessage(actual interface{}) (message string) {
+ message = format.Message(actual, "to have exact elements with", presentable(matcher.Elements))
+ if matcher.missingIndex > 0 {
+ message = fmt.Sprintf("%s\nthe missing elements start from index %d", message, matcher.missingIndex)
+ }
+ if matcher.extraIndex > 0 {
+ message = fmt.Sprintf("%s\nthe extra elements start from index %d", message, matcher.extraIndex)
+ }
+ if len(matcher.mismatchFailures) != 0 {
+ message = fmt.Sprintf("%s\nthe mismatch indexes were:", message)
+ }
+ for _, mismatch := range matcher.mismatchFailures {
+ message = fmt.Sprintf("%s\n%d: %s", message, mismatch.index, mismatch.failure)
+ }
+ return
+}
+
+func (matcher *HaveExactElementsMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to contain elements", presentable(matcher.Elements))
+}
+
+func (matcher *HaveExactElementsMatcher) resetState() {
+ matcher.mismatchFailures = nil
+ matcher.missingIndex = 0
+ matcher.extraIndex = 0
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go
new file mode 100644
index 0000000..b570187
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go
@@ -0,0 +1,36 @@
+package matchers
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/onsi/gomega/format"
+)
+
+type HaveExistingFieldMatcher struct {
+ Field string
+}
+
+func (matcher *HaveExistingFieldMatcher) Match(actual interface{}) (success bool, err error) {
+ // we don't care about the field's actual value, just about any error in
+ // trying to find the field (or method).
+ _, err = extractField(actual, matcher.Field, "HaveExistingField")
+ if err == nil {
+ return true, nil
+ }
+ var mferr missingFieldError
+ if errors.As(err, &mferr) {
+ // missing field errors aren't errors in this context, but instead
+ // unsuccessful matches.
+ return false, nil
+ }
+ return false, err
+}
+
+func (matcher *HaveExistingFieldMatcher) FailureMessage(actual interface{}) (message string) {
+ return fmt.Sprintf("Expected\n%s\nto have field '%s'", format.Object(actual, 1), matcher.Field)
+}
+
+func (matcher *HaveExistingFieldMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return fmt.Sprintf("Expected\n%s\nnot to have field '%s'", format.Object(actual, 1), matcher.Field)
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_field.go b/vendor/github.com/onsi/gomega/matchers/have_field.go
new file mode 100644
index 0000000..6989f78
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/have_field.go
@@ -0,0 +1,99 @@
+package matchers
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/onsi/gomega/format"
+)
+
+// missingFieldError represents a missing field extraction error that
+// HaveExistingFieldMatcher can ignore, as opposed to other, more severe field
+// extraction errors, such as nil pointers, et cetera.
+type missingFieldError string
+
+func (e missingFieldError) Error() string {
+ return string(e)
+}
+
+func extractField(actual interface{}, field string, matchername string) (interface{}, error) {
+ fields := strings.SplitN(field, ".", 2)
+ actualValue := reflect.ValueOf(actual)
+
+ if actualValue.Kind() == reflect.Ptr {
+ actualValue = actualValue.Elem()
+ }
+ if actualValue == (reflect.Value{}) {
+ return nil, fmt.Errorf("%s encountered nil while dereferencing a pointer of type %T.", matchername, actual)
+ }
+
+ if actualValue.Kind() != reflect.Struct {
+ return nil, fmt.Errorf("%s encountered:\n%s\nWhich is not a struct.", matchername, format.Object(actual, 1))
+ }
+
+ var extractedValue reflect.Value
+
+ if strings.HasSuffix(fields[0], "()") {
+ extractedValue = actualValue.MethodByName(strings.TrimSuffix(fields[0], "()"))
+ if extractedValue == (reflect.Value{}) && actualValue.CanAddr() {
+ extractedValue = actualValue.Addr().MethodByName(strings.TrimSuffix(fields[0], "()"))
+ }
+ if extractedValue == (reflect.Value{}) {
+ return nil, missingFieldError(fmt.Sprintf("%s could not find method named '%s' in struct of type %T.", matchername, fields[0], actual))
+ }
+ t := extractedValue.Type()
+ if t.NumIn() != 0 || t.NumOut() != 1 {
+ return nil, fmt.Errorf("%s found an invalid method named '%s' in struct of type %T.\nMethods must take no arguments and return exactly one value.", matchername, fields[0], actual)
+ }
+ extractedValue = extractedValue.Call([]reflect.Value{})[0]
+ } else {
+ extractedValue = actualValue.FieldByName(fields[0])
+ if extractedValue == (reflect.Value{}) {
+ return nil, missingFieldError(fmt.Sprintf("%s could not find field named '%s' in struct:\n%s", matchername, fields[0], format.Object(actual, 1)))
+ }
+ }
+
+ if len(fields) == 1 {
+ return extractedValue.Interface(), nil
+ } else {
+ return extractField(extractedValue.Interface(), fields[1], matchername)
+ }
+}
+
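+// Illustrative usage via the public HaveField constructor (an editorial
+// sketch, not part of the upstream source; extractField above supports dotted
+// field chains and no-argument methods named with a "()" suffix; book is a
+// hypothetical struct value):
+//
+// Expect(book).Should(HaveField("Author.FirstName", Equal("Victor")))
+// Expect(book).Should(HaveField("Pages()", BeNumerically(">", 100)))
+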
+type HaveFieldMatcher struct {
+ Field string
+ Expected interface{}
+
+ extractedField interface{}
+ expectedMatcher omegaMatcher
+}
+
+func (matcher *HaveFieldMatcher) Match(actual interface{}) (success bool, err error) {
+ matcher.extractedField, err = extractField(actual, matcher.Field, "HaveField")
+ if err != nil {
+ return false, err
+ }
+
+ var isMatcher bool
+ matcher.expectedMatcher, isMatcher = matcher.Expected.(omegaMatcher)
+ if !isMatcher {
+ matcher.expectedMatcher = &EqualMatcher{Expected: matcher.Expected}
+ }
+
+ return matcher.expectedMatcher.Match(matcher.extractedField)
+}
+
+func (matcher *HaveFieldMatcher) FailureMessage(actual interface{}) (message string) {
+ message = fmt.Sprintf("Value for field '%s' failed to satisfy matcher.\n", matcher.Field)
+ message += matcher.expectedMatcher.FailureMessage(matcher.extractedField)
+
+ return message
+}
+
+func (matcher *HaveFieldMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ message = fmt.Sprintf("Value for field '%s' satisfied matcher, but should not have.\n", matcher.Field)
+ message += matcher.expectedMatcher.NegatedFailureMessage(matcher.extractedField)
+
+ return message
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go
new file mode 100644
index 0000000..d14d9e5
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go
@@ -0,0 +1,104 @@
+package matchers
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+
+ "github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/internal/gutil"
+ "github.com/onsi/gomega/types"
+)
+
+type HaveHTTPBodyMatcher struct {
+ Expected interface{}
+ cachedResponse interface{}
+ cachedBody []byte
+}
+
+func (matcher *HaveHTTPBodyMatcher) Match(actual interface{}) (bool, error) {
+ body, err := matcher.body(actual)
+ if err != nil {
+ return false, err
+ }
+
+ switch e := matcher.Expected.(type) {
+ case string:
+ return (&EqualMatcher{Expected: e}).Match(string(body))
+ case []byte:
+ return (&EqualMatcher{Expected: e}).Match(body)
+ case types.GomegaMatcher:
+ return e.Match(body)
+ default:
+ return false, fmt.Errorf("HaveHTTPBody matcher expects string, []byte, or GomegaMatcher. Got:\n%s", format.Object(matcher.Expected, 1))
+ }
+}
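+
+// Example (a sketch; HaveHTTPBody is the gomega-level wrapper, and resp is a
+// hypothetical *http.Response). The expected body may be a string, a []byte,
+// or another matcher:
+//
+//   Expect(resp).To(HaveHTTPBody(`{"ok": true}`))
+//   Expect(resp).To(HaveHTTPBody(MatchJSON(`{"ok":true}`)))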
+
+func (matcher *HaveHTTPBodyMatcher) FailureMessage(actual interface{}) (message string) {
+ body, err := matcher.body(actual)
+ if err != nil {
+ return fmt.Sprintf("failed to read body: %s", err)
+ }
+
+ switch e := matcher.Expected.(type) {
+ case string:
+ return (&EqualMatcher{Expected: e}).FailureMessage(string(body))
+ case []byte:
+ return (&EqualMatcher{Expected: e}).FailureMessage(body)
+ case types.GomegaMatcher:
+ return e.FailureMessage(body)
+ default:
+ return fmt.Sprintf("HaveHTTPBody matcher expects string, []byte, or GomegaMatcher. Got:\n%s", format.Object(matcher.Expected, 1))
+ }
+}
+
+func (matcher *HaveHTTPBodyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ body, err := matcher.body(actual)
+ if err != nil {
+ return fmt.Sprintf("failed to read body: %s", err)
+ }
+
+ switch e := matcher.Expected.(type) {
+ case string:
+ return (&EqualMatcher{Expected: e}).NegatedFailureMessage(string(body))
+ case []byte:
+ return (&EqualMatcher{Expected: e}).NegatedFailureMessage(body)
+ case types.GomegaMatcher:
+ return e.NegatedFailureMessage(body)
+ default:
+ return fmt.Sprintf("HaveHTTPBody matcher expects string, []byte, or GomegaMatcher. Got:\n%s", format.Object(matcher.Expected, 1))
+ }
+}
+
+// body returns the response body. It is cached because once we read it in
+// Match() the Reader is closed, so it cannot be read again in
+// FailureMessage() or NegatedFailureMessage().
+func (matcher *HaveHTTPBodyMatcher) body(actual interface{}) ([]byte, error) {
+ if matcher.cachedResponse == actual && matcher.cachedBody != nil {
+ return matcher.cachedBody, nil
+ }
+
+ body := func(a *http.Response) ([]byte, error) {
+ if a.Body != nil {
+ defer a.Body.Close()
+ var err error
+ matcher.cachedBody, err = gutil.ReadAll(a.Body)
+ if err != nil {
+ return nil, fmt.Errorf("error reading response body: %w", err)
+ }
+ }
+ return matcher.cachedBody, nil
+ }
+
+ switch a := actual.(type) {
+ case *http.Response:
+ matcher.cachedResponse = a
+ return body(a)
+ case *httptest.ResponseRecorder:
+ matcher.cachedResponse = a
+ return body(a.Result())
+ default:
+ return nil, fmt.Errorf("HaveHTTPBody matcher expects *http.Response or *httptest.ResponseRecorder. Got:\n%s", format.Object(actual, 1))
+ }
+
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go
new file mode 100644
index 0000000..c256f45
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go
@@ -0,0 +1,81 @@
+package matchers
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+
+ "github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/types"
+)
+
+type HaveHTTPHeaderWithValueMatcher struct {
+ Header string
+ Value interface{}
+}
+
+func (matcher *HaveHTTPHeaderWithValueMatcher) Match(actual interface{}) (success bool, err error) {
+ headerValue, err := matcher.extractHeader(actual)
+ if err != nil {
+ return false, err
+ }
+
+ headerMatcher, err := matcher.getSubMatcher()
+ if err != nil {
+ return false, err
+ }
+
+ return headerMatcher.Match(headerValue)
+}
+
+func (matcher *HaveHTTPHeaderWithValueMatcher) FailureMessage(actual interface{}) string {
+ headerValue, err := matcher.extractHeader(actual)
+ if err != nil {
+ panic(err) // protected by Match()
+ }
+
+ headerMatcher, err := matcher.getSubMatcher()
+ if err != nil {
+ panic(err) // protected by Match()
+ }
+
+ diff := format.IndentString(headerMatcher.FailureMessage(headerValue), 1)
+ return fmt.Sprintf("HTTP header %q:\n%s", matcher.Header, diff)
+}
+
+func (matcher *HaveHTTPHeaderWithValueMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ headerValue, err := matcher.extractHeader(actual)
+ if err != nil {
+ panic(err) // protected by Match()
+ }
+
+ headerMatcher, err := matcher.getSubMatcher()
+ if err != nil {
+ panic(err) // protected by Match()
+ }
+
+ diff := format.IndentString(headerMatcher.NegatedFailureMessage(headerValue), 1)
+ return fmt.Sprintf("HTTP header %q:\n%s", matcher.Header, diff)
+}
+
+func (matcher *HaveHTTPHeaderWithValueMatcher) getSubMatcher() (types.GomegaMatcher, error) {
+ switch m := matcher.Value.(type) {
+ case string:
+ return &EqualMatcher{Expected: matcher.Value}, nil
+ case types.GomegaMatcher:
+ return m, nil
+ default:
+ return nil, fmt.Errorf("HaveHTTPHeaderWithValue matcher must be passed a string or a GomegaMatcher. Got:\n%s", format.Object(matcher.Value, 1))
+ }
+}
+
+func (matcher *HaveHTTPHeaderWithValueMatcher) extractHeader(actual interface{}) (string, error) {
+ switch r := actual.(type) {
+ case *http.Response:
+ return r.Header.Get(matcher.Header), nil
+ case *httptest.ResponseRecorder:
+ return r.Result().Header.Get(matcher.Header), nil
+ default:
+ return "", fmt.Errorf("HaveHTTPHeaderWithValue matcher expects *http.Response or *httptest.ResponseRecorder. Got:\n%s", format.Object(actual, 1))
+ }
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go
new file mode 100644
index 0000000..0f66e46
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go
@@ -0,0 +1,96 @@
+package matchers
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "reflect"
+ "strings"
+
+ "github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/internal/gutil"
+)
+
+type HaveHTTPStatusMatcher struct {
+ Expected []interface{}
+}
+
+func (matcher *HaveHTTPStatusMatcher) Match(actual interface{}) (success bool, err error) {
+ var resp *http.Response
+ switch a := actual.(type) {
+ case *http.Response:
+ resp = a
+ case *httptest.ResponseRecorder:
+ resp = a.Result()
+ default:
+ return false, fmt.Errorf("HaveHTTPStatus matcher expects *http.Response or *httptest.ResponseRecorder. Got:\n%s", format.Object(actual, 1))
+ }
+
+ if len(matcher.Expected) == 0 {
+ return false, fmt.Errorf("HaveHTTPStatus matcher must be passed an int or a string. Got nothing")
+ }
+
+ for _, expected := range matcher.Expected {
+ switch e := expected.(type) {
+ case int:
+ if resp.StatusCode == e {
+ return true, nil
+ }
+ case string:
+ if resp.Status == e {
+ return true, nil
+ }
+ default:
+ return false, fmt.Errorf("HaveHTTPStatus matcher must be passed int or string types. Got:\n%s", format.Object(expected, 1))
+ }
+ }
+
+ return false, nil
+}
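+
+// Example (a sketch; HaveHTTPStatus is the gomega-level wrapper, and resp is
+// a hypothetical *http.Response). Any mix of int codes and full status
+// strings may be passed; the match succeeds if the response matches any one:
+//
+//   Expect(resp).To(HaveHTTPStatus(http.StatusOK, http.StatusNoContent))
+//   Expect(resp).To(HaveHTTPStatus("200 OK"))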
+
+func (matcher *HaveHTTPStatusMatcher) FailureMessage(actual interface{}) (message string) {
+ return fmt.Sprintf("Expected\n%s\n%s\n%s", formatHttpResponse(actual), "to have HTTP status", matcher.expectedString())
+}
+
+func (matcher *HaveHTTPStatusMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return fmt.Sprintf("Expected\n%s\n%s\n%s", formatHttpResponse(actual), "not to have HTTP status", matcher.expectedString())
+}
+
+func (matcher *HaveHTTPStatusMatcher) expectedString() string {
+ var lines []string
+ for _, expected := range matcher.Expected {
+ lines = append(lines, format.Object(expected, 1))
+ }
+ return strings.Join(lines, "\n")
+}
+
+func formatHttpResponse(input interface{}) string {
+ var resp *http.Response
+ switch r := input.(type) {
+ case *http.Response:
+ resp = r
+ case *httptest.ResponseRecorder:
+ resp = r.Result()
+ default:
+ return "cannot format invalid HTTP response"
+ }
+
+ body := ""
+ if resp.Body != nil {
+ defer resp.Body.Close()
+ data, err := gutil.ReadAll(resp.Body)
+ if err != nil {
+ data = []byte("<error reading body>")
+ }
+ body = format.Object(string(data), 0)
+ }
+
+ var s strings.Builder
+ s.WriteString(fmt.Sprintf("%s<%s>: {\n", format.Indent, reflect.TypeOf(input)))
+ s.WriteString(fmt.Sprintf("%s%sStatus: %s\n", format.Indent, format.Indent, format.Object(resp.Status, 0)))
+ s.WriteString(fmt.Sprintf("%s%sStatusCode: %s\n", format.Indent, format.Indent, format.Object(resp.StatusCode, 0)))
+ s.WriteString(fmt.Sprintf("%s%sBody: %s\n", format.Indent, format.Indent, body))
+ s.WriteString(fmt.Sprintf("%s}", format.Indent))
+
+ return s.String()
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go
new file mode 100644
index 0000000..00cffec
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go
@@ -0,0 +1,56 @@
+// untested sections: 6
+
+package matchers
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/onsi/gomega/format"
+)
+
+type HaveKeyMatcher struct {
+ Key interface{}
+}
+
+func (matcher *HaveKeyMatcher) Match(actual interface{}) (success bool, err error) {
+ if !isMap(actual) {
+ return false, fmt.Errorf("HaveKey matcher expects a map. Got:%s", format.Object(actual, 1))
+ }
+
+ keyMatcher, keyIsMatcher := matcher.Key.(omegaMatcher)
+ if !keyIsMatcher {
+ keyMatcher = &EqualMatcher{Expected: matcher.Key}
+ }
+
+ keys := reflect.ValueOf(actual).MapKeys()
+ for i := 0; i < len(keys); i++ {
+ success, err := keyMatcher.Match(keys[i].Interface())
+ if err != nil {
+ return false, fmt.Errorf("HaveKey's key matcher failed with:\n%s%s", format.Indent, err.Error())
+ }
+ if success {
+ return true, nil
+ }
+ }
+
+ return false, nil
+}
+
+func (matcher *HaveKeyMatcher) FailureMessage(actual interface{}) (message string) {
+ switch matcher.Key.(type) {
+ case omegaMatcher:
+ return format.Message(actual, "to have key matching", matcher.Key)
+ default:
+ return format.Message(actual, "to have key", matcher.Key)
+ }
+}
+
+func (matcher *HaveKeyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ switch matcher.Key.(type) {
+ case omegaMatcher:
+ return format.Message(actual, "not to have key matching", matcher.Key)
+ default:
+ return format.Message(actual, "not to have key", matcher.Key)
+ }
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go
new file mode 100644
index 0000000..4c59168
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go
@@ -0,0 +1,76 @@
+// untested sections: 10
+
+package matchers
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/onsi/gomega/format"
+)
+
+type HaveKeyWithValueMatcher struct {
+ Key interface{}
+ Value interface{}
+}
+
+func (matcher *HaveKeyWithValueMatcher) Match(actual interface{}) (success bool, err error) {
+ if !isMap(actual) {
+ return false, fmt.Errorf("HaveKeyWithValue matcher expects a map. Got:%s", format.Object(actual, 1))
+ }
+
+ keyMatcher, keyIsMatcher := matcher.Key.(omegaMatcher)
+ if !keyIsMatcher {
+ keyMatcher = &EqualMatcher{Expected: matcher.Key}
+ }
+
+ valueMatcher, valueIsMatcher := matcher.Value.(omegaMatcher)
+ if !valueIsMatcher {
+ valueMatcher = &EqualMatcher{Expected: matcher.Value}
+ }
+
+ keys := reflect.ValueOf(actual).MapKeys()
+ for i := 0; i < len(keys); i++ {
+ success, err := keyMatcher.Match(keys[i].Interface())
+ if err != nil {
+ return false, fmt.Errorf("HaveKeyWithValue's key matcher failed with:\n%s%s", format.Indent, err.Error())
+ }
+ if success {
+ actualValue := reflect.ValueOf(actual).MapIndex(keys[i])
+ success, err := valueMatcher.Match(actualValue.Interface())
+ if err != nil {
+ return false, fmt.Errorf("HaveKeyWithValue's value matcher failed with:\n%s%s", format.Indent, err.Error())
+ }
+ return success, nil
+ }
+ }
+
+ return false, nil
+}
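+
+// Example (a sketch; HaveKeyWithValue is the gomega-level wrapper). Both the
+// key and the value may be literal values or matchers:
+//
+//   m := map[string]int{"answer": 42}
+//   Expect(m).To(HaveKeyWithValue("answer", 42))
+//   Expect(m).To(HaveKeyWithValue(HavePrefix("ans"), BeNumerically(">", 40)))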
+
+func (matcher *HaveKeyWithValueMatcher) FailureMessage(actual interface{}) (message string) {
+ str := "to have {key: value}"
+ if _, ok := matcher.Key.(omegaMatcher); ok {
+ str += " matching"
+ } else if _, ok := matcher.Value.(omegaMatcher); ok {
+ str += " matching"
+ }
+
+ expect := make(map[interface{}]interface{}, 1)
+ expect[matcher.Key] = matcher.Value
+ return format.Message(actual, str, expect)
+}
+
+func (matcher *HaveKeyWithValueMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ kStr := "not to have key"
+ if _, ok := matcher.Key.(omegaMatcher); ok {
+ kStr = "not to have key matching"
+ }
+
+ vStr := "or that key's value not be"
+ if _, ok := matcher.Value.(omegaMatcher); ok {
+ vStr = "or to have that key's value not matching"
+ }
+
+ return format.Message(actual, kStr, matcher.Key, vStr, matcher.Value)
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go
new file mode 100644
index 0000000..ee42761
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go
@@ -0,0 +1,28 @@
+package matchers
+
+import (
+ "fmt"
+
+ "github.com/onsi/gomega/format"
+)
+
+type HaveLenMatcher struct {
+ Count int
+}
+
+func (matcher *HaveLenMatcher) Match(actual interface{}) (success bool, err error) {
+ length, ok := lengthOf(actual)
+ if !ok {
+ return false, fmt.Errorf("HaveLen matcher expects a string/array/map/channel/slice. Got:\n%s", format.Object(actual, 1))
+ }
+
+ return length == matcher.Count, nil
+}
+
+func (matcher *HaveLenMatcher) FailureMessage(actual interface{}) (message string) {
+ return fmt.Sprintf("Expected\n%s\nto have length %d", format.Object(actual, 1), matcher.Count)
+}
+
+func (matcher *HaveLenMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return fmt.Sprintf("Expected\n%s\nnot to have length %d", format.Object(actual, 1), matcher.Count)
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go
new file mode 100644
index 0000000..22a1b67
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go
@@ -0,0 +1,35 @@
+// untested sections: 2
+
+package matchers
+
+import (
+ "fmt"
+
+ "github.com/onsi/gomega/format"
+)
+
+type HaveOccurredMatcher struct {
+}
+
+func (matcher *HaveOccurredMatcher) Match(actual interface{}) (success bool, err error) {
+ // is purely nil?
+ if actual == nil {
+ return false, nil
+ }
+
+ // must be an 'error' type
+ if !isError(actual) {
+ return false, fmt.Errorf("Expected an error-type. Got:\n%s", format.Object(actual, 1))
+ }
+
+ // must be non-nil (or a pointer to a non-nil)
+ return !isNil(actual), nil
+}
+
+func (matcher *HaveOccurredMatcher) FailureMessage(actual interface{}) (message string) {
+ return fmt.Sprintf("Expected an error to have occurred. Got:\n%s", format.Object(actual, 1))
+}
+
+func (matcher *HaveOccurredMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return fmt.Sprintf("Unexpected error:\n%s\n%s", format.Object(actual, 1), "occurred")
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go
new file mode 100644
index 0000000..1d8e802
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go
@@ -0,0 +1,36 @@
+package matchers
+
+import (
+ "fmt"
+
+ "github.com/onsi/gomega/format"
+)
+
+type HavePrefixMatcher struct {
+ Prefix string
+ Args []interface{}
+}
+
+func (matcher *HavePrefixMatcher) Match(actual interface{}) (success bool, err error) {
+ actualString, ok := toString(actual)
+ if !ok {
+ return false, fmt.Errorf("HavePrefix matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1))
+ }
+ prefix := matcher.prefix()
+ return len(actualString) >= len(prefix) && actualString[0:len(prefix)] == prefix, nil
+}
+
+func (matcher *HavePrefixMatcher) prefix() string {
+ if len(matcher.Args) > 0 {
+ return fmt.Sprintf(matcher.Prefix, matcher.Args...)
+ }
+ return matcher.Prefix
+}
+
+func (matcher *HavePrefixMatcher) FailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "to have prefix", matcher.prefix())
+}
+
+func (matcher *HavePrefixMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to have prefix", matcher.prefix())
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go
new file mode 100644
index 0000000..40a3526
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go
@@ -0,0 +1,36 @@
+package matchers
+
+import (
+ "fmt"
+
+ "github.com/onsi/gomega/format"
+)
+
+type HaveSuffixMatcher struct {
+ Suffix string
+ Args []interface{}
+}
+
+func (matcher *HaveSuffixMatcher) Match(actual interface{}) (success bool, err error) {
+ actualString, ok := toString(actual)
+ if !ok {
+ return false, fmt.Errorf("HaveSuffix matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1))
+ }
+ suffix := matcher.suffix()
+ return len(actualString) >= len(suffix) && actualString[len(actualString)-len(suffix):] == suffix, nil
+}
+
+func (matcher *HaveSuffixMatcher) suffix() string {
+ if len(matcher.Args) > 0 {
+ return fmt.Sprintf(matcher.Suffix, matcher.Args...)
+ }
+ return matcher.Suffix
+}
+
+func (matcher *HaveSuffixMatcher) FailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "to have suffix", matcher.suffix())
+}
+
+func (matcher *HaveSuffixMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to have suffix", matcher.suffix())
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_value.go b/vendor/github.com/onsi/gomega/matchers/have_value.go
new file mode 100644
index 0000000..f672528
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/have_value.go
@@ -0,0 +1,54 @@
+package matchers
+
+import (
+ "errors"
+ "reflect"
+
+ "github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/types"
+)
+
+const maxIndirections = 31
+
+type HaveValueMatcher struct {
+ Matcher types.GomegaMatcher // the matcher to apply to the "resolved" actual value.
+ resolvedActual interface{} // the ("resolved") value.
+}
+
+func (m *HaveValueMatcher) Match(actual interface{}) (bool, error) {
+ val := reflect.ValueOf(actual)
+ for allowedIndirs := maxIndirections; allowedIndirs > 0; allowedIndirs-- {
+ // return an error if value isn't valid. Please note that we cannot
+ // check for nil here, as we might not deal with a pointer or interface
+ // at this point.
+ if !val.IsValid() {
+ return false, errors.New(format.Message(
+ actual, "not to be "))
+ }
+ switch val.Kind() {
+ case reflect.Ptr, reflect.Interface:
+ // resolve pointers and interfaces to their values, then rinse and
+ // repeat.
+ if val.IsNil() {
+ return false, errors.New(format.Message(
+ actual, "not to be "))
+ }
+ val = val.Elem()
+ continue
+ default:
+ // forward the final value to the specified matcher.
+ m.resolvedActual = val.Interface()
+ return m.Matcher.Match(m.resolvedActual)
+ }
+ }
+ // too many indirections: extreme star gazing, indeed...?
+ return false, errors.New(format.Message(actual, "too many indirections"))
+}
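+
+// Example (a sketch; HaveValue is the gomega-level wrapper). Pointers and
+// interfaces are dereferenced, up to maxIndirections levels, before the
+// inner matcher is applied:
+//
+//   v := 42
+//   p := &v
+//   Expect(&p).To(HaveValue(Equal(42))) // **int resolves to 42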
+
+func (m *HaveValueMatcher) FailureMessage(_ interface{}) (message string) {
+ return m.Matcher.FailureMessage(m.resolvedActual)
+}
+
+func (m *HaveValueMatcher) NegatedFailureMessage(_ interface{}) (message string) {
+ return m.Matcher.NegatedFailureMessage(m.resolvedActual)
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go
new file mode 100644
index 0000000..c539dd3
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go
@@ -0,0 +1,86 @@
+package matchers
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+
+ "github.com/onsi/gomega/format"
+)
+
+type MatchErrorMatcher struct {
+ Expected any
+ FuncErrDescription []any
+ isFunc bool
+}
+
+func (matcher *MatchErrorMatcher) Match(actual any) (success bool, err error) {
+ matcher.isFunc = false
+
+ if isNil(actual) {
+ return false, fmt.Errorf("Expected an error, got nil")
+ }
+
+ if !isError(actual) {
+ return false, fmt.Errorf("Expected an error. Got:\n%s", format.Object(actual, 1))
+ }
+
+ actualErr := actual.(error)
+ expected := matcher.Expected
+
+ if isError(expected) {
+ // first try the built-in errors.Is
+ if errors.Is(actualErr, expected.(error)) {
+ return true, nil
+ }
+ // if not, try DeepEqual along the error chain
+ for unwrapped := actualErr; unwrapped != nil; unwrapped = errors.Unwrap(unwrapped) {
+ if reflect.DeepEqual(unwrapped, expected) {
+ return true, nil
+ }
+ }
+ return false, nil
+ }
+
+ if isString(expected) {
+ return actualErr.Error() == expected, nil
+ }
+
+ v := reflect.ValueOf(expected)
+ t := v.Type()
+ errorInterface := reflect.TypeOf((*error)(nil)).Elem()
+ if t.Kind() == reflect.Func && t.NumIn() == 1 && t.In(0).Implements(errorInterface) && t.NumOut() == 1 && t.Out(0).Kind() == reflect.Bool {
+ if len(matcher.FuncErrDescription) == 0 {
+ return false, fmt.Errorf("MatchError requires an additional description when passed a function")
+ }
+ matcher.isFunc = true
+ return v.Call([]reflect.Value{reflect.ValueOf(actualErr)})[0].Bool(), nil
+ }
+
+ var subMatcher omegaMatcher
+ var hasSubMatcher bool
+ if expected != nil {
+ subMatcher, hasSubMatcher = (expected).(omegaMatcher)
+ if hasSubMatcher {
+ return subMatcher.Match(actualErr.Error())
+ }
+ }
+
+ return false, fmt.Errorf(
+ "MatchError must be passed an error, a string, or a Matcher that can match on strings. Got:\n%s",
+ format.Object(expected, 1))
+}
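+
+// Example (a sketch; MatchError is the gomega-level wrapper, and err is a
+// hypothetical error value). The expectation may be an error (checked with
+// errors.Is and DeepEqual along the unwrap chain), an exact message string,
+// a matcher for the message, or a func(error) bool plus a description:
+//
+//   Expect(err).To(MatchError(io.EOF))
+//   Expect(err).To(MatchError("boom"))
+//   Expect(err).To(MatchError(ContainSubstring("oom")))
+//   Expect(err).To(MatchError(os.IsTimeout, "os.IsTimeout"))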
+
+func (matcher *MatchErrorMatcher) FailureMessage(actual interface{}) (message string) {
+ if matcher.isFunc {
+ return format.Message(actual, fmt.Sprintf("to match error function %s", matcher.FuncErrDescription[0]))
+ }
+ return format.Message(actual, "to match error", matcher.Expected)
+}
+
+func (matcher *MatchErrorMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ if matcher.isFunc {
+ return format.Message(actual, fmt.Sprintf("not to match error function %s", matcher.FuncErrDescription[0]))
+ }
+ return format.Message(actual, "not to match error", matcher.Expected)
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go
new file mode 100644
index 0000000..f962f13
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go
@@ -0,0 +1,65 @@
+package matchers
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+
+ "github.com/onsi/gomega/format"
+)
+
+type MatchJSONMatcher struct {
+ JSONToMatch interface{}
+ firstFailurePath []interface{}
+}
+
+func (matcher *MatchJSONMatcher) Match(actual interface{}) (success bool, err error) {
+ actualString, expectedString, err := matcher.prettyPrint(actual)
+ if err != nil {
+ return false, err
+ }
+
+ var aval interface{}
+ var eval interface{}
+
+ // this is guarded by prettyPrint
+ json.Unmarshal([]byte(actualString), &aval)
+ json.Unmarshal([]byte(expectedString), &eval)
+ var equal bool
+ equal, matcher.firstFailurePath = deepEqual(aval, eval)
+ return equal, nil
+}
+
+func (matcher *MatchJSONMatcher) FailureMessage(actual interface{}) (message string) {
+ actualString, expectedString, _ := matcher.prettyPrint(actual)
+ return formattedMessage(format.Message(actualString, "to match JSON of", expectedString), matcher.firstFailurePath)
+}
+
+func (matcher *MatchJSONMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ actualString, expectedString, _ := matcher.prettyPrint(actual)
+ return formattedMessage(format.Message(actualString, "not to match JSON of", expectedString), matcher.firstFailurePath)
+}
+
+func (matcher *MatchJSONMatcher) prettyPrint(actual interface{}) (actualFormatted, expectedFormatted string, err error) {
+ actualString, ok := toString(actual)
+ if !ok {
+ return "", "", fmt.Errorf("MatchJSONMatcher matcher requires a string, stringer, or []byte. Got actual:\n%s", format.Object(actual, 1))
+ }
+ expectedString, ok := toString(matcher.JSONToMatch)
+ if !ok {
+ return "", "", fmt.Errorf("MatchJSONMatcher matcher requires a string, stringer, or []byte. Got expected:\n%s", format.Object(matcher.JSONToMatch, 1))
+ }
+
+ abuf := new(bytes.Buffer)
+ ebuf := new(bytes.Buffer)
+
+ if err := json.Indent(abuf, []byte(actualString), "", " "); err != nil {
+ return "", "", fmt.Errorf("Actual '%s' should be valid JSON, but it is not.\nUnderlying error:%s", actualString, err)
+ }
+
+ if err := json.Indent(ebuf, []byte(expectedString), "", " "); err != nil {
+ return "", "", fmt.Errorf("Expected '%s' should be valid JSON, but it is not.\nUnderlying error:%s", expectedString, err)
+ }
+
+ return abuf.String(), ebuf.String(), nil
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go
new file mode 100644
index 0000000..adac5db
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go
@@ -0,0 +1,43 @@
+package matchers
+
+import (
+ "fmt"
+ "regexp"
+
+ "github.com/onsi/gomega/format"
+)
+
+type MatchRegexpMatcher struct {
+ Regexp string
+ Args []interface{}
+}
+
+func (matcher *MatchRegexpMatcher) Match(actual interface{}) (success bool, err error) {
+ actualString, ok := toString(actual)
+ if !ok {
+ return false, fmt.Errorf("RegExp matcher requires a string or stringer.\nGot:%s", format.Object(actual, 1))
+ }
+
+ match, err := regexp.Match(matcher.regexp(), []byte(actualString))
+ if err != nil {
+ return false, fmt.Errorf("RegExp match failed to compile with error:\n\t%s", err.Error())
+ }
+
+ return match, nil
+}
+
+func (matcher *MatchRegexpMatcher) FailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "to match regular expression", matcher.regexp())
+}
+
+func (matcher *MatchRegexpMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to match regular expression", matcher.regexp())
+}
+
+func (matcher *MatchRegexpMatcher) regexp() string {
+ re := matcher.Regexp
+ if len(matcher.Args) > 0 {
+ re = fmt.Sprintf(matcher.Regexp, matcher.Args...)
+ }
+ return re
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go
new file mode 100644
index 0000000..5c815f5
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go
@@ -0,0 +1,134 @@
+package matchers
+
+import (
+ "bytes"
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "strings"
+
+ "github.com/onsi/gomega/format"
+ "golang.org/x/net/html/charset"
+)
+
+type MatchXMLMatcher struct {
+ XMLToMatch interface{}
+}
+
+func (matcher *MatchXMLMatcher) Match(actual interface{}) (success bool, err error) {
+ actualString, expectedString, err := matcher.formattedPrint(actual)
+ if err != nil {
+ return false, err
+ }
+
+ aval, err := parseXmlContent(actualString)
+ if err != nil {
+ return false, fmt.Errorf("Actual '%s' should be valid XML, but it is not.\nUnderlying error:%s", actualString, err)
+ }
+
+ eval, err := parseXmlContent(expectedString)
+ if err != nil {
+ return false, fmt.Errorf("Expected '%s' should be valid XML, but it is not.\nUnderlying error:%s", expectedString, err)
+ }
+
+ return reflect.DeepEqual(aval, eval), nil
+}
+
+func (matcher *MatchXMLMatcher) FailureMessage(actual interface{}) (message string) {
+ actualString, expectedString, _ := matcher.formattedPrint(actual)
+ return fmt.Sprintf("Expected\n%s\nto match XML of\n%s", actualString, expectedString)
+}
+
+func (matcher *MatchXMLMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ actualString, expectedString, _ := matcher.formattedPrint(actual)
+ return fmt.Sprintf("Expected\n%s\nnot to match XML of\n%s", actualString, expectedString)
+}
+
+func (matcher *MatchXMLMatcher) formattedPrint(actual interface{}) (actualString, expectedString string, err error) {
+ var ok bool
+ actualString, ok = toString(actual)
+ if !ok {
+ return "", "", fmt.Errorf("MatchXMLMatcher matcher requires a string, stringer, or []byte. Got actual:\n%s", format.Object(actual, 1))
+ }
+ expectedString, ok = toString(matcher.XMLToMatch)
+ if !ok {
+ return "", "", fmt.Errorf("MatchXMLMatcher matcher requires a string, stringer, or []byte. Got expected:\n%s", format.Object(matcher.XMLToMatch, 1))
+ }
+ return actualString, expectedString, nil
+}
+
+func parseXmlContent(content string) (*xmlNode, error) {
+ allNodes := []*xmlNode{}
+
+ dec := newXmlDecoder(strings.NewReader(content))
+ for {
+ tok, err := dec.Token()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ return nil, fmt.Errorf("failed to decode next token: %v", err) // untested section
+ }
+
+ lastNodeIndex := len(allNodes) - 1
+ var lastNode *xmlNode
+ if len(allNodes) > 0 {
+ lastNode = allNodes[lastNodeIndex]
+ } else {
+ lastNode = &xmlNode{}
+ }
+
+ switch tok := tok.(type) {
+ case xml.StartElement:
+ attrs := attributesSlice(tok.Attr)
+ sort.Sort(attrs)
+ allNodes = append(allNodes, &xmlNode{XMLName: tok.Name, XMLAttr: tok.Attr})
+ case xml.EndElement:
+ if len(allNodes) > 1 {
+ allNodes[lastNodeIndex-1].Nodes = append(allNodes[lastNodeIndex-1].Nodes, lastNode)
+ allNodes = allNodes[:lastNodeIndex]
+ }
+ case xml.CharData:
+ lastNode.Content = append(lastNode.Content, tok.Copy()...)
+ case xml.Comment:
+ lastNode.Comments = append(lastNode.Comments, tok.Copy()) // untested section
+ case xml.ProcInst:
+ lastNode.ProcInsts = append(lastNode.ProcInsts, tok.Copy())
+ }
+ }
+
+ if len(allNodes) == 0 {
+ return nil, errors.New("found no nodes")
+ }
+ firstNode := allNodes[0]
+ trimParentNodesContentSpaces(firstNode)
+
+ return firstNode, nil
+}
+
+func newXmlDecoder(reader io.Reader) *xml.Decoder {
+ dec := xml.NewDecoder(reader)
+ dec.CharsetReader = charset.NewReaderLabel
+ return dec
+}
+
+func trimParentNodesContentSpaces(node *xmlNode) {
+ if len(node.Nodes) > 0 {
+ node.Content = bytes.TrimSpace(node.Content)
+ for _, childNode := range node.Nodes {
+ trimParentNodesContentSpaces(childNode)
+ }
+ }
+}
+
+type xmlNode struct {
+ XMLName xml.Name
+ Comments []xml.Comment
+ ProcInsts []xml.ProcInst
+ XMLAttr []xml.Attr
+ Content []byte
+ Nodes []*xmlNode
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go
new file mode 100644
index 0000000..2cb6b47
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go
@@ -0,0 +1,76 @@
+package matchers
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/onsi/gomega/format"
+ "gopkg.in/yaml.v3"
+)
+
+type MatchYAMLMatcher struct {
+ YAMLToMatch interface{}
+ firstFailurePath []interface{}
+}
+
+func (matcher *MatchYAMLMatcher) Match(actual interface{}) (success bool, err error) {
+ actualString, expectedString, err := matcher.toStrings(actual)
+ if err != nil {
+ return false, err
+ }
+
+ var aval interface{}
+ var eval interface{}
+
+ if err := yaml.Unmarshal([]byte(actualString), &aval); err != nil {
+ return false, fmt.Errorf("Actual '%s' should be valid YAML, but it is not.\nUnderlying error:%s", actualString, err)
+ }
+ if err := yaml.Unmarshal([]byte(expectedString), &eval); err != nil {
+ return false, fmt.Errorf("Expected '%s' should be valid YAML, but it is not.\nUnderlying error:%s", expectedString, err)
+ }
+
+ var equal bool
+ equal, matcher.firstFailurePath = deepEqual(aval, eval)
+ return equal, nil
+}
+
+func (matcher *MatchYAMLMatcher) FailureMessage(actual interface{}) (message string) {
+ actualString, expectedString, _ := matcher.toNormalisedStrings(actual)
+ return formattedMessage(format.Message(actualString, "to match YAML of", expectedString), matcher.firstFailurePath)
+}
+
+func (matcher *MatchYAMLMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ actualString, expectedString, _ := matcher.toNormalisedStrings(actual)
+ return formattedMessage(format.Message(actualString, "not to match YAML of", expectedString), matcher.firstFailurePath)
+}
+
+func (matcher *MatchYAMLMatcher) toNormalisedStrings(actual interface{}) (actualFormatted, expectedFormatted string, err error) {
+ actualString, expectedString, err := matcher.toStrings(actual)
+ return normalise(actualString), normalise(expectedString), err
+}
+
+func normalise(input string) string {
+ var val interface{}
+ err := yaml.Unmarshal([]byte(input), &val)
+ if err != nil {
+ panic(err) // unreachable since Match already calls Unmarshal
+ }
+ output, err := yaml.Marshal(val)
+ if err != nil {
+ panic(err) // untested section, unreachable since we Unmarshal above
+ }
+ return strings.TrimSpace(string(output))
+}
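+
+// For example (a sketch): normalise("foo:   bar\n\n") unmarshals and
+// re-marshals the document, yielding the canonical, trimmed form "foo: bar".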
+
+func (matcher *MatchYAMLMatcher) toStrings(actual interface{}) (actualFormatted, expectedFormatted string, err error) {
+ actualString, ok := toString(actual)
+ if !ok {
+ return "", "", fmt.Errorf("MatchYAMLMatcher matcher requires a string, stringer, or []byte. Got actual:\n%s", format.Object(actual, 1))
+ }
+ expectedString, ok := toString(matcher.YAMLToMatch)
+ if !ok {
+ return "", "", fmt.Errorf("MatchYAMLMatcher matcher requires a string, stringer, or []byte. Got expected:\n%s", format.Object(matcher.YAMLToMatch, 1))
+ }
+
+ return actualString, expectedString, nil
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/not.go b/vendor/github.com/onsi/gomega/matchers/not.go
new file mode 100644
index 0000000..78b7191
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/not.go
@@ -0,0 +1,29 @@
+package matchers
+
+import (
+ "github.com/onsi/gomega/types"
+)
+
+type NotMatcher struct {
+ Matcher types.GomegaMatcher
+}
+
+func (m *NotMatcher) Match(actual interface{}) (bool, error) {
+ success, err := m.Matcher.Match(actual)
+ if err != nil {
+ return false, err
+ }
+ return !success, nil
+}
+
+func (m *NotMatcher) FailureMessage(actual interface{}) (message string) {
+ return m.Matcher.NegatedFailureMessage(actual) // works beautifully
+}
+
+func (m *NotMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return m.Matcher.FailureMessage(actual) // works beautifully
+}
+
+func (m *NotMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+ return types.MatchMayChangeInTheFuture(m.Matcher, actual) // just return m.Matcher's value
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/or.go b/vendor/github.com/onsi/gomega/matchers/or.go
new file mode 100644
index 0000000..841ae26
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/or.go
@@ -0,0 +1,66 @@
+package matchers
+
+import (
+ "fmt"
+
+ "github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/types"
+)
+
+type OrMatcher struct {
+ Matchers []types.GomegaMatcher
+
+ // state
+ firstSuccessfulMatcher types.GomegaMatcher
+}
+
+func (m *OrMatcher) Match(actual interface{}) (success bool, err error) {
+ m.firstSuccessfulMatcher = nil
+ for _, matcher := range m.Matchers {
+ success, err := matcher.Match(actual)
+ if err != nil {
+ return false, err
+ }
+ if success {
+ m.firstSuccessfulMatcher = matcher
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+func (m *OrMatcher) FailureMessage(actual interface{}) (message string) {
+ // not the most beautiful list of matchers, but not bad either...
+ return format.Message(actual, fmt.Sprintf("To satisfy at least one of these matchers: %s", m.Matchers))
+}
+
+func (m *OrMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return m.firstSuccessfulMatcher.NegatedFailureMessage(actual)
+}
+
+func (m *OrMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+ /*
+ Example with 3 matchers: A, B, C
+
+ Match evaluates them: F, T, <?> => T
+ So match is currently T, what should MatchMayChangeInTheFuture() return?
+ Seems like it only depends on B, since currently B MUST change to allow the result to become F
+
+ Match eval: F, F, F => F
+ So match is currently F, what should MatchMayChangeInTheFuture() return?
+ Seems to depend on ANY of them being able to change to T.
+ */
+
+ if m.firstSuccessfulMatcher != nil {
+ // one of the matchers succeeded; it must be able to change in order to affect the result
+ return types.MatchMayChangeInTheFuture(m.firstSuccessfulMatcher, actual)
+ } else {
+ // so all matchers failed; any one of them changing would change the result.
+ for _, matcher := range m.Matchers {
+ if types.MatchMayChangeInTheFuture(matcher, actual) {
+ return true
+ }
+ }
+ return false // none of them was going to change
+ }
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/panic_matcher.go b/vendor/github.com/onsi/gomega/matchers/panic_matcher.go
new file mode 100644
index 0000000..adc8cee
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/panic_matcher.go
@@ -0,0 +1,114 @@
+package matchers
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/onsi/gomega/format"
+)
+
+type PanicMatcher struct {
+ Expected interface{}
+ object interface{}
+}
+
+func (matcher *PanicMatcher) Match(actual interface{}) (success bool, err error) {
+ if actual == nil {
+ return false, fmt.Errorf("PanicMatcher expects a non-nil actual.")
+ }
+
+ actualType := reflect.TypeOf(actual)
+ if actualType.Kind() != reflect.Func {
+ return false, fmt.Errorf("PanicMatcher expects a function. Got:\n%s", format.Object(actual, 1))
+ }
+ if !(actualType.NumIn() == 0 && actualType.NumOut() == 0) {
+ return false, fmt.Errorf("PanicMatcher expects a function with no arguments and no return value. Got:\n%s", format.Object(actual, 1))
+ }
+
+ success = false
+ defer func() {
+ if e := recover(); e != nil {
+ matcher.object = e
+
+ if matcher.Expected == nil {
+ success = true
+ return
+ }
+
+ valueMatcher, valueIsMatcher := matcher.Expected.(omegaMatcher)
+ if !valueIsMatcher {
+ valueMatcher = &EqualMatcher{Expected: matcher.Expected}
+ }
+
+ success, err = valueMatcher.Match(e)
+ if err != nil {
+ err = fmt.Errorf("PanicMatcher's value matcher failed with:\n%s%s", format.Indent, err.Error())
+ }
+ }
+ }()
+
+ reflect.ValueOf(actual).Call([]reflect.Value{})
+
+ return
+}
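+
+// Example (a sketch; Panic and PanicWith are the gomega-level wrappers):
+//
+//   Expect(func() { panic("boom") }).To(Panic())
+//   Expect(func() { panic("boom") }).To(PanicWith("boom"))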
+
+func (matcher *PanicMatcher) FailureMessage(actual interface{}) (message string) {
+ if matcher.Expected == nil {
+ // We wanted any panic to occur, but none did.
+ return format.Message(actual, "to panic")
+ }
+
+ if matcher.object == nil {
+ // We wanted a panic with a specific value to occur, but none did.
+ switch matcher.Expected.(type) {
+ case omegaMatcher:
+ return format.Message(actual, "to panic with a value matching", matcher.Expected)
+ default:
+ return format.Message(actual, "to panic with", matcher.Expected)
+ }
+ }
+
+ // We got a panic, but the value isn't what we expected.
+ switch matcher.Expected.(type) {
+ case omegaMatcher:
+ return format.Message(
+ actual,
+ fmt.Sprintf(
+ "to panic with a value matching\n%s\nbut panicked with\n%s",
+ format.Object(matcher.Expected, 1),
+ format.Object(matcher.object, 1),
+ ),
+ )
+ default:
+ return format.Message(
+ actual,
+ fmt.Sprintf(
+ "to panic with\n%s\nbut panicked with\n%s",
+ format.Object(matcher.Expected, 1),
+ format.Object(matcher.object, 1),
+ ),
+ )
+ }
+}
+
+func (matcher *PanicMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ if matcher.Expected == nil {
+ // We didn't want any panic to occur, but one did.
+ return format.Message(actual, fmt.Sprintf("not to panic, but panicked with\n%s", format.Object(matcher.object, 1)))
+ }
+
+ // We wanted to ensure a panic with a specific value did not occur, but it did.
+ switch matcher.Expected.(type) {
+ case omegaMatcher:
+ return format.Message(
+ actual,
+ fmt.Sprintf(
+ "not to panic with a value matching\n%s\nbut panicked with\n%s",
+ format.Object(matcher.Expected, 1),
+ format.Object(matcher.object, 1),
+ ),
+ )
+ default:
+ return format.Message(actual, "not to panic with", matcher.Expected)
+ }
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go
new file mode 100644
index 0000000..948164e
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go
@@ -0,0 +1,166 @@
+// untested sections: 3
+
+package matchers
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+
+ "github.com/onsi/gomega/format"
+)
+
+type ReceiveMatcher struct {
+ Args []interface{}
+ receivedValue reflect.Value
+ channelClosed bool
+}
+
+func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err error) {
+ if !isChan(actual) {
+ return false, fmt.Errorf("ReceiveMatcher expects a channel. Got:\n%s", format.Object(actual, 1))
+ }
+
+ channelType := reflect.TypeOf(actual)
+ channelValue := reflect.ValueOf(actual)
+
+ if channelType.ChanDir() == reflect.SendDir {
+ return false, fmt.Errorf("ReceiveMatcher matcher cannot be passed a send-only channel. Got:\n%s", format.Object(actual, 1))
+ }
+
+ var subMatcher omegaMatcher
+ var hasSubMatcher bool
+ var resultReference interface{}
+
+ // Valid arg formats are as follows, always with optional POINTER before
+ // optional MATCHER:
+ // - Receive()
+ // - Receive(POINTER)
+ // - Receive(MATCHER)
+ // - Receive(POINTER, MATCHER)
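+ //
+ // For illustration (a sketch via the gomega-level Receive wrapper):
+ //   c := make(chan string, 1)
+ //   c <- "hi"
+ //   var s string
+ //   Expect(c).To(Receive(&s, HavePrefix("h"))) // assigns "hi" to s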
+ args := matcher.Args
+ if len(args) > 0 {
+ arg := args[0]
+ _, isSubMatcher := arg.(omegaMatcher)
+ if !isSubMatcher && reflect.ValueOf(arg).Kind() == reflect.Ptr {
+ // Consume optional POINTER arg first, if it ain't no matcher ;)
+ resultReference = arg
+ args = args[1:]
+ }
+ }
+ if len(args) > 0 {
+ arg := args[0]
+ subMatcher, hasSubMatcher = arg.(omegaMatcher)
+ if !hasSubMatcher {
+ // At this point we assume the dev user wanted to assign a received
+ // value, so [POINTER,]MATCHER.
+ return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s\nYou need to pass a pointer!", format.Object(actual, 1), format.Object(arg, 1))
+ }
+ // Consume optional MATCHER arg.
+ args = args[1:]
+ }
+ if len(args) > 0 {
+ // If there are still args present, reject all.
+ return false, errors.New("Receive matcher expects at most an optional pointer and/or an optional matcher")
+ }
+
+ winnerIndex, value, open := reflect.Select([]reflect.SelectCase{
+ {Dir: reflect.SelectRecv, Chan: channelValue},
+ {Dir: reflect.SelectDefault},
+ })
+
+ var closed bool
+ var didReceive bool
+ if winnerIndex == 0 {
+ closed = !open
+ didReceive = open
+ }
+ matcher.channelClosed = closed
+
+ if closed {
+ return false, nil
+ }
+
+ if hasSubMatcher {
+ if !didReceive {
+ return false, nil
+ }
+ matcher.receivedValue = value
+ if match, err := subMatcher.Match(matcher.receivedValue.Interface()); err != nil || !match {
+ return match, err
+ }
+ // if we received a match, then fall through in order to handle an
+ // optional assignment of the received value to the specified reference.
+ }
+
+ if didReceive {
+ if resultReference != nil {
+ outValue := reflect.ValueOf(resultReference)
+
+ if value.Type().AssignableTo(outValue.Elem().Type()) {
+ outValue.Elem().Set(value)
+ return true, nil
+ }
+ if value.Type().Kind() == reflect.Interface && value.Elem().Type().AssignableTo(outValue.Elem().Type()) {
+ outValue.Elem().Set(value.Elem())
+ return true, nil
+ } else {
+ return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nType:\n%s\nTo:\n%s", format.Object(actual, 1), format.Object(value.Interface(), 1), format.Object(resultReference, 1))
+ }
+
+ }
+
+ return true, nil
+ }
+ return false, nil
+}
+
+func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message string) {
+ var matcherArg interface{}
+ if len(matcher.Args) > 0 {
+ matcherArg = matcher.Args[len(matcher.Args)-1]
+ }
+ subMatcher, hasSubMatcher := (matcherArg).(omegaMatcher)
+
+ closedAddendum := ""
+ if matcher.channelClosed {
+ closedAddendum = " The channel is closed."
+ }
+
+ if hasSubMatcher {
+ if matcher.receivedValue.IsValid() {
+ return subMatcher.FailureMessage(matcher.receivedValue.Interface())
+ }
+ return "When passed a matcher, ReceiveMatcher's channel *must* receive something."
+ }
+ return format.Message(actual, "to receive something."+closedAddendum)
+}
+
+func (matcher *ReceiveMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ var matcherArg interface{}
+ if len(matcher.Args) > 0 {
+ matcherArg = matcher.Args[len(matcher.Args)-1]
+ }
+ subMatcher, hasSubMatcher := (matcherArg).(omegaMatcher)
+
+ closedAddendum := ""
+ if matcher.channelClosed {
+ closedAddendum = " The channel is closed."
+ }
+
+ if hasSubMatcher {
+ if matcher.receivedValue.IsValid() {
+ return subMatcher.NegatedFailureMessage(matcher.receivedValue.Interface())
+ }
+ return "When passed a matcher, ReceiveMatcher's channel *must* receive something."
+ }
+ return format.Message(actual, "not to receive anything."+closedAddendum)
+}
+
+func (matcher *ReceiveMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+ if !isChan(actual) {
+ return false
+ }
+
+ return !matcher.channelClosed
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/satisfy_matcher.go b/vendor/github.com/onsi/gomega/matchers/satisfy_matcher.go
new file mode 100644
index 0000000..ec68fe8
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/satisfy_matcher.go
@@ -0,0 +1,66 @@
+package matchers
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/onsi/gomega/format"
+)
+
+type SatisfyMatcher struct {
+ Predicate interface{}
+
+ // cached type
+ predicateArgType reflect.Type
+}
+
+func NewSatisfyMatcher(predicate interface{}) *SatisfyMatcher {
+ if predicate == nil {
+ panic("predicate cannot be nil")
+ }
+ predicateType := reflect.TypeOf(predicate)
+ if predicateType.Kind() != reflect.Func {
+ panic("predicate must be a function")
+ }
+ if predicateType.NumIn() != 1 {
+ panic("predicate must have 1 argument")
+ }
+ if predicateType.NumOut() != 1 || predicateType.Out(0).Kind() != reflect.Bool {
+ panic("predicate must return bool")
+ }
+
+ return &SatisfyMatcher{
+ Predicate: predicate,
+ predicateArgType: predicateType.In(0),
+ }
+}
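+
+// Example (a sketch; Satisfy is the gomega-level wrapper). The predicate's
+// single argument type determines which actual values are accepted:
+//
+//   isEven := func(n int) bool { return n%2 == 0 }
+//   Expect(4).To(Satisfy(isEven))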
+
+func (m *SatisfyMatcher) Match(actual interface{}) (success bool, err error) {
+ // prepare a parameter to pass to the predicate
+ var param reflect.Value
+ if actual != nil && reflect.TypeOf(actual).AssignableTo(m.predicateArgType) {
+ // The dynamic type of actual is compatible with the predicate argument.
+ param = reflect.ValueOf(actual)
+
+ } else if actual == nil && m.predicateArgType.Kind() == reflect.Interface {
+ // The dynamic type of actual is unknown, so there's no way to make its
+ // reflect.Value. Create a nil of the predicate argument, which is known.
+ param = reflect.Zero(m.predicateArgType)
+
+ } else {
+ return false, fmt.Errorf("predicate expects '%s' but we have '%T'", m.predicateArgType, actual)
+ }
+
+ // call the predicate with `actual`
+ fn := reflect.ValueOf(m.Predicate)
+ result := fn.Call([]reflect.Value{param})
+ return result[0].Bool(), nil
+}
+
+func (m *SatisfyMatcher) FailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "to satisfy predicate", m.Predicate)
+}
+
+func (m *SatisfyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "to not satisfy predicate", m.Predicate)
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go b/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go
new file mode 100644
index 0000000..1369c1e
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go
@@ -0,0 +1,94 @@
+// untested sections: 5
+
+package matchers
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+func formattedMessage(comparisonMessage string, failurePath []interface{}) string {
+ var diffMessage string
+ if len(failurePath) == 0 {
+ diffMessage = ""
+ } else {
+ diffMessage = fmt.Sprintf("\n\nfirst mismatched key: %s", formattedFailurePath(failurePath))
+ }
+ return fmt.Sprintf("%s%s", comparisonMessage, diffMessage)
+}
+
+func formattedFailurePath(failurePath []interface{}) string {
+ formattedPaths := []string{}
+ for i := len(failurePath) - 1; i >= 0; i-- {
+ switch p := failurePath[i].(type) {
+ case int:
+ formattedPaths = append(formattedPaths, fmt.Sprintf(`[%d]`, p))
+ default:
+ if i != len(failurePath)-1 {
+ formattedPaths = append(formattedPaths, ".")
+ }
+ formattedPaths = append(formattedPaths, fmt.Sprintf(`"%s"`, p))
+ }
+ }
+ return strings.Join(formattedPaths, "")
+}
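+
+// For example (a sketch): deepEqual records the failure path innermost key
+// first, so failurePath []interface{}{"name", 0, "users"} renders as
+// "users"[0]."name".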
+
+func deepEqual(a interface{}, b interface{}) (bool, []interface{}) {
+ var errorPath []interface{}
+ if reflect.TypeOf(a) != reflect.TypeOf(b) {
+ return false, errorPath
+ }
+
+ switch a.(type) {
+ case []interface{}:
+ if len(a.([]interface{})) != len(b.([]interface{})) {
+ return false, errorPath
+ }
+
+ for i, v := range a.([]interface{}) {
+ elementEqual, keyPath := deepEqual(v, b.([]interface{})[i])
+ if !elementEqual {
+ return false, append(keyPath, i)
+ }
+ }
+ return true, errorPath
+
+ case map[interface{}]interface{}:
+ if len(a.(map[interface{}]interface{})) != len(b.(map[interface{}]interface{})) {
+ return false, errorPath
+ }
+
+ for k, v1 := range a.(map[interface{}]interface{}) {
+ v2, ok := b.(map[interface{}]interface{})[k]
+ if !ok {
+ return false, errorPath
+ }
+ elementEqual, keyPath := deepEqual(v1, v2)
+ if !elementEqual {
+ return false, append(keyPath, k)
+ }
+ }
+ return true, errorPath
+
+ case map[string]interface{}:
+ if len(a.(map[string]interface{})) != len(b.(map[string]interface{})) {
+ return false, errorPath
+ }
+
+ for k, v1 := range a.(map[string]interface{}) {
+ v2, ok := b.(map[string]interface{})[k]
+ if !ok {
+ return false, errorPath
+ }
+ elementEqual, keyPath := deepEqual(v1, v2)
+ if !elementEqual {
+ return false, append(keyPath, k)
+ }
+ }
+ return true, errorPath
+
+ default:
+ return a == b, errorPath
+ }
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go
new file mode 100644
index 0000000..327350f
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go
@@ -0,0 +1,42 @@
+package matchers
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/onsi/gomega/format"
+)
+
+type formattedGomegaError interface {
+ FormattedGomegaError() string
+}
+
+type SucceedMatcher struct {
+}
+
+func (matcher *SucceedMatcher) Match(actual interface{}) (success bool, err error) {
+ // is purely nil?
+ if actual == nil {
+ return true, nil
+ }
+
+ // must be an 'error' type
+ if !isError(actual) {
+ return false, fmt.Errorf("Expected an error-type. Got:\n%s", format.Object(actual, 1))
+ }
+
+ // must be nil (or a pointer to a nil)
+ return isNil(actual), nil
+}
+
+func (matcher *SucceedMatcher) FailureMessage(actual interface{}) (message string) {
+ var fgErr formattedGomegaError
+ if errors.As(actual.(error), &fgErr) {
+ return fgErr.FormattedGomegaError()
+ }
+ return fmt.Sprintf("Expected success, but got an error:\n%s", format.Object(actual, 1))
+}
+
+func (matcher *SucceedMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return "Expected failure, but got no error."
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go
new file mode 100644
index 0000000..830e308
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go
@@ -0,0 +1,56 @@
+package bipartitegraph
+
+import "fmt"
+
+import . "github.com/onsi/gomega/matchers/support/goraph/node"
+import . "github.com/onsi/gomega/matchers/support/goraph/edge"
+
+type BipartiteGraph struct {
+ Left NodeOrderedSet
+ Right NodeOrderedSet
+ Edges EdgeSet
+}
+
+func NewBipartiteGraph(leftValues, rightValues []interface{}, neighbours func(interface{}, interface{}) (bool, error)) (*BipartiteGraph, error) {
+ left := NodeOrderedSet{}
+ for i, v := range leftValues {
+ left = append(left, Node{ID: i, Value: v})
+ }
+
+ right := NodeOrderedSet{}
+ for j, v := range rightValues {
+ right = append(right, Node{ID: j + len(left), Value: v})
+ }
+
+ edges := EdgeSet{}
+ for i, leftValue := range leftValues {
+ for j, rightValue := range rightValues {
+ neighbours, err := neighbours(leftValue, rightValue)
+ if err != nil {
+ return nil, fmt.Errorf("error determining adjacency for %v and %v: %s", leftValue, rightValue, err.Error())
+ }
+
+ if neighbours {
+ edges = append(edges, Edge{Node1: left[i].ID, Node2: right[j].ID})
+ }
+ }
+ }
+
+ return &BipartiteGraph{left, right, edges}, nil
+}
+
+// FreeLeftRight returns left node values and right node values
+// of the BipartiteGraph's nodes which are not part of the given edges.
+func (bg *BipartiteGraph) FreeLeftRight(edges EdgeSet) (leftValues, rightValues []interface{}) {
+ for _, node := range bg.Left {
+ if edges.Free(node) {
+ leftValues = append(leftValues, node.Value)
+ }
+ }
+ for _, node := range bg.Right {
+ if edges.Free(node) {
+ rightValues = append(rightValues, node.Value)
+ }
+ }
+ return
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go
new file mode 100644
index 0000000..1c54edd
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go
@@ -0,0 +1,164 @@
+package bipartitegraph
+
+import (
+ . "github.com/onsi/gomega/matchers/support/goraph/edge"
+ . "github.com/onsi/gomega/matchers/support/goraph/node"
+ "github.com/onsi/gomega/matchers/support/goraph/util"
+)
+
+// LargestMatching implements the Hopcroft–Karp algorithm taking as input a bipartite graph
+// and outputting a maximum cardinality matching, i.e. a set of as many edges as possible
+// with the property that no two edges share an endpoint.
+func (bg *BipartiteGraph) LargestMatching() (matching EdgeSet) {
+ paths := bg.maximalDisjointSLAPCollection(matching)
+
+ for len(paths) > 0 {
+ for _, path := range paths {
+ matching = matching.SymmetricDifference(path)
+ }
+ paths = bg.maximalDisjointSLAPCollection(matching)
+ }
+
+ return
+}
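+
+// Example (a sketch with hypothetical values): in a complete bipartite graph
+// the largest matching pairs off as many nodes as the smaller side allows:
+//
+//   graph, _ := NewBipartiteGraph(
+//       []interface{}{"a", "b"},
+//       []interface{}{1, 2, 3},
+//       func(l, r interface{}) (bool, error) { return true, nil },
+//   )
+//   matching := graph.LargestMatching() // len(matching) == 2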
+
+func (bg *BipartiteGraph) maximalDisjointSLAPCollection(matching EdgeSet) (result []EdgeSet) {
+ guideLayers := bg.createSLAPGuideLayers(matching)
+ if len(guideLayers) == 0 {
+ return
+ }
+
+ used := make(map[int]bool)
+
+ for _, u := range guideLayers[len(guideLayers)-1] {
+ slap, found := bg.findDisjointSLAP(u, matching, guideLayers, used)
+ if found {
+ for _, edge := range slap {
+ used[edge.Node1] = true
+ used[edge.Node2] = true
+ }
+ result = append(result, slap)
+ }
+ }
+
+ return
+}
+
+func (bg *BipartiteGraph) findDisjointSLAP(
+ start Node,
+ matching EdgeSet,
+ guideLayers []NodeOrderedSet,
+ used map[int]bool,
+) ([]Edge, bool) {
+ return bg.findDisjointSLAPHelper(start, EdgeSet{}, len(guideLayers)-1, matching, guideLayers, used)
+}
+
+func (bg *BipartiteGraph) findDisjointSLAPHelper(
+ currentNode Node,
+ currentSLAP EdgeSet,
+ currentLevel int,
+ matching EdgeSet,
+ guideLayers []NodeOrderedSet,
+ used map[int]bool,
+) (EdgeSet, bool) {
+ used[currentNode.ID] = true
+
+ if currentLevel == 0 {
+ return currentSLAP, true
+ }
+
+ for _, nextNode := range guideLayers[currentLevel-1] {
+ if used[nextNode.ID] {
+ continue
+ }
+
+ edge, found := bg.Edges.FindByNodes(currentNode, nextNode)
+ if !found {
+ continue
+ }
+
+ if matching.Contains(edge) == util.Odd(currentLevel) {
+ continue
+ }
+
+ currentSLAP = append(currentSLAP, edge)
+ slap, found := bg.findDisjointSLAPHelper(nextNode, currentSLAP, currentLevel-1, matching, guideLayers, used)
+ if found {
+ return slap, true
+ }
+ currentSLAP = currentSLAP[:len(currentSLAP)-1]
+ }
+
+ used[currentNode.ID] = false
+ return nil, false
+}
+
+func (bg *BipartiteGraph) createSLAPGuideLayers(matching EdgeSet) (guideLayers []NodeOrderedSet) {
+ used := make(map[int]bool)
+ currentLayer := NodeOrderedSet{}
+
+ for _, node := range bg.Left {
+ if matching.Free(node) {
+ used[node.ID] = true
+ currentLayer = append(currentLayer, node)
+ }
+ }
+
+ if len(currentLayer) == 0 {
+ return []NodeOrderedSet{}
+ }
+ guideLayers = append(guideLayers, currentLayer)
+
+ done := false
+
+ for !done {
+ lastLayer := currentLayer
+ currentLayer = NodeOrderedSet{}
+
+ if util.Odd(len(guideLayers)) {
+ for _, leftNode := range lastLayer {
+ for _, rightNode := range bg.Right {
+ if used[rightNode.ID] {
+ continue
+ }
+
+ edge, found := bg.Edges.FindByNodes(leftNode, rightNode)
+ if !found || matching.Contains(edge) {
+ continue
+ }
+
+ currentLayer = append(currentLayer, rightNode)
+ used[rightNode.ID] = true
+
+ if matching.Free(rightNode) {
+ done = true
+ }
+ }
+ }
+ } else {
+ for _, rightNode := range lastLayer {
+ for _, leftNode := range bg.Left {
+ if used[leftNode.ID] {
+ continue
+ }
+
+ edge, found := bg.Edges.FindByNodes(leftNode, rightNode)
+ if !found || !matching.Contains(edge) {
+ continue
+ }
+
+ currentLayer = append(currentLayer, leftNode)
+ used[leftNode.ID] = true
+ }
+ }
+
+ }
+
+ if len(currentLayer) == 0 {
+ return []NodeOrderedSet{}
+ }
+ guideLayers = append(guideLayers, currentLayer)
+ }
+
+ return
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go
new file mode 100644
index 0000000..8c38411
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go
@@ -0,0 +1,61 @@
+package edge
+
+import . "github.com/onsi/gomega/matchers/support/goraph/node"
+
+type Edge struct {
+ Node1 int
+ Node2 int
+}
+
+type EdgeSet []Edge
+
+func (ec EdgeSet) Free(node Node) bool {
+ for _, e := range ec {
+ if e.Node1 == node.ID || e.Node2 == node.ID {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (ec EdgeSet) Contains(edge Edge) bool {
+ for _, e := range ec {
+ if e == edge {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (ec EdgeSet) FindByNodes(node1, node2 Node) (Edge, bool) {
+ for _, e := range ec {
+ if (e.Node1 == node1.ID && e.Node2 == node2.ID) || (e.Node1 == node2.ID && e.Node2 == node1.ID) {
+ return e, true
+ }
+ }
+
+ return Edge{}, false
+}
+
+func (ec EdgeSet) SymmetricDifference(ec2 EdgeSet) EdgeSet {
+ edgesToInclude := make(map[Edge]bool)
+
+ for _, e := range ec {
+ edgesToInclude[e] = true
+ }
+
+ for _, e := range ec2 {
+ edgesToInclude[e] = !edgesToInclude[e]
+ }
+
+ result := EdgeSet{}
+ for e, include := range edgesToInclude {
+ if include {
+ result = append(result, e)
+ }
+ }
+
+ return result
+}
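+
+// For example (illustrative):
+//
+//   EdgeSet{{1, 2}}.SymmetricDifference(EdgeSet{{1, 2}, {3, 4}}) // EdgeSet{{3, 4}}
+//
+// Augmenting a matching with a path is exactly this operation: path edges
+// are flipped in and out of the matching.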
diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go
new file mode 100644
index 0000000..cd597a2
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go
@@ -0,0 +1,8 @@
+package node
+
+type Node struct {
+ ID int
+ Value interface{}
+}
+
+type NodeOrderedSet []Node
diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/util/util.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/util/util.go
new file mode 100644
index 0000000..d76a1ee
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/util/util.go
@@ -0,0 +1,7 @@
+package util
+
+import "math"
+
+func Odd(n int) bool {
+ return math.Mod(float64(n), 2.0) == 1.0
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/type_support.go b/vendor/github.com/onsi/gomega/matchers/type_support.go
new file mode 100644
index 0000000..dced241
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/type_support.go
@@ -0,0 +1,182 @@
+/*
+Gomega matchers
+
+This package implements the Gomega matchers and does not typically need to be imported.
+See the docs for Gomega for documentation on the matchers
+
+http://onsi.github.io/gomega/
+*/
+
+// untested sections: 11
+
+package matchers
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+)
+
+type omegaMatcher interface {
+ Match(actual interface{}) (success bool, err error)
+ FailureMessage(actual interface{}) (message string)
+ NegatedFailureMessage(actual interface{}) (message string)
+}
+
+func isBool(a interface{}) bool {
+ return reflect.TypeOf(a).Kind() == reflect.Bool
+}
+
+func isNumber(a interface{}) bool {
+ if a == nil {
+ return false
+ }
+ kind := reflect.TypeOf(a).Kind()
+ return reflect.Int <= kind && kind <= reflect.Float64
+}
+
+func isInteger(a interface{}) bool {
+ kind := reflect.TypeOf(a).Kind()
+ return reflect.Int <= kind && kind <= reflect.Int64
+}
+
+func isUnsignedInteger(a interface{}) bool {
+ kind := reflect.TypeOf(a).Kind()
+ return reflect.Uint <= kind && kind <= reflect.Uint64
+}
+
+func isFloat(a interface{}) bool {
+ kind := reflect.TypeOf(a).Kind()
+ return reflect.Float32 <= kind && kind <= reflect.Float64
+}
+
+func toInteger(a interface{}) int64 {
+ if isInteger(a) {
+ return reflect.ValueOf(a).Int()
+ } else if isUnsignedInteger(a) {
+ return int64(reflect.ValueOf(a).Uint())
+ } else if isFloat(a) {
+ return int64(reflect.ValueOf(a).Float())
+ }
+ panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a))
+}
+
+func toUnsignedInteger(a interface{}) uint64 {
+ if isInteger(a) {
+ return uint64(reflect.ValueOf(a).Int())
+ } else if isUnsignedInteger(a) {
+ return reflect.ValueOf(a).Uint()
+ } else if isFloat(a) {
+ return uint64(reflect.ValueOf(a).Float())
+ }
+ panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a))
+}
+
+func toFloat(a interface{}) float64 {
+ if isInteger(a) {
+ return float64(reflect.ValueOf(a).Int())
+ } else if isUnsignedInteger(a) {
+ return float64(reflect.ValueOf(a).Uint())
+ } else if isFloat(a) {
+ return reflect.ValueOf(a).Float()
+ }
+ panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a))
+}
+
+func isError(a interface{}) bool {
+ _, ok := a.(error)
+ return ok
+}
+
+func isChan(a interface{}) bool {
+ if isNil(a) {
+ return false
+ }
+ return reflect.TypeOf(a).Kind() == reflect.Chan
+}
+
+func isMap(a interface{}) bool {
+ if a == nil {
+ return false
+ }
+ return reflect.TypeOf(a).Kind() == reflect.Map
+}
+
+func isArrayOrSlice(a interface{}) bool {
+ if a == nil {
+ return false
+ }
+ switch reflect.TypeOf(a).Kind() {
+ case reflect.Array, reflect.Slice:
+ return true
+ default:
+ return false
+ }
+}
+
+func isString(a interface{}) bool {
+ if a == nil {
+ return false
+ }
+ return reflect.TypeOf(a).Kind() == reflect.String
+}
+
+func toString(a interface{}) (string, bool) {
+ aString, isString := a.(string)
+ if isString {
+ return aString, true
+ }
+
+ aBytes, isBytes := a.([]byte)
+ if isBytes {
+ return string(aBytes), true
+ }
+
+ aStringer, isStringer := a.(fmt.Stringer)
+ if isStringer {
+ return aStringer.String(), true
+ }
+
+ aJSONRawMessage, isJSONRawMessage := a.(json.RawMessage)
+ if isJSONRawMessage {
+ return string(aJSONRawMessage), true
+ }
+
+ return "", false
+}
+
+func lengthOf(a interface{}) (int, bool) {
+ if a == nil {
+ return 0, false
+ }
+ switch reflect.TypeOf(a).Kind() {
+ case reflect.Map, reflect.Array, reflect.String, reflect.Chan, reflect.Slice:
+ return reflect.ValueOf(a).Len(), true
+ default:
+ return 0, false
+ }
+}
+func capOf(a interface{}) (int, bool) {
+ if a == nil {
+ return 0, false
+ }
+ switch reflect.TypeOf(a).Kind() {
+ case reflect.Array, reflect.Chan, reflect.Slice:
+ return reflect.ValueOf(a).Cap(), true
+ default:
+ return 0, false
+ }
+}
+
+func isNil(a interface{}) bool {
+ if a == nil {
+ return true
+ }
+
+ switch reflect.TypeOf(a).Kind() {
+ case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return reflect.ValueOf(a).IsNil()
+ }
+
+ return false
+}
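+
+// Note (illustrative): the reflection above is what lets the matchers treat
+// a typed nil like nil, e.g.
+//
+//   var p *int
+//   var i interface{} = p // i == nil is false, yet isNil(i) is true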
diff --git a/vendor/github.com/onsi/gomega/matchers/with_transform.go b/vendor/github.com/onsi/gomega/matchers/with_transform.go
new file mode 100644
index 0000000..6f743b1
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/with_transform.go
@@ -0,0 +1,90 @@
+package matchers
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/onsi/gomega/types"
+)
+
+type WithTransformMatcher struct {
+ // input
+ Transform interface{} // must be a function of one parameter that returns one value and an optional error
+ Matcher types.GomegaMatcher
+
+ // cached value
+ transformArgType reflect.Type
+
+ // state
+ transformedValue interface{}
+}
+
+// reflect.Type for error
+var errorT = reflect.TypeOf((*error)(nil)).Elem()
+
+func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher) *WithTransformMatcher {
+ if transform == nil {
+ panic("transform function cannot be nil")
+ }
+ txType := reflect.TypeOf(transform)
+ if txType.NumIn() != 1 {
+ panic("transform function must have 1 argument")
+ }
+ if numout := txType.NumOut(); numout != 1 {
+ if numout != 2 || !txType.Out(1).AssignableTo(errorT) {
+ panic("transform function must either have 1 return value, or 1 return value plus 1 error value")
+ }
+ }
+
+ return &WithTransformMatcher{
+ Transform: transform,
+ Matcher: matcher,
+ transformArgType: reflect.TypeOf(transform).In(0),
+ }
+}
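+
+// Usage sketch (illustrative), via gomega's exported WithTransform helper:
+//
+//   Expect("hello").To(WithTransform(strings.ToUpper, Equal("HELLO")))
+//
+// A transform may also return (T, error); a non-nil error fails the match
+// (see Match below).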
+
+func (m *WithTransformMatcher) Match(actual interface{}) (bool, error) {
+ // prepare a parameter to pass to the Transform function
+ var param reflect.Value
+ if actual != nil && reflect.TypeOf(actual).AssignableTo(m.transformArgType) {
+ // The dynamic type of actual is compatible with the transform argument.
+ param = reflect.ValueOf(actual)
+
+ } else if actual == nil && m.transformArgType.Kind() == reflect.Interface {
+ // The dynamic type of actual is unknown, so there's no way to make its
+ // reflect.Value. Create a nil of the transform argument, which is known.
+ param = reflect.Zero(m.transformArgType)
+
+ } else {
+ return false, fmt.Errorf("Transform function expects '%s' but we have '%T'", m.transformArgType, actual)
+ }
+
+ // call the Transform function with `actual`
+ fn := reflect.ValueOf(m.Transform)
+ result := fn.Call([]reflect.Value{param})
+ if len(result) == 2 {
+ if !result[1].IsNil() {
+ return false, fmt.Errorf("Transform function failed: %s", result[1].Interface().(error).Error())
+ }
+ }
+ m.transformedValue = result[0].Interface() // expect exactly one value
+
+ return m.Matcher.Match(m.transformedValue)
+}
+
+func (m *WithTransformMatcher) FailureMessage(_ interface{}) (message string) {
+ return m.Matcher.FailureMessage(m.transformedValue)
+}
+
+func (m *WithTransformMatcher) NegatedFailureMessage(_ interface{}) (message string) {
+ return m.Matcher.NegatedFailureMessage(m.transformedValue)
+}
+
+func (m *WithTransformMatcher) MatchMayChangeInTheFuture(_ interface{}) bool {
+ // TODO: Maybe this should always just return true? (Only an issue for non-deterministic transformers.)
+ //
+ // Querying the next matcher is fine if the transformer always will return the same value.
+ // But if the transformer is non-deterministic and returns a different value each time, then there
+ // is no point in querying the next matcher, since it can only comment on the last transformed value.
+ return types.MatchMayChangeInTheFuture(m.Matcher, m.transformedValue)
+}
diff --git a/vendor/github.com/onsi/gomega/types/types.go b/vendor/github.com/onsi/gomega/types/types.go
new file mode 100644
index 0000000..7c7adb9
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/types/types.go
@@ -0,0 +1,93 @@
+package types
+
+import (
+ "context"
+ "time"
+)
+
+type GomegaFailHandler func(message string, callerSkip ...int)
+
+// A simple *testing.T interface wrapper
+type GomegaTestingT interface {
+ Helper()
+ Fatalf(format string, args ...interface{})
+}
+
+// Gomega represents an object that can perform synchronous and asynchronous assertions with Gomega matchers
+type Gomega interface {
+ Ω(actual interface{}, extra ...interface{}) Assertion
+ Expect(actual interface{}, extra ...interface{}) Assertion
+ ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion
+
+ Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion
+ EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion
+
+ Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion
+ ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion
+
+ SetDefaultEventuallyTimeout(time.Duration)
+ SetDefaultEventuallyPollingInterval(time.Duration)
+ SetDefaultConsistentlyDuration(time.Duration)
+ SetDefaultConsistentlyPollingInterval(time.Duration)
+}
+
+// All Gomega matchers must implement the GomegaMatcher interface
+//
+// For details on writing custom matchers, check out: http://onsi.github.io/gomega/#adding-your-own-matchers
+type GomegaMatcher interface {
+ Match(actual interface{}) (success bool, err error)
+ FailureMessage(actual interface{}) (message string)
+ NegatedFailureMessage(actual interface{}) (message string)
+}
+
+/*
+GomegaMatchers that also match the OracleMatcher interface can convey information about
+whether or not their result will change upon future attempts.
+
+This allows `Eventually` and `Consistently` to short circuit if success becomes impossible.
+
+For example, a process' exit code can never change. So, gexec's Exit matcher returns `true`
+for `MatchMayChangeInTheFuture` until the process exits, at which point it returns `false` forevermore.
+*/
+type OracleMatcher interface {
+ MatchMayChangeInTheFuture(actual interface{}) bool
+}
+
+func MatchMayChangeInTheFuture(matcher GomegaMatcher, value interface{}) bool {
+ oracleMatcher, ok := matcher.(OracleMatcher)
+ if !ok {
+ return true
+ }
+
+ return oracleMatcher.MatchMayChangeInTheFuture(value)
+}
+
+// AsyncAssertions are returned by Eventually and Consistently and enable matchers to be polled repeatedly to ensure
+// they are eventually satisfied
+type AsyncAssertion interface {
+ Should(matcher GomegaMatcher, optionalDescription ...interface{}) bool
+ ShouldNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool
+
+ WithOffset(offset int) AsyncAssertion
+ WithTimeout(interval time.Duration) AsyncAssertion
+ WithPolling(interval time.Duration) AsyncAssertion
+ Within(timeout time.Duration) AsyncAssertion
+ ProbeEvery(interval time.Duration) AsyncAssertion
+ WithContext(ctx context.Context) AsyncAssertion
+ WithArguments(argsToForward ...interface{}) AsyncAssertion
+ MustPassRepeatedly(count int) AsyncAssertion
+}
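+
+// For example (illustrative, through the top-level gomega package, where
+// fetchStatus stands in for any polled function):
+//
+//   Eventually(fetchStatus).WithTimeout(5 * time.Second).WithPolling(100 * time.Millisecond).Should(Equal("ready"))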
+
+// Assertions are returned by Ω and Expect and enable assertions against Gomega matchers
+type Assertion interface {
+ Should(matcher GomegaMatcher, optionalDescription ...interface{}) bool
+ ShouldNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool
+
+ To(matcher GomegaMatcher, optionalDescription ...interface{}) bool
+ ToNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool
+ NotTo(matcher GomegaMatcher, optionalDescription ...interface{}) bool
+
+ WithOffset(offset int) Assertion
+
+ Error() Assertion
+}
diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore
new file mode 100644
index 0000000..daf913b
--- /dev/null
+++ b/vendor/github.com/pkg/errors/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml
new file mode 100644
index 0000000..9159de0
--- /dev/null
+++ b/vendor/github.com/pkg/errors/.travis.yml
@@ -0,0 +1,10 @@
+language: go
+go_import_path: github.com/pkg/errors
+go:
+ - 1.11.x
+ - 1.12.x
+ - 1.13.x
+ - tip
+
+script:
+ - make check
diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE
new file mode 100644
index 0000000..835ba3e
--- /dev/null
+++ b/vendor/github.com/pkg/errors/LICENSE
@@ -0,0 +1,23 @@
+Copyright (c) 2015, Dave Cheney
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pkg/errors/Makefile b/vendor/github.com/pkg/errors/Makefile
new file mode 100644
index 0000000..ce9d7cd
--- /dev/null
+++ b/vendor/github.com/pkg/errors/Makefile
@@ -0,0 +1,44 @@
+PKGS := github.com/pkg/errors
+SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS))
+GO := go
+
+check: test vet gofmt misspell unconvert staticcheck ineffassign unparam
+
+test:
+ $(GO) test $(PKGS)
+
+vet: | test
+ $(GO) vet $(PKGS)
+
+staticcheck:
+ $(GO) get honnef.co/go/tools/cmd/staticcheck
+ staticcheck -checks all $(PKGS)
+
+misspell:
+ $(GO) get github.com/client9/misspell/cmd/misspell
+ misspell \
+ -locale GB \
+ -error \
+ *.md *.go
+
+unconvert:
+ $(GO) get github.com/mdempsky/unconvert
+ unconvert -v $(PKGS)
+
+ineffassign:
+ $(GO) get github.com/gordonklaus/ineffassign
+ find $(SRCDIRS) -name '*.go' | xargs ineffassign
+
+pedantic: check errcheck
+
+unparam:
+ $(GO) get mvdan.cc/unparam
+ unparam ./...
+
+errcheck:
+ $(GO) get github.com/kisielk/errcheck
+ errcheck $(PKGS)
+
+gofmt:
+ @echo Checking code is gofmted
+ @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)"
diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md
new file mode 100644
index 0000000..54dfdcb
--- /dev/null
+++ b/vendor/github.com/pkg/errors/README.md
@@ -0,0 +1,59 @@
+# errors [Build Status](https://travis-ci.org/pkg/errors) [AppVeyor](https://ci.appveyor.com/project/davecheney/errors/branch/master) [GoDoc](http://godoc.org/github.com/pkg/errors) [Go Report Card](https://goreportcard.com/report/github.com/pkg/errors) [Sourcegraph](https://sourcegraph.com/github.com/pkg/errors?badge)
+
+Package errors provides simple error handling primitives.
+
+`go get github.com/pkg/errors`
+
+The traditional error handling idiom in Go is roughly akin to
+```go
+if err != nil {
+ return err
+}
+```
+which, when applied recursively up the call stack, results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.
+
+## Adding context to an error
+
+The errors.Wrap function returns a new error that adds context to the original error. For example
+```go
+_, err := ioutil.ReadAll(r)
+if err != nil {
+ return errors.Wrap(err, "read failed")
+}
+```
+## Retrieving the cause of an error
+
+Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.
+```go
+type causer interface {
+ Cause() error
+}
+```
+`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:
+```go
+switch err := errors.Cause(err).(type) {
+case *MyError:
+ // handle specifically
+default:
+ // unknown error
+}
+```
+
+[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).
+
+## Roadmap
+
+With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows:
+
+- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible)
+- 1.0. Final release.
+
+## Contributing
+
+Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports.
+
+Before sending a PR, please discuss your change by raising an issue.
+
+## License
+
+BSD-2-Clause
diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml
new file mode 100644
index 0000000..a932ead
--- /dev/null
+++ b/vendor/github.com/pkg/errors/appveyor.yml
@@ -0,0 +1,32 @@
+version: build-{build}.{branch}
+
+clone_folder: C:\gopath\src\github.com\pkg\errors
+shallow_clone: true # for startup speed
+
+environment:
+ GOPATH: C:\gopath
+
+platform:
+ - x64
+
+# http://www.appveyor.com/docs/installed-software
+install:
+ # some helpful output for debugging builds
+ - go version
+ - go env
+ # pre-installed MinGW at C:\MinGW is 32bit only
+ # but MSYS2 at C:\msys64 has mingw64
+ - set PATH=C:\msys64\mingw64\bin;%PATH%
+ - gcc --version
+ - g++ --version
+
+build_script:
+ - go install -v ./...
+
+test_script:
+ - set PATH=C:\gopath\bin;%PATH%
+ - go test -v ./...
+
+#artifacts:
+# - path: '%GOPATH%\bin\*.exe'
+deploy: off
diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go
new file mode 100644
index 0000000..161aea2
--- /dev/null
+++ b/vendor/github.com/pkg/errors/errors.go
@@ -0,0 +1,288 @@
+// Package errors provides simple error handling primitives.
+//
+// The traditional error handling idiom in Go is roughly akin to
+//
+// if err != nil {
+// return err
+// }
+//
+// which when applied recursively up the call stack results in error reports
+// without context or debugging information. The errors package allows
+// programmers to add context to the failure path in their code in a way
+// that does not destroy the original value of the error.
+//
+// Adding context to an error
+//
+// The errors.Wrap function returns a new error that adds context to the
+// original error by recording a stack trace at the point Wrap is called,
+// together with the supplied message. For example
+//
+// _, err := ioutil.ReadAll(r)
+// if err != nil {
+// return errors.Wrap(err, "read failed")
+// }
+//
+// If additional control is required, the errors.WithStack and
+// errors.WithMessage functions destructure errors.Wrap into its component
+// operations: annotating an error with a stack trace and with a message,
+// respectively.
+//
+// Retrieving the cause of an error
+//
+// Using errors.Wrap constructs a stack of errors, adding context to the
+// preceding error. Depending on the nature of the error it may be necessary
+// to reverse the operation of errors.Wrap to retrieve the original error
+// for inspection. Any error value which implements this interface
+//
+// type causer interface {
+// Cause() error
+// }
+//
+// can be inspected by errors.Cause. errors.Cause will recursively retrieve
+// the topmost error that does not implement causer, which is assumed to be
+// the original cause. For example:
+//
+// switch err := errors.Cause(err).(type) {
+// case *MyError:
+// // handle specifically
+// default:
+// // unknown error
+// }
+//
+// Although the causer interface is not exported by this package, it is
+// considered a part of its stable public interface.
+//
+// Formatted printing of errors
+//
+// All error values returned from this package implement fmt.Formatter and can
+// be formatted by the fmt package. The following verbs are supported:
+//
+// %s print the error. If the error has a Cause it will be
+// printed recursively.
+// %v see %s
+// %+v extended format. Each Frame of the error's StackTrace will
+// be printed in detail.
+//
+// Retrieving the stack trace of an error or wrapper
+//
+// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
+// invoked. This information can be retrieved with the following interface:
+//
+// type stackTracer interface {
+// StackTrace() errors.StackTrace
+// }
+//
+// The returned errors.StackTrace type is defined as
+//
+// type StackTrace []Frame
+//
+// The Frame type represents a call site in the stack trace. Frame supports
+// the fmt.Formatter interface that can be used for printing information about
+// the stack trace of this error. For example:
+//
+// if err, ok := err.(stackTracer); ok {
+// for _, f := range err.StackTrace() {
+// fmt.Printf("%+s:%d\n", f, f)
+// }
+// }
+//
+// Although the stackTracer interface is not exported by this package, it is
+// considered a part of its stable public interface.
+//
+// See the documentation for Frame.Format for more details.
+package errors
+
+import (
+ "fmt"
+ "io"
+)
+
+// New returns an error with the supplied message.
+// New also records the stack trace at the point it was called.
+func New(message string) error {
+ return &fundamental{
+ msg: message,
+ stack: callers(),
+ }
+}
+
+// Errorf formats according to a format specifier and returns the string
+// as a value that satisfies error.
+// Errorf also records the stack trace at the point it was called.
+func Errorf(format string, args ...interface{}) error {
+ return &fundamental{
+ msg: fmt.Sprintf(format, args...),
+ stack: callers(),
+ }
+}
+
+// fundamental is an error that has a message and a stack, but no caller.
+type fundamental struct {
+ msg string
+ *stack
+}
+
+func (f *fundamental) Error() string { return f.msg }
+
+func (f *fundamental) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ io.WriteString(s, f.msg)
+ f.stack.Format(s, verb)
+ return
+ }
+ fallthrough
+ case 's':
+ io.WriteString(s, f.msg)
+ case 'q':
+ fmt.Fprintf(s, "%q", f.msg)
+ }
+}
+
+// WithStack annotates err with a stack trace at the point WithStack was called.
+// If err is nil, WithStack returns nil.
+func WithStack(err error) error {
+ if err == nil {
+ return nil
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+type withStack struct {
+ error
+ *stack
+}
+
+func (w *withStack) Cause() error { return w.error }
+
+// Unwrap provides compatibility for Go 1.13 error chains.
+func (w *withStack) Unwrap() error { return w.error }
+
+func (w *withStack) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ fmt.Fprintf(s, "%+v", w.Cause())
+ w.stack.Format(s, verb)
+ return
+ }
+ fallthrough
+ case 's':
+ io.WriteString(s, w.Error())
+ case 'q':
+ fmt.Fprintf(s, "%q", w.Error())
+ }
+}
+
+// Wrap returns an error annotating err with a stack trace
+// at the point Wrap is called, and the supplied message.
+// If err is nil, Wrap returns nil.
+func Wrap(err error, message string) error {
+ if err == nil {
+ return nil
+ }
+ err = &withMessage{
+ cause: err,
+ msg: message,
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+// Wrapf returns an error annotating err with a stack trace
+// at the point Wrapf is called, and the format specifier.
+// If err is nil, Wrapf returns nil.
+func Wrapf(err error, format string, args ...interface{}) error {
+ if err == nil {
+ return nil
+ }
+ err = &withMessage{
+ cause: err,
+ msg: fmt.Sprintf(format, args...),
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+// WithMessage annotates err with a new message.
+// If err is nil, WithMessage returns nil.
+func WithMessage(err error, message string) error {
+ if err == nil {
+ return nil
+ }
+ return &withMessage{
+ cause: err,
+ msg: message,
+ }
+}
+
+// WithMessagef annotates err with the format specifier.
+// If err is nil, WithMessagef returns nil.
+func WithMessagef(err error, format string, args ...interface{}) error {
+ if err == nil {
+ return nil
+ }
+ return &withMessage{
+ cause: err,
+ msg: fmt.Sprintf(format, args...),
+ }
+}
+
+type withMessage struct {
+ cause error
+ msg string
+}
+
+func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
+func (w *withMessage) Cause() error { return w.cause }
+
+// Unwrap provides compatibility for Go 1.13 error chains.
+func (w *withMessage) Unwrap() error { return w.cause }
+
+func (w *withMessage) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ fmt.Fprintf(s, "%+v\n", w.Cause())
+ io.WriteString(s, w.msg)
+ return
+ }
+ fallthrough
+ case 's', 'q':
+ io.WriteString(s, w.Error())
+ }
+}
+
+// Cause returns the underlying cause of the error, if possible.
+// An error value has a cause if it implements the following
+// interface:
+//
+// type causer interface {
+// Cause() error
+// }
+//
+// If the error does not implement Cause, the original error will
+// be returned. If the error is nil, nil will be returned without further
+// investigation.
+func Cause(err error) error {
+ type causer interface {
+ Cause() error
+ }
+
+ for err != nil {
+ cause, ok := err.(causer)
+ if !ok {
+ break
+ }
+ err = cause.Cause()
+ }
+ return err
+}
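+
+// For example (illustrative): unlike Unwrap, which peels a single layer,
+// Cause walks the entire chain:
+//
+//   err := Wrap(Wrap(io.EOF, "inner"), "outer")
+//   Cause(err) == io.EOF // true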
diff --git a/vendor/github.com/pkg/errors/go113.go b/vendor/github.com/pkg/errors/go113.go
new file mode 100644
index 0000000..be0d10d
--- /dev/null
+++ b/vendor/github.com/pkg/errors/go113.go
@@ -0,0 +1,38 @@
+// +build go1.13
+
+package errors
+
+import (
+ stderrors "errors"
+)
+
+// Is reports whether any error in err's chain matches target.
+//
+// The chain consists of err itself followed by the sequence of errors obtained by
+// repeatedly calling Unwrap.
+//
+// An error is considered to match a target if it is equal to that target or if
+// it implements a method Is(error) bool such that Is(target) returns true.
+func Is(err, target error) bool { return stderrors.Is(err, target) }
+
+// As finds the first error in err's chain that matches target, and if so, sets
+// target to that error value and returns true.
+//
+// The chain consists of err itself followed by the sequence of errors obtained by
+// repeatedly calling Unwrap.
+//
+// An error matches target if the error's concrete value is assignable to the value
+// pointed to by target, or if the error has a method As(interface{}) bool such that
+// As(target) returns true. In the latter case, the As method is responsible for
+// setting target.
+//
+// As will panic if target is not a non-nil pointer to either a type that implements
+// error, or to any interface type. As returns false if err is nil.
+func As(err error, target interface{}) bool { return stderrors.As(err, target) }
+
+// Unwrap returns the result of calling the Unwrap method on err, if err's
+// type contains an Unwrap method returning error.
+// Otherwise, Unwrap returns nil.
+func Unwrap(err error) error {
+ return stderrors.Unwrap(err)
+}
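+
+// For example (illustrative): because withStack and withMessage implement
+// Unwrap, errors produced by Wrap participate in Go 1.13 chains:
+//
+//   Is(Wrap(io.EOF, "read failed"), io.EOF) // true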
diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go
new file mode 100644
index 0000000..779a834
--- /dev/null
+++ b/vendor/github.com/pkg/errors/stack.go
@@ -0,0 +1,177 @@
+package errors
+
+import (
+ "fmt"
+ "io"
+ "path"
+ "runtime"
+ "strconv"
+ "strings"
+)
+
+// Frame represents a program counter inside a stack frame.
+// For historical reasons if Frame is interpreted as a uintptr
+// its value represents the program counter + 1.
+type Frame uintptr
+
+// pc returns the program counter for this frame;
+// multiple frames may have the same PC value.
+func (f Frame) pc() uintptr { return uintptr(f) - 1 }
+
+// file returns the full path to the file that contains the
+// function for this Frame's pc.
+func (f Frame) file() string {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return "unknown"
+ }
+ file, _ := fn.FileLine(f.pc())
+ return file
+}
+
+// line returns the line number of source code of the
+// function for this Frame's pc.
+func (f Frame) line() int {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return 0
+ }
+ _, line := fn.FileLine(f.pc())
+ return line
+}
+
+// name returns the name of this function, if known.
+func (f Frame) name() string {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return "unknown"
+ }
+ return fn.Name()
+}
+
+// Format formats the frame according to the fmt.Formatter interface.
+//
+// %s source file
+// %d source line
+// %n function name
+// %v equivalent to %s:%d
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+// %+s function name and path of source file relative to the compile time
+// GOPATH separated by \n\t (<funcname>\n\t<path>)
+// %+v equivalent to %+s:%d
+func (f Frame) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 's':
+ switch {
+ case s.Flag('+'):
+ io.WriteString(s, f.name())
+ io.WriteString(s, "\n\t")
+ io.WriteString(s, f.file())
+ default:
+ io.WriteString(s, path.Base(f.file()))
+ }
+ case 'd':
+ io.WriteString(s, strconv.Itoa(f.line()))
+ case 'n':
+ io.WriteString(s, funcname(f.name()))
+ case 'v':
+ f.Format(s, 's')
+ io.WriteString(s, ":")
+ f.Format(s, 'd')
+ }
+}
+
+// MarshalText formats a stacktrace Frame as a text string. The output is the
+// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs.
+func (f Frame) MarshalText() ([]byte, error) {
+ name := f.name()
+ if name == "unknown" {
+ return []byte(name), nil
+ }
+ return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil
+}
+
+// StackTrace is a stack of Frames from innermost (newest) to outermost (oldest).
+type StackTrace []Frame
+
+// Format formats the stack of Frames according to the fmt.Formatter interface.
+//
+// %s lists source files for each Frame in the stack
+// %v lists the source file and line number for each Frame in the stack
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+// %+v Prints filename, function, and line number for each Frame in the stack.
+func (st StackTrace) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ switch {
+ case s.Flag('+'):
+ for _, f := range st {
+ io.WriteString(s, "\n")
+ f.Format(s, verb)
+ }
+ case s.Flag('#'):
+ fmt.Fprintf(s, "%#v", []Frame(st))
+ default:
+ st.formatSlice(s, verb)
+ }
+ case 's':
+ st.formatSlice(s, verb)
+ }
+}
+
+// formatSlice will format this StackTrace into the given buffer as a slice of
+// Frame, only valid when called with '%s' or '%v'.
+func (st StackTrace) formatSlice(s fmt.State, verb rune) {
+ io.WriteString(s, "[")
+ for i, f := range st {
+ if i > 0 {
+ io.WriteString(s, " ")
+ }
+ f.Format(s, verb)
+ }
+ io.WriteString(s, "]")
+}
+
+// stack represents a stack of program counters.
+type stack []uintptr
+
+func (s *stack) Format(st fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ switch {
+ case st.Flag('+'):
+ for _, pc := range *s {
+ f := Frame(pc)
+ fmt.Fprintf(st, "\n%+v", f)
+ }
+ }
+ }
+}
+
+func (s *stack) StackTrace() StackTrace {
+ f := make([]Frame, len(*s))
+ for i := 0; i < len(f); i++ {
+ f[i] = Frame((*s)[i])
+ }
+ return f
+}
+
+func callers() *stack {
+ const depth = 32
+ var pcs [depth]uintptr
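+ // skip runtime.Callers itself, this function, and the exported
+ // constructor (New, Wrap, WithStack, ...) so the first recorded
+ // Frame is the call site that created the error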
+ n := runtime.Callers(3, pcs[:])
+ var st stack = pcs[0:n]
+ return &st
+}
+
+// funcname removes the path prefix component of a function's name reported by func.Name().
+func funcname(name string) string {
+ i := strings.LastIndex(name, "/")
+ name = name[i+1:]
+ i = strings.Index(name, ".")
+ return name[i+1:]
+}
diff --git a/vendor/github.com/pmezard/go-difflib/LICENSE b/vendor/github.com/pmezard/go-difflib/LICENSE
new file mode 100644
index 0000000..c67dad6
--- /dev/null
+++ b/vendor/github.com/pmezard/go-difflib/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2013, Patrick Mezard
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+ The names of its contributors may not be used to endorse or promote
+products derived from this software without specific prior written
+permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go
new file mode 100644
index 0000000..003e99f
--- /dev/null
+++ b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go
@@ -0,0 +1,772 @@
+// Package difflib is a partial port of Python difflib module.
+//
+// It provides tools to compare sequences of strings and generate textual diffs.
+//
+// The following class and functions have been ported:
+//
+// - SequenceMatcher
+//
+// - unified_diff
+//
+// - context_diff
+//
+// Getting unified diffs was the main goal of the port. Keep in mind this code
+// is mostly suitable to output text differences in a human friendly way, there
+// are no guarantees generated diffs are consumable by patch(1).
+package difflib
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+)
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func max(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+func calculateRatio(matches, length int) float64 {
+ if length > 0 {
+ return 2.0 * float64(matches) / float64(length)
+ }
+ return 1.0
+}
+
+type Match struct {
+ A int
+ B int
+ Size int
+}
+
+type OpCode struct {
+ Tag byte
+ I1 int
+ I2 int
+ J1 int
+ J2 int
+}
+
+// SequenceMatcher compares sequence of strings. The basic
+// algorithm predates, and is a little fancier than, an algorithm
+// published in the late 1980's by Ratcliff and Obershelp under the
+// hyperbolic name "gestalt pattern matching". The basic idea is to find
+// the longest contiguous matching subsequence that contains no "junk"
+// elements (R-O doesn't address junk). The same idea is then applied
+// recursively to the pieces of the sequences to the left and to the right
+// of the matching subsequence. This does not yield minimal edit
+// sequences, but does tend to yield matches that "look right" to people.
+//
+// SequenceMatcher tries to compute a "human-friendly diff" between two
+// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
+// longest *contiguous* & junk-free matching subsequence. That's what
+// catches peoples' eyes. The Windows(tm) windiff has another interesting
+// notion, pairing up elements that appear uniquely in each sequence.
+// That, and the method here, appear to yield more intuitive difference
+// reports than does diff. This method appears to be the least vulnerable
+// to synching up on blocks of "junk lines", though (like blank lines in
+// ordinary text files, or maybe "
" lines in HTML files). That may be
+// because this is the only method of the 3 that has a *concept* of
+// "junk" .
+//
+// Timing: Basic R-O is cubic time worst case and quadratic time expected
+// case. SequenceMatcher is quadratic time for the worst case and has
+// expected-case behavior dependent in a complicated way on how many
+// elements the sequences have in common; best case time is linear.
+type SequenceMatcher struct {
+ a []string
+ b []string
+ b2j map[string][]int
+ IsJunk func(string) bool
+ autoJunk bool
+ bJunk map[string]struct{}
+ matchingBlocks []Match
+ fullBCount map[string]int
+ bPopular map[string]struct{}
+ opCodes []OpCode
+}
+
+func NewMatcher(a, b []string) *SequenceMatcher {
+ m := SequenceMatcher{autoJunk: true}
+ m.SetSeqs(a, b)
+ return &m
+}
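+
+// For example (illustrative):
+//
+//   m := NewMatcher([]string{"one", "two", "three"}, []string{"one", "too", "three"})
+//   m.Ratio() // 2.0 * 2 matches / 6 elements = 0.66...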
+
+func NewMatcherWithJunk(a, b []string, autoJunk bool,
+ isJunk func(string) bool) *SequenceMatcher {
+
+ m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
+ m.SetSeqs(a, b)
+ return &m
+}
+
+// Set two sequences to be compared.
+func (m *SequenceMatcher) SetSeqs(a, b []string) {
+ m.SetSeq1(a)
+ m.SetSeq2(b)
+}
+
+// Set the first sequence to be compared. The second sequence to be compared is
+// not changed.
+//
+// SequenceMatcher computes and caches detailed information about the second
+// sequence, so if you want to compare one sequence S against many sequences,
+// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
+// sequences.
+//
+// See also SetSeqs() and SetSeq2().
+func (m *SequenceMatcher) SetSeq1(a []string) {
+ if &a == &m.a {
+ return
+ }
+ m.a = a
+ m.matchingBlocks = nil
+ m.opCodes = nil
+}
+
+// Set the second sequence to be compared. The first sequence to be compared is
+// not changed.
+func (m *SequenceMatcher) SetSeq2(b []string) {
+ if &b == &m.b {
+ return
+ }
+ m.b = b
+ m.matchingBlocks = nil
+ m.opCodes = nil
+ m.fullBCount = nil
+ m.chainB()
+}
+
+func (m *SequenceMatcher) chainB() {
+ // Populate line -> index mapping
+ b2j := map[string][]int{}
+ for i, s := range m.b {
+ indices := b2j[s]
+ indices = append(indices, i)
+ b2j[s] = indices
+ }
+
+ // Purge junk elements
+ m.bJunk = map[string]struct{}{}
+ if m.IsJunk != nil {
+ junk := m.bJunk
+ for s := range b2j {
+ if m.IsJunk(s) {
+ junk[s] = struct{}{}
+ }
+ }
+ for s := range junk {
+ delete(b2j, s)
+ }
+ }
+
+ // Purge remaining popular elements
+ popular := map[string]struct{}{}
+ n := len(m.b)
+ if m.autoJunk && n >= 200 {
+ ntest := n/100 + 1
+ for s, indices := range b2j {
+ if len(indices) > ntest {
+ popular[s] = struct{}{}
+ }
+ }
+ for s := range popular {
+ delete(b2j, s)
+ }
+ }
+ m.bPopular = popular
+ m.b2j = b2j
+}
+
+func (m *SequenceMatcher) isBJunk(s string) bool {
+ _, ok := m.bJunk[s]
+ return ok
+}
+
+// Find longest matching block in a[alo:ahi] and b[blo:bhi].
+//
+// If IsJunk is not defined:
+//
+// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
+// alo <= i <= i+k <= ahi
+// blo <= j <= j+k <= bhi
+// and for all (i',j',k') meeting those conditions,
+// k >= k'
+// i <= i'
+// and if i == i', j <= j'
+//
+// In other words, of all maximal matching blocks, return one that
+// starts earliest in a, and of all those maximal matching blocks that
+// start earliest in a, return the one that starts earliest in b.
+//
+// If IsJunk is defined, first the longest matching block is
+// determined as above, but with the additional restriction that no
+// junk element appears in the block. Then that block is extended as
+// far as possible by matching (only) junk elements on both sides. So
+// the resulting block never matches on junk except as identical junk
+// happens to be adjacent to an "interesting" match.
+//
+// If no blocks match, return (alo, blo, 0).
+func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
+ // CAUTION: stripping common prefix or suffix would be incorrect.
+ // E.g.,
+ // ab
+ // acab
+ // Longest matching block is "ab", but if common prefix is
+ // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
+ // strip, so ends up claiming that ab is changed to acab by
+ // inserting "ca" in the middle. That's minimal but unintuitive:
+ // "it's obvious" that someone inserted "ac" at the front.
+ // Windiff ends up at the same place as diff, but by pairing up
+ // the unique 'b's and then matching the first two 'a's.
+ besti, bestj, bestsize := alo, blo, 0
+
+ // find longest junk-free match
+ // during an iteration of the loop, j2len[j] = length of longest
+ // junk-free match ending with a[i-1] and b[j]
+ j2len := map[int]int{}
+ for i := alo; i != ahi; i++ {
+ // look at all instances of a[i] in b; note that because
+ // b2j has no junk keys, the loop is skipped if a[i] is junk
+ newj2len := map[int]int{}
+ for _, j := range m.b2j[m.a[i]] {
+ // a[i] matches b[j]
+ if j < blo {
+ continue
+ }
+ if j >= bhi {
+ break
+ }
+ k := j2len[j-1] + 1
+ newj2len[j] = k
+ if k > bestsize {
+ besti, bestj, bestsize = i-k+1, j-k+1, k
+ }
+ }
+ j2len = newj2len
+ }
+
+ // Extend the best by non-junk elements on each end. In particular,
+ // "popular" non-junk elements aren't in b2j, which greatly speeds
+ // the inner loop above, but also means "the best" match so far
+ // doesn't contain any junk *or* popular non-junk elements.
+ for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
+ m.a[besti-1] == m.b[bestj-1] {
+ besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+ }
+ for besti+bestsize < ahi && bestj+bestsize < bhi &&
+ !m.isBJunk(m.b[bestj+bestsize]) &&
+ m.a[besti+bestsize] == m.b[bestj+bestsize] {
+ bestsize += 1
+ }
+
+ // Now that we have a wholly interesting match (albeit possibly
+ // empty!), we may as well suck up the matching junk on each
+ // side of it too. Can't think of a good reason not to, and it
+ // saves post-processing the (possibly considerable) expense of
+ // figuring out what to do with it. In the case of an empty
+ // interesting match, this is clearly the right thing to do,
+ // because no other kind of match is possible in the regions.
+ for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
+ m.a[besti-1] == m.b[bestj-1] {
+ besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+ }
+ for besti+bestsize < ahi && bestj+bestsize < bhi &&
+ m.isBJunk(m.b[bestj+bestsize]) &&
+ m.a[besti+bestsize] == m.b[bestj+bestsize] {
+ bestsize += 1
+ }
+
+ return Match{A: besti, B: bestj, Size: bestsize}
+}
+
+// Return list of triples describing matching subsequences.
+//
+// Each triple is of the form (i, j, n), and means that
+// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
+// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
+// adjacent triples in the list, and the second is not the last triple in the
+// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
+// adjacent equal blocks.
+//
+// The last triple is a dummy, (len(a), len(b), 0), and is the only
+// triple with n==0.
+func (m *SequenceMatcher) GetMatchingBlocks() []Match {
+ if m.matchingBlocks != nil {
+ return m.matchingBlocks
+ }
+
+ var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
+ matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
+ match := m.findLongestMatch(alo, ahi, blo, bhi)
+ i, j, k := match.A, match.B, match.Size
+ if match.Size > 0 {
+ if alo < i && blo < j {
+ matched = matchBlocks(alo, i, blo, j, matched)
+ }
+ matched = append(matched, match)
+ if i+k < ahi && j+k < bhi {
+ matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
+ }
+ }
+ return matched
+ }
+ matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
+
+ // It's possible that we have adjacent equal blocks in the
+ // matching_blocks list now.
+ nonAdjacent := []Match{}
+ i1, j1, k1 := 0, 0, 0
+ for _, b := range matched {
+ // Is this block adjacent to i1, j1, k1?
+ i2, j2, k2 := b.A, b.B, b.Size
+ if i1+k1 == i2 && j1+k1 == j2 {
+ // Yes, so collapse them -- this just increases the length of
+ // the first block by the length of the second, and the first
+ // block so lengthened remains the block to compare against.
+ k1 += k2
+ } else {
+ // Not adjacent. Remember the first block (k1==0 means it's
+ // the dummy we started with), and make the second block the
+ // new block to compare against.
+ if k1 > 0 {
+ nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+ }
+ i1, j1, k1 = i2, j2, k2
+ }
+ }
+ if k1 > 0 {
+ nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+ }
+
+ nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
+ m.matchingBlocks = nonAdjacent
+ return m.matchingBlocks
+}
+
+// Return list of 5-tuples describing how to turn a into b.
+//
+// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
+// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
+// tuple preceding it, and likewise for j1 == the previous j2.
+//
+// The tags are characters, with these meanings:
+//
+// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2]
+//
+// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case.
+//
+// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
+//
+// 'e' (equal): a[i1:i2] == b[j1:j2]
+func (m *SequenceMatcher) GetOpCodes() []OpCode {
+ if m.opCodes != nil {
+ return m.opCodes
+ }
+ i, j := 0, 0
+ matching := m.GetMatchingBlocks()
+ opCodes := make([]OpCode, 0, len(matching))
+ for _, m := range matching {
+ // invariant: we've pumped out correct diffs to change
+ // a[:i] into b[:j], and the next matching block is
+ // a[ai:ai+size] == b[bj:bj+size]. So we need to pump
+ // out a diff to change a[i:ai] into b[j:bj], pump out
+ // the matching block, and move (i,j) beyond the match
+ ai, bj, size := m.A, m.B, m.Size
+ tag := byte(0)
+ if i < ai && j < bj {
+ tag = 'r'
+ } else if i < ai {
+ tag = 'd'
+ } else if j < bj {
+ tag = 'i'
+ }
+ if tag > 0 {
+ opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
+ }
+ i, j = ai+size, bj+size
+ // the list of matching blocks is terminated by a
+ // sentinel with size 0
+ if size > 0 {
+ opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
+ }
+ }
+ m.opCodes = opCodes
+ return m.opCodes
+}
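+
+// For example (illustrative), for a = ["one", "two"] and b = ["one", "three"]
+// this returns [{'e' 0 1 0 1}, {'r' 1 2 1 2}]: keep the first line, replace
+// the second.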
+
+// Isolate change clusters by eliminating ranges with no changes.
+//
+// Return a generator of groups with up to n lines of context.
+// Each group is in the same format as returned by GetOpCodes().
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
+ if n < 0 {
+ n = 3
+ }
+ codes := m.GetOpCodes()
+ if len(codes) == 0 {
+ codes = []OpCode{{'e', 0, 1, 0, 1}}
+ }
+ // Fixup leading and trailing groups if they show no changes.
+ if codes[0].Tag == 'e' {
+ c := codes[0]
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
+ }
+ if codes[len(codes)-1].Tag == 'e' {
+ c := codes[len(codes)-1]
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
+ }
+ nn := n + n
+ groups := [][]OpCode{}
+ group := []OpCode{}
+ for _, c := range codes {
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ // End the current group and start a new one whenever
+ // there is a large range with no changes.
+ if c.Tag == 'e' && i2-i1 > nn {
+ group = append(group, OpCode{c.Tag, i1, min(i2, i1+n),
+ j1, min(j2, j1+n)})
+ groups = append(groups, group)
+ group = []OpCode{}
+ i1, j1 = max(i1, i2-n), max(j1, j2-n)
+ }
+ group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
+ }
+ if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
+ groups = append(groups, group)
+ }
+ return groups
+}
+
+// Return a measure of the sequences' similarity (float in [0,1]).
+//
+// Where T is the total number of elements in both sequences, and
+// M is the number of matches, this is 2.0*M / T.
+// Note that this is 1 if the sequences are identical, and 0 if
+// they have nothing in common.
+//
+// .Ratio() is expensive to compute if you haven't already computed
+// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
+// want to try .QuickRatio() or .RealQuickRatio() first to get an
+// upper bound.
+func (m *SequenceMatcher) Ratio() float64 {
+ matches := 0
+ for _, m := range m.GetMatchingBlocks() {
+ matches += m.Size
+ }
+ return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on ratio() relatively quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute.
+func (m *SequenceMatcher) QuickRatio() float64 {
+ // viewing a and b as multisets, set matches to the cardinality
+ // of their intersection; this counts the number of matches
+ // without regard to order, so is clearly an upper bound
+ if m.fullBCount == nil {
+ m.fullBCount = map[string]int{}
+ for _, s := range m.b {
+ m.fullBCount[s] = m.fullBCount[s] + 1
+ }
+ }
+
+ // avail[x] is the number of times x appears in 'b' less the
+ // number of times we've seen it in 'a' so far ... kinda
+ avail := map[string]int{}
+ matches := 0
+ for _, s := range m.a {
+ n, ok := avail[s]
+ if !ok {
+ n = m.fullBCount[s]
+ }
+ avail[s] = n - 1
+ if n > 0 {
+ matches += 1
+ }
+ }
+ return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on ratio() very quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute than either .Ratio() or .QuickRatio().
+func (m *SequenceMatcher) RealQuickRatio() float64 {
+ la, lb := len(m.a), len(m.b)
+ return calculateRatio(min(la, lb), la+lb)
+}
+
+// Convert range to the "ed" format
+func formatRangeUnified(start, stop int) string {
+ // Per the diff spec at http://www.unix.org/single_unix_specification/
+ beginning := start + 1 // lines start numbering with one
+ length := stop - start
+ if length == 1 {
+ return fmt.Sprintf("%d", beginning)
+ }
+ if length == 0 {
+ beginning -= 1 // empty ranges begin at line just before the range
+ }
+ return fmt.Sprintf("%d,%d", beginning, length)
+}
+
+// Unified diff parameters
+type UnifiedDiff struct {
+ A []string // First sequence lines
+ FromFile string // First file name
+ FromDate string // First file time
+ B []string // Second sequence lines
+ ToFile string // Second file name
+ ToDate string // Second file time
+ Eol string // Headers end of line, defaults to LF
+ Context int // Number of context lines
+}
+
+// Compare two sequences of lines; generate the delta as a unified diff.
+//
+// Unified diffs are a compact way of showing line changes and a few
+// lines of context. The number of context lines is set by 'n' which
+// defaults to three.
+//
+// By default, the diff control lines (those with ---, +++, or @@) are
+// created with a trailing newline. This is helpful so that inputs
+// created from file.readlines() result in diffs that are suitable for
+// file.writelines() since both the inputs and outputs have trailing
+// newlines.
+//
+// For inputs that do not have trailing newlines, set the lineterm
+// argument to "" so that the output will be uniformly newline free.
+//
+// The unidiff format normally has a header for filenames and modification
+// times. Any or all of these may be specified using strings for
+// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
+// The modification times are normally expressed in the ISO 8601 format.
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
+ buf := bufio.NewWriter(writer)
+ defer buf.Flush()
+ wf := func(format string, args ...interface{}) error {
+ _, err := buf.WriteString(fmt.Sprintf(format, args...))
+ return err
+ }
+ ws := func(s string) error {
+ _, err := buf.WriteString(s)
+ return err
+ }
+
+ if len(diff.Eol) == 0 {
+ diff.Eol = "\n"
+ }
+
+ started := false
+ m := NewMatcher(diff.A, diff.B)
+ for _, g := range m.GetGroupedOpCodes(diff.Context) {
+ if !started {
+ started = true
+ fromDate := ""
+ if len(diff.FromDate) > 0 {
+ fromDate = "\t" + diff.FromDate
+ }
+ toDate := ""
+ if len(diff.ToDate) > 0 {
+ toDate = "\t" + diff.ToDate
+ }
+ if diff.FromFile != "" || diff.ToFile != "" {
+ err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
+ if err != nil {
+ return err
+ }
+ err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ first, last := g[0], g[len(g)-1]
+ range1 := formatRangeUnified(first.I1, last.I2)
+ range2 := formatRangeUnified(first.J1, last.J2)
+ if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
+ return err
+ }
+ for _, c := range g {
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ if c.Tag == 'e' {
+ for _, line := range diff.A[i1:i2] {
+ if err := ws(" " + line); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if c.Tag == 'r' || c.Tag == 'd' {
+ for _, line := range diff.A[i1:i2] {
+ if err := ws("-" + line); err != nil {
+ return err
+ }
+ }
+ }
+ if c.Tag == 'r' || c.Tag == 'i' {
+ for _, line := range diff.B[j1:j2] {
+ if err := ws("+" + line); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// Like WriteUnifiedDiff but returns the diff as a string.
+func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
+ w := &bytes.Buffer{}
+ err := WriteUnifiedDiff(w, diff)
+ return string(w.Bytes()), err
+}
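+
+// exampleUnifiedDiff is an editor's sketch, not part of the upstream difflib
+// port: it shows the intended round trip from raw strings, via SplitLines,
+// to a unified-diff string. The inputs and file names are illustrative only.
+func exampleUnifiedDiff() (string, error) {
+	diff := UnifiedDiff{
+		A:        SplitLines("one\ntwo\nthree\n"),
+		B:        SplitLines("one\nthree\nfour\n"),
+		FromFile: "Original",
+		ToFile:   "Current",
+		Context:  3,
+	}
+	return GetUnifiedDiffString(diff)
+}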
+
+// Convert range to the "ed" format.
+func formatRangeContext(start, stop int) string {
+ // Per the diff spec at http://www.unix.org/single_unix_specification/
+ beginning := start + 1 // lines start numbering with one
+ length := stop - start
+ if length == 0 {
+ beginning -= 1 // empty ranges begin at line just before the range
+ }
+ if length <= 1 {
+ return fmt.Sprintf("%d", beginning)
+ }
+ return fmt.Sprintf("%d,%d", beginning, beginning+length-1)
+}
+
+type ContextDiff UnifiedDiff
+
+// Compare two sequences of lines; generate the delta as a context diff.
+//
+// Context diffs are a compact way of showing line changes and a few
+// lines of context. The number of context lines is set by diff.Context
+// which defaults to three.
+//
+// By default, the diff control lines (those with *** or ---) are
+// created with a trailing newline.
+//
+// For inputs that do not have trailing newlines, set the diff.Eol
+// argument to "" so that the output will be uniformly newline free.
+//
+// The context diff format normally has a header for filenames and
+// modification times. Any or all of these may be specified using
+// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate.
+// The modification times are normally expressed in the ISO 8601 format.
+// If not specified, the strings default to blanks.
+func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
+ buf := bufio.NewWriter(writer)
+ defer buf.Flush()
+ var diffErr error
+ wf := func(format string, args ...interface{}) {
+ _, err := buf.WriteString(fmt.Sprintf(format, args...))
+ if diffErr == nil && err != nil {
+ diffErr = err
+ }
+ }
+ ws := func(s string) {
+ _, err := buf.WriteString(s)
+ if diffErr == nil && err != nil {
+ diffErr = err
+ }
+ }
+
+ if len(diff.Eol) == 0 {
+ diff.Eol = "\n"
+ }
+
+ prefix := map[byte]string{
+ 'i': "+ ",
+ 'd': "- ",
+ 'r': "! ",
+ 'e': " ",
+ }
+
+ started := false
+ m := NewMatcher(diff.A, diff.B)
+ for _, g := range m.GetGroupedOpCodes(diff.Context) {
+ if !started {
+ started = true
+ fromDate := ""
+ if len(diff.FromDate) > 0 {
+ fromDate = "\t" + diff.FromDate
+ }
+ toDate := ""
+ if len(diff.ToDate) > 0 {
+ toDate = "\t" + diff.ToDate
+ }
+ if diff.FromFile != "" || diff.ToFile != "" {
+ wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol)
+ wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol)
+ }
+ }
+
+ first, last := g[0], g[len(g)-1]
+ ws("***************" + diff.Eol)
+
+ range1 := formatRangeContext(first.I1, last.I2)
+ wf("*** %s ****%s", range1, diff.Eol)
+ for _, c := range g {
+ if c.Tag == 'r' || c.Tag == 'd' {
+ for _, cc := range g {
+ if cc.Tag == 'i' {
+ continue
+ }
+ for _, line := range diff.A[cc.I1:cc.I2] {
+ ws(prefix[cc.Tag] + line)
+ }
+ }
+ break
+ }
+ }
+
+ range2 := formatRangeContext(first.J1, last.J2)
+ wf("--- %s ----%s", range2, diff.Eol)
+ for _, c := range g {
+ if c.Tag == 'r' || c.Tag == 'i' {
+ for _, cc := range g {
+ if cc.Tag == 'd' {
+ continue
+ }
+ for _, line := range diff.B[cc.J1:cc.J2] {
+ ws(prefix[cc.Tag] + line)
+ }
+ }
+ break
+ }
+ }
+ }
+ return diffErr
+}
+
+// Like WriteContextDiff but returns the diff as a string.
+func GetContextDiffString(diff ContextDiff) (string, error) {
+ w := &bytes.Buffer{}
+ err := WriteContextDiff(w, diff)
+ return string(w.Bytes()), err
+}
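+
+// exampleContextDiff is an editor's sketch, not part of the upstream difflib
+// port: it mirrors the unified-diff example above but emits the context
+// ("***"/"---") format instead.
+func exampleContextDiff() (string, error) {
+	diff := ContextDiff{
+		A:        SplitLines("one\ntwo\nthree\n"),
+		B:        SplitLines("one\nthree\nfour\n"),
+		FromFile: "Original",
+		ToFile:   "Current",
+		Context:  3,
+	}
+	return GetContextDiffString(diff)
+}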
+
+// Split a string on "\n" while preserving them. The output can be used
+// as input for UnifiedDiff and ContextDiff structures.
+func SplitLines(s string) []string {
+ lines := strings.SplitAfter(s, "\n")
+ lines[len(lines)-1] += "\n"
+ return lines
+}
diff --git a/vendor/github.com/spf13/cobra/.gitignore b/vendor/github.com/spf13/cobra/.gitignore
new file mode 100644
index 0000000..c7b459e
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/.gitignore
@@ -0,0 +1,39 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+# Vim files https://github.com/github/gitignore/blob/master/Global/Vim.gitignore
+# swap
+[._]*.s[a-w][a-z]
+[._]s[a-w][a-z]
+# session
+Session.vim
+# temporary
+.netrwhist
+*~
+# auto-generated tag files
+tags
+
+*.exe
+cobra.test
+bin
+
+.idea/
+*.iml
diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml
new file mode 100644
index 0000000..2c8f480
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/.golangci.yml
@@ -0,0 +1,57 @@
+# Copyright 2013-2023 The Cobra Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+run:
+ deadline: 5m
+
+linters:
+ disable-all: true
+ enable:
+ #- bodyclose
+ # - deadcode ! deprecated since v1.49.0; replaced by 'unused'
+ #- depguard
+ #- dogsled
+ #- dupl
+ - errcheck
+ #- exhaustive
+ #- funlen
+ #- gochecknoinits
+ - goconst
+ - gocritic
+ #- gocyclo
+ - gofmt
+ - goimports
+ #- gomnd
+ #- goprintffuncname
+ - gosec
+ - gosimple
+ - govet
+ - ineffassign
+ #- lll
+ - misspell
+ #- nakedret
+ #- noctx
+ - nolintlint
+ #- rowserrcheck
+ #- scopelint
+ - staticcheck
+ #- structcheck ! deprecated since v1.49.0; replaced by 'unused'
+ - stylecheck
+ #- typecheck
+ - unconvert
+ #- unparam
+ - unused
+ # - varcheck ! deprecated since v1.49.0; replaced by 'unused'
+ #- whitespace
+ fast: false
diff --git a/vendor/github.com/spf13/cobra/.mailmap b/vendor/github.com/spf13/cobra/.mailmap
new file mode 100644
index 0000000..94ec530
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/.mailmap
@@ -0,0 +1,3 @@
+Steve Francia
+Bjørn Erik Pedersen
+Fabiano Franz
diff --git a/vendor/github.com/spf13/cobra/CONDUCT.md b/vendor/github.com/spf13/cobra/CONDUCT.md
new file mode 100644
index 0000000..9d16f88
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/CONDUCT.md
@@ -0,0 +1,37 @@
+## Cobra User Contract
+
+### Versioning
+Cobra will follow a steady release cadence. Non-breaking changes will be released as minor versions quarterly. Patch releases for bug fixes are at the discretion of the maintainers. Users can expect security patch fixes to be released within relatively short order of a CVE becoming known. For more information on security patch fixes see the CVE section below. Releases will follow [Semantic Versioning](https://semver.org/). Users tracking the master branch should expect unpredictable breaking changes as the project continues to move forward. For stability, it is highly recommended to use a release.
+
+### Backward Compatibility
+We will maintain two major releases in a moving window. The N-1 release will only receive bug fixes and security updates and will be dropped once N+1 is released.
+
+### Deprecation
+Deprecation of Go versions or dependent packages will only occur in major releases. To reduce the chance of this taking users by surprise, any large deprecation will be preceded by an announcement in the [#cobra slack channel](https://gophers.slack.com/archives/CD3LP1199) and an issue on GitHub.
+
+### CVE
+Maintainers will make every effort to release security patches in the case of a medium to high severity CVE directly impacting the library. The speed with which these patches reach a release is at the discretion of the maintainers. A low severity CVE may be a lower priority than a high severity one.
+
+### Communication
+Cobra maintainers will use GitHub issues and the [#cobra slack channel](https://gophers.slack.com/archives/CD3LP1199) as the primary means of communication with the community. This is to foster open communication with all users and contributors.
+
+### Breaking Changes
+Breaking changes are generally allowed in the master branch, as this is the branch used to develop the next release of Cobra.
+
+There may be times, however, when master is closed for breaking changes. This is likely to happen as we near the release of a new version.
+
+Breaking changes are not allowed in release branches, as these represent minor versions that have already been released. These versions have consumers who expect the APIs, behaviors, etc., to remain stable during the lifetime of the patch stream for the minor release.
+
+Examples of breaking changes include:
+- Removing or renaming an exported constant, variable, type, or function.
+- Updating the version of critical libraries such as `spf13/pflag`, `spf13/viper` etc...
+ - Some version updates may be acceptable for picking up bug fixes, but maintainers must exercise caution when reviewing.
+
+There may, at times, need to be exceptions where breaking changes are allowed in release branches. These are at the discretion of the project's maintainers, and must be carefully considered before merging.
+
+### CI Testing
+Maintainers will ensure the Cobra test suite runs against the currently supported versions of Go.
+
+### Disclaimer
+Changes to this document and the contents therein are at the discretion of the maintainers.
+None of the contents of this document are legally binding in any way to the maintainers or the users.
diff --git a/vendor/github.com/spf13/cobra/CONTRIBUTING.md b/vendor/github.com/spf13/cobra/CONTRIBUTING.md
new file mode 100644
index 0000000..6f356e6
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/CONTRIBUTING.md
@@ -0,0 +1,50 @@
+# Contributing to Cobra
+
+Thank you so much for contributing to Cobra. We appreciate your time and help.
+Here are some guidelines to help you get started.
+
+## Code of Conduct
+
+Be kind and respectful to the members of the community. Take time to educate
+others who are seeking help. Harassment of any kind will not be tolerated.
+
+## Questions
+
+If you have questions regarding Cobra, feel free to ask them in the community
+[#cobra Slack channel][cobra-slack].
+
+## Filing a bug or feature
+
+1. Before filing an issue, please check the existing issues to see if a
+ similar one was already opened. If there is one already opened, feel free
+ to comment on it.
+1. If you believe you've found a bug, please provide detailed steps of
+ reproduction, the version of Cobra and anything else you believe will be
+ useful to help troubleshoot it (e.g. OS environment, environment variables,
+ etc...). Also state the current behavior vs. the expected behavior.
+1. If you'd like to see a feature or an enhancement please open an issue with
+ a clear title and description of what the feature is and why it would be
+ beneficial to the project and its users.
+
+## Submitting changes
+
+1. CLA: Upon submitting a Pull Request (PR), contributors will be prompted to
+ sign a CLA. Please sign the CLA :slightly_smiling_face:
+1. Tests: If you are submitting code, please ensure you have adequate tests
+ for the feature. Tests can be run via `go test ./...` or `make test`.
+1. Since this is a Go project, ensure the new code is properly formatted to
+   keep the codebase consistent. Run `make all`.
+
+### Quick steps to contribute
+
+1. Fork the project.
+1. Download your fork to your PC (`git clone https://github.com/your_username/cobra && cd cobra`)
+1. Create your feature branch (`git checkout -b my-new-feature`)
+1. Make changes and run tests (`make test`)
+1. Add them to staging (`git add .`)
+1. Commit your changes (`git commit -m 'Add some feature'`)
+1. Push to the branch (`git push origin my-new-feature`)
+1. Create new pull request
+
+
+[cobra-slack]: https://gophers.slack.com/archives/CD3LP1199
diff --git a/vendor/github.com/spf13/cobra/LICENSE.txt b/vendor/github.com/spf13/cobra/LICENSE.txt
new file mode 100644
index 0000000..298f0e2
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/LICENSE.txt
@@ -0,0 +1,174 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/spf13/cobra/MAINTAINERS b/vendor/github.com/spf13/cobra/MAINTAINERS
new file mode 100644
index 0000000..4c5ac3d
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/MAINTAINERS
@@ -0,0 +1,13 @@
+maintainers:
+- spf13
+- johnSchnake
+- jpmcb
+- marckhouzam
+inactive:
+- anthonyfok
+- bep
+- bogem
+- broady
+- eparis
+- jharshman
+- wfernandes
diff --git a/vendor/github.com/spf13/cobra/Makefile b/vendor/github.com/spf13/cobra/Makefile
new file mode 100644
index 0000000..0da8d7a
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/Makefile
@@ -0,0 +1,35 @@
+BIN="./bin"
+SRC=$(shell find . -name "*.go")
+
+ifeq (, $(shell which golangci-lint))
+$(warning "could not find golangci-lint in $(PATH), run: curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh")
+endif
+
+.PHONY: fmt lint test install_deps clean
+
+default: all
+
+all: fmt test
+
+fmt:
+ $(info ******************** checking formatting ********************)
+ @test -z $(shell gofmt -l $(SRC)) || (gofmt -d $(SRC); exit 1)
+
+lint:
+ $(info ******************** running lint tools ********************)
+ golangci-lint run -v
+
+test: install_deps
+ $(info ******************** running tests ********************)
+ go test -v ./...
+
+richtest: install_deps
+ $(info ******************** running tests with kyoh86/richgo ********************)
+ richgo test -v ./...
+
+install_deps:
+ $(info ******************** downloading dependencies ********************)
+ go get -v ./...
+
+clean:
+ rm -rf $(BIN)
diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md
new file mode 100644
index 0000000..6444f4b
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/README.md
@@ -0,0 +1,112 @@
+Cobra is a library for creating powerful modern CLI applications.
+
+Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/),
+[Hugo](https://gohugo.io), and [GitHub CLI](https://github.com/cli/cli) to
+name a few. [This list](site/content/projects_using_cobra.md) contains a more extensive list of projects using Cobra.
+
+[Test workflow](https://github.com/spf13/cobra/actions?query=workflow%3ATest)
+[Go Reference](https://pkg.go.dev/github.com/spf13/cobra)
+[Go Report Card](https://goreportcard.com/report/github.com/spf13/cobra)
+[Slack](https://gophers.slack.com/archives/CD3LP1199)
+
+# Overview
+
+Cobra is a library providing a simple interface to create powerful modern CLI
+interfaces similar to git & go tools.
+
+Cobra provides:
+* Easy subcommand-based CLIs: `app server`, `app fetch`, etc.
+* Fully POSIX-compliant flags (including short & long versions)
+* Nested subcommands
+* Global, local and cascading flags
+* Intelligent suggestions (`app srver`... did you mean `app server`?)
+* Automatic help generation for commands and flags
+* Grouping help for subcommands
+* Automatic help flag recognition of `-h`, `--help`, etc.
+* Automatically generated shell autocomplete for your application (bash, zsh, fish, powershell)
+* Automatically generated man pages for your application
+* Command aliases so you can change things without breaking them
+* The flexibility to define your own help, usage, etc.
+* Optional seamless integration with [viper](https://github.com/spf13/viper) for 12-factor apps
+
+# Concepts
+
+Cobra is built on a structure of commands, arguments & flags.
+
+**Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions.
+
+The best applications read like sentences when used, and as a result, users
+intuitively know how to interact with them.
+
+The pattern to follow is
+`APPNAME VERB NOUN --ADJECTIVE`
+ or
+`APPNAME COMMAND ARG --FLAG`.
+
+A few good real world examples may better illustrate this point.
+
+In the following example, 'server' is a command, and 'port' is a flag:
+
+ hugo server --port=1313
+
+In this command, we are telling Git to clone the URL bare:
+
+ git clone URL --bare
+
+## Commands
+
+Command is the central point of the application. Each interaction that
+the application supports will be contained in a Command. A command can
+have child commands and optionally run an action.
+
+In the example above, 'server' is the command.
+
+[More about cobra.Command](https://pkg.go.dev/github.com/spf13/cobra#Command)
+
+## Flags
+
+A flag is a way to modify the behavior of a command. Cobra supports
+fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/).
+A Cobra command can define flags that persist through to child commands
+and flags that are only available to that command.
+
+In the example above, 'port' is the flag.
+
+Flag functionality is provided by the [pflag
+library](https://github.com/spf13/pflag), a fork of the flag standard library
+which maintains the same interface while adding POSIX compliance.
+
+# Installing
+Using Cobra is easy. First, use `go get` to install the latest version
+of the library.
+
+```
+go get -u github.com/spf13/cobra@latest
+```
+
+Next, include Cobra in your application:
+
+```go
+import "github.com/spf13/cobra"
+```
+
+# Usage
+`cobra-cli` is a command line program to generate cobra applications and command files.
+It will bootstrap your application scaffolding to rapidly
+develop a Cobra-based application. It is the easiest way to incorporate Cobra into your application.
+
+It can be installed by running:
+
+```
+go install github.com/spf13/cobra-cli@latest
+```
+
+For complete details on using the Cobra-CLI generator, please read [The Cobra Generator README](https://github.com/spf13/cobra-cli/blob/main/README.md)
+
+For complete details on using the Cobra library, please read [The Cobra User Guide](site/content/user_guide.md).
+
+# License
+
+Cobra is released under the Apache 2.0 license. See [LICENSE.txt](LICENSE.txt).
diff --git a/vendor/github.com/spf13/cobra/active_help.go b/vendor/github.com/spf13/cobra/active_help.go
new file mode 100644
index 0000000..25c30e3
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/active_help.go
@@ -0,0 +1,60 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+ "fmt"
+ "os"
+)
+
+const (
+ activeHelpMarker = "_activeHelp_ "
+ // The below values should not be changed: programs will be using them explicitly
+ // in their user documentation, and users will be using them explicitly.
+ activeHelpEnvVarSuffix = "ACTIVE_HELP"
+ activeHelpGlobalEnvVar = configEnvVarGlobalPrefix + "_" + activeHelpEnvVarSuffix
+ activeHelpGlobalDisable = "0"
+)
+
+// AppendActiveHelp adds the specified string to the specified array to be used as ActiveHelp.
+// Such strings will be processed by the completion script and will be shown as ActiveHelp
+// to the user.
+// The array parameter should be the array that will contain the completions.
+// This function can be called multiple times before and/or after completions are added to
+// the array. Each time this function is called with the same array, the new
+// ActiveHelp line will be shown below the previous ones when completion is triggered.
+func AppendActiveHelp(compArray []string, activeHelpStr string) []string {
+ return append(compArray, fmt.Sprintf("%s%s", activeHelpMarker, activeHelpStr))
+}
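+
+// Editor's sketch (hypothetical usage, not upstream code): AppendActiveHelp is
+// typically called from a ValidArgsFunction so the hint travels with the
+// completions. The command and strings below are illustrative only.
+//
+//	cmd.ValidArgsFunction = func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
+//		comps := []string{"json", "yaml"}
+//		comps = AppendActiveHelp(comps, "Choose an output format")
+//		return comps, ShellCompDirectiveNoFileComp
+//	}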
+
+// GetActiveHelpConfig returns the value of the ActiveHelp environment variable
+// <PROGRAM>_ACTIVE_HELP where <PROGRAM> is the name of the root command in upper
+// case, with all non-ASCII-alphanumeric characters replaced by `_`.
+// It will always return "0" if the global environment variable COBRA_ACTIVE_HELP
+// is set to "0".
+func GetActiveHelpConfig(cmd *Command) string {
+ activeHelpCfg := os.Getenv(activeHelpGlobalEnvVar)
+ if activeHelpCfg != activeHelpGlobalDisable {
+ activeHelpCfg = os.Getenv(activeHelpEnvVar(cmd.Root().Name()))
+ }
+ return activeHelpCfg
+}
+
+// activeHelpEnvVar returns the name of the program-specific ActiveHelp environment
+// variable. It has the format <PROGRAM>_ACTIVE_HELP where <PROGRAM> is the name of the
+// root command in upper case, with all non-ASCII-alphanumeric characters replaced by `_`.
+func activeHelpEnvVar(name string) string {
+ return configEnvVar(name, activeHelpEnvVarSuffix)
+}
diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go
new file mode 100644
index 0000000..ed1e70c
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/args.go
@@ -0,0 +1,131 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+ "fmt"
+ "strings"
+)
+
+type PositionalArgs func(cmd *Command, args []string) error
+
+// legacyArgs validation has the following behaviour:
+// - root commands with no subcommands can take arbitrary arguments
+// - root commands with subcommands will do subcommand validity checking
+// - subcommands will always accept arbitrary arguments
+func legacyArgs(cmd *Command, args []string) error {
+ // no subcommand, always take args
+ if !cmd.HasSubCommands() {
+ return nil
+ }
+
+ // root command with subcommands, do subcommand checking.
+ if !cmd.HasParent() && len(args) > 0 {
+ return fmt.Errorf("unknown command %q for %q%s", args[0], cmd.CommandPath(), cmd.findSuggestions(args[0]))
+ }
+ return nil
+}
+
+// NoArgs returns an error if any args are included.
+func NoArgs(cmd *Command, args []string) error {
+ if len(args) > 0 {
+ return fmt.Errorf("unknown command %q for %q", args[0], cmd.CommandPath())
+ }
+ return nil
+}
+
+// OnlyValidArgs returns an error if there are any positional args that are not in
+// the `ValidArgs` field of `Command`.
+func OnlyValidArgs(cmd *Command, args []string) error {
+ if len(cmd.ValidArgs) > 0 {
+ // Remove any description that may be included in ValidArgs.
+ // A description follows a tab character.
+ validArgs := make([]string, 0, len(cmd.ValidArgs))
+ for _, v := range cmd.ValidArgs {
+ validArgs = append(validArgs, strings.SplitN(v, "\t", 2)[0])
+ }
+ for _, v := range args {
+ if !stringInSlice(v, validArgs) {
+ return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0]))
+ }
+ }
+ }
+ return nil
+}
+
+// ArbitraryArgs never returns an error.
+func ArbitraryArgs(cmd *Command, args []string) error {
+ return nil
+}
+
+// MinimumNArgs returns an error if there are not at least N args.
+func MinimumNArgs(n int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) < n {
+ return fmt.Errorf("requires at least %d arg(s), only received %d", n, len(args))
+ }
+ return nil
+ }
+}
+
+// MaximumNArgs returns an error if there are more than N args.
+func MaximumNArgs(n int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) > n {
+ return fmt.Errorf("accepts at most %d arg(s), received %d", n, len(args))
+ }
+ return nil
+ }
+}
+
+// ExactArgs returns an error if there are not exactly n args.
+func ExactArgs(n int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) != n {
+ return fmt.Errorf("accepts %d arg(s), received %d", n, len(args))
+ }
+ return nil
+ }
+}
+
+// RangeArgs returns an error if the number of args is not within the expected range.
+func RangeArgs(min int, max int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) < min || len(args) > max {
+ return fmt.Errorf("accepts between %d and %d arg(s), received %d", min, max, len(args))
+ }
+ return nil
+ }
+}
+
+// MatchAll allows combining several PositionalArgs to work in concert.
+func MatchAll(pargs ...PositionalArgs) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ for _, parg := range pargs {
+ if err := parg(cmd, args); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+}
+
+// ExactValidArgs returns an error if there are not exactly N positional args OR
+// there are any positional args that are not in the `ValidArgs` field of `Command`.
+//
+// Deprecated: use MatchAll(ExactArgs(n), OnlyValidArgs) instead
+func ExactValidArgs(n int) PositionalArgs {
+ return MatchAll(ExactArgs(n), OnlyValidArgs)
+}
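+
+// Editor's sketch (hypothetical usage, not upstream code): validators compose
+// via MatchAll and are wired in through the Command's Args field. The command
+// below is illustrative only.
+//
+//	cmd := &Command{
+//		Use:       "color [name]",
+//		ValidArgs: []string{"red", "green", "blue"},
+//		Args:      MatchAll(ExactArgs(1), OnlyValidArgs),
+//	}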
diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go
new file mode 100644
index 0000000..f4d198c
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/bash_completions.go
@@ -0,0 +1,709 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "strings"
+
+ "github.com/spf13/pflag"
+)
+
+// Annotations for Bash completion.
+const (
+ BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extensions"
+ BashCompCustom = "cobra_annotation_bash_completion_custom"
+ BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag"
+ BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir"
+)
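+
+// Editor's sketch (hypothetical usage, not upstream code): these annotations
+// are normally attached through the MarkFlag* helpers rather than set by hand.
+// The flag name below is illustrative only.
+//
+//	cmd.Flags().String("config", "", "config file")
+//	_ = cmd.MarkFlagFilename("config", "yaml", "yml") // sets BashCompFilenameExt
+//	_ = cmd.MarkFlagRequired("config")                // sets BashCompOneRequiredFlag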
+
+func writePreamble(buf io.StringWriter, name string) {
+ WriteStringAndCheck(buf, fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name))
+ WriteStringAndCheck(buf, fmt.Sprintf(`
+__%[1]s_debug()
+{
+ if [[ -n ${BASH_COMP_DEBUG_FILE:-} ]]; then
+ echo "$*" >> "${BASH_COMP_DEBUG_FILE}"
+ fi
+}
+
+# Homebrew on Macs has version 1.3 of bash-completion, which doesn't include
+# _init_completion. This is a very minimal version of that function.
+__%[1]s_init_completion()
+{
+ COMPREPLY=()
+ _get_comp_words_by_ref "$@" cur prev words cword
+}
+
+__%[1]s_index_of_word()
+{
+ local w word=$1
+ shift
+ index=0
+ for w in "$@"; do
+ [[ $w = "$word" ]] && return
+ index=$((index+1))
+ done
+ index=-1
+}
+
+__%[1]s_contains_word()
+{
+ local w word=$1; shift
+ for w in "$@"; do
+ [[ $w = "$word" ]] && return
+ done
+ return 1
+}
+
+__%[1]s_handle_go_custom_completion()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}"
+
+ local shellCompDirectiveError=%[3]d
+ local shellCompDirectiveNoSpace=%[4]d
+ local shellCompDirectiveNoFileComp=%[5]d
+ local shellCompDirectiveFilterFileExt=%[6]d
+ local shellCompDirectiveFilterDirs=%[7]d
+
+ local out requestComp lastParam lastChar comp directive args
+
+ # Prepare the command to request completions for the program.
+ # Calling ${words[0]} instead of %[1]s directly allows handling aliases
+ args=("${words[@]:1}")
+ # Disable ActiveHelp which is not supported for bash completion v1
+ requestComp="%[8]s=0 ${words[0]} %[2]s ${args[*]}"
+
+ lastParam=${words[$((${#words[@]}-1))]}
+ lastChar=${lastParam:$((${#lastParam}-1)):1}
+ __%[1]s_debug "${FUNCNAME[0]}: lastParam ${lastParam}, lastChar ${lastChar}"
+
+ if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then
+ # If the last parameter is complete (there is a space following it),
+ # we add an extra empty parameter so we can indicate this to the go method.
+ __%[1]s_debug "${FUNCNAME[0]}: Adding extra empty parameter"
+ requestComp="${requestComp} \"\""
+ fi
+
+ __%[1]s_debug "${FUNCNAME[0]}: calling ${requestComp}"
+ # Use eval to handle any environment variables and such
+ out=$(eval "${requestComp}" 2>/dev/null)
+
+ # Extract the directive integer at the very end of the output following a colon (:)
+ directive=${out##*:}
+ # Remove the directive
+ out=${out%%:*}
+ if [ "${directive}" = "${out}" ]; then
+ # There is no directive specified
+ directive=0
+ fi
+ __%[1]s_debug "${FUNCNAME[0]}: the completion directive is: ${directive}"
+ __%[1]s_debug "${FUNCNAME[0]}: the completions are: ${out}"
+
+ if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then
+ # Error code. No completion.
+ __%[1]s_debug "${FUNCNAME[0]}: received error from custom completion go code"
+ return
+ else
+ if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ __%[1]s_debug "${FUNCNAME[0]}: activating no space"
+ compopt -o nospace
+ fi
+ fi
+ if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ __%[1]s_debug "${FUNCNAME[0]}: activating no file completion"
+ compopt +o default
+ fi
+ fi
+ fi
+
+ if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then
+ # File extension filtering
+ local fullFilter filter filteringCmd
+ # Do not use quotes around the $out variable or else newline
+ # characters will be kept.
+ for filter in ${out}; do
+ fullFilter+="$filter|"
+ done
+
+ filteringCmd="_filedir $fullFilter"
+ __%[1]s_debug "File filtering command: $filteringCmd"
+ $filteringCmd
+ elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then
+ # File completion for directories only
+ local subdir
+ # Use printf to strip any trailing newline
+ subdir=$(printf "%%s" "${out}")
+ if [ -n "$subdir" ]; then
+ __%[1]s_debug "Listing directories in $subdir"
+ __%[1]s_handle_subdirs_in_dir_flag "$subdir"
+ else
+ __%[1]s_debug "Listing directories in ."
+ _filedir -d
+ fi
+ else
+ while IFS='' read -r comp; do
+ COMPREPLY+=("$comp")
+ done < <(compgen -W "${out}" -- "$cur")
+ fi
+}
+
+__%[1]s_handle_reply()
+{
+ __%[1]s_debug "${FUNCNAME[0]}"
+ local comp
+ case $cur in
+ -*)
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ compopt -o nospace
+ fi
+ local allflags
+ if [ ${#must_have_one_flag[@]} -ne 0 ]; then
+ allflags=("${must_have_one_flag[@]}")
+ else
+ allflags=("${flags[*]} ${two_word_flags[*]}")
+ fi
+ while IFS='' read -r comp; do
+ COMPREPLY+=("$comp")
+ done < <(compgen -W "${allflags[*]}" -- "$cur")
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ [[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace
+ fi
+
+ # complete after --flag=abc
+ if [[ $cur == *=* ]]; then
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ compopt +o nospace
+ fi
+
+ local index flag
+ flag="${cur%%=*}"
+ __%[1]s_index_of_word "${flag}" "${flags_with_completion[@]}"
+ COMPREPLY=()
+ if [[ ${index} -ge 0 ]]; then
+ PREFIX=""
+ cur="${cur#*=}"
+ ${flags_completion[${index}]}
+ if [ -n "${ZSH_VERSION:-}" ]; then
+ # zsh completion needs --flag= prefix
+ eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )"
+ fi
+ fi
+ fi
+
+ if [[ -z "${flag_parsing_disabled}" ]]; then
+ # If flag parsing is enabled, we have completed the flags and can return.
+ # If flag parsing is disabled, we may not know all (or any) of the flags, so we fallthrough
+ # to possibly call handle_go_custom_completion.
+ return 0;
+ fi
+ ;;
+ esac
+
+ # check if we are handling a flag with special work handling
+ local index
+ __%[1]s_index_of_word "${prev}" "${flags_with_completion[@]}"
+ if [[ ${index} -ge 0 ]]; then
+ ${flags_completion[${index}]}
+ return
+ fi
+
+ # we are parsing a flag and don't have a special handler, no completion
+ if [[ ${cur} != "${words[cword]}" ]]; then
+ return
+ fi
+
+ local completions
+ completions=("${commands[@]}")
+ if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then
+ completions+=("${must_have_one_noun[@]}")
+ elif [[ -n "${has_completion_function}" ]]; then
+ # if a go completion function is provided, defer to that function
+ __%[1]s_handle_go_custom_completion
+ fi
+ if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then
+ completions+=("${must_have_one_flag[@]}")
+ fi
+ while IFS='' read -r comp; do
+ COMPREPLY+=("$comp")
+ done < <(compgen -W "${completions[*]}" -- "$cur")
+
+ if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then
+ while IFS='' read -r comp; do
+ COMPREPLY+=("$comp")
+ done < <(compgen -W "${noun_aliases[*]}" -- "$cur")
+ fi
+
+ if [[ ${#COMPREPLY[@]} -eq 0 ]]; then
+ if declare -F __%[1]s_custom_func >/dev/null; then
+ # try command name qualified custom func
+ __%[1]s_custom_func
+ else
+ # otherwise fall back to unqualified for compatibility
+ declare -F __custom_func >/dev/null && __custom_func
+ fi
+ fi
+
+ # available in bash-completion >= 2, not always present on macOS
+ if declare -F __ltrim_colon_completions >/dev/null; then
+ __ltrim_colon_completions "$cur"
+ fi
+
+ # If there is only 1 completion and it is a flag with an = it will be completed
+ # but we don't want a space after the =
+ if [[ "${#COMPREPLY[@]}" -eq "1" ]] && [[ $(type -t compopt) = "builtin" ]] && [[ "${COMPREPLY[0]}" == --*= ]]; then
+ compopt -o nospace
+ fi
+}
+
+# The arguments should be in the form "ext1|ext2|extn"
+__%[1]s_handle_filename_extension_flag()
+{
+ local ext="$1"
+ _filedir "@(${ext})"
+}
+
+__%[1]s_handle_subdirs_in_dir_flag()
+{
+ local dir="$1"
+ pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return
+}
+
+__%[1]s_handle_flag()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+
+ # if a command required a flag, and we found it, unset must_have_one_flag()
+ local flagname=${words[c]}
+ local flagvalue=""
+ # if the word contained an =
+ if [[ ${words[c]} == *"="* ]]; then
+ flagvalue=${flagname#*=} # take in as flagvalue after the =
+ flagname=${flagname%%=*} # strip everything after the =
+ flagname="${flagname}=" # but put the = back
+ fi
+ __%[1]s_debug "${FUNCNAME[0]}: looking for ${flagname}"
+ if __%[1]s_contains_word "${flagname}" "${must_have_one_flag[@]}"; then
+ must_have_one_flag=()
+ fi
+
+ # if you set a flag which only applies to this command, don't show subcommands
+ if __%[1]s_contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then
+ commands=()
+ fi
+
+ # keep flag value with flagname as flaghash
+ # flaghash variable is an associative array which is only supported in bash > 3.
+ if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then
+ if [ -n "${flagvalue}" ] ; then
+ flaghash[${flagname}]=${flagvalue}
+ elif [ -n "${words[ $((c+1)) ]}" ] ; then
+ flaghash[${flagname}]=${words[ $((c+1)) ]}
+ else
+ flaghash[${flagname}]="true" # pad "true" for bool flag
+ fi
+ fi
+
+ # skip the argument to a two word flag
+ if [[ ${words[c]} != *"="* ]] && __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then
+ __%[1]s_debug "${FUNCNAME[0]}: found a flag ${words[c]}, skip the next argument"
+ c=$((c+1))
+ # if we are looking for a flags value, don't show commands
+ if [[ $c -eq $cword ]]; then
+ commands=()
+ fi
+ fi
+
+ c=$((c+1))
+
+}
+
+__%[1]s_handle_noun()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+
+ if __%[1]s_contains_word "${words[c]}" "${must_have_one_noun[@]}"; then
+ must_have_one_noun=()
+ elif __%[1]s_contains_word "${words[c]}" "${noun_aliases[@]}"; then
+ must_have_one_noun=()
+ fi
+
+ nouns+=("${words[c]}")
+ c=$((c+1))
+}
+
+__%[1]s_handle_command()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+
+ local next_command
+ if [[ -n ${last_command} ]]; then
+ next_command="_${last_command}_${words[c]//:/__}"
+ else
+ if [[ $c -eq 0 ]]; then
+ next_command="_%[1]s_root_command"
+ else
+ next_command="_${words[c]//:/__}"
+ fi
+ fi
+ c=$((c+1))
+ __%[1]s_debug "${FUNCNAME[0]}: looking for ${next_command}"
+ declare -F "$next_command" >/dev/null && $next_command
+}
+
+__%[1]s_handle_word()
+{
+ if [[ $c -ge $cword ]]; then
+ __%[1]s_handle_reply
+ return
+ fi
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+ if [[ "${words[c]}" == -* ]]; then
+ __%[1]s_handle_flag
+ elif __%[1]s_contains_word "${words[c]}" "${commands[@]}"; then
+ __%[1]s_handle_command
+ elif [[ $c -eq 0 ]]; then
+ __%[1]s_handle_command
+ elif __%[1]s_contains_word "${words[c]}" "${command_aliases[@]}"; then
+ # aliashash variable is an associative array which is only supported in bash > 3.
+ if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then
+ words[c]=${aliashash[${words[c]}]}
+ __%[1]s_handle_command
+ else
+ __%[1]s_handle_noun
+ fi
+ else
+ __%[1]s_handle_noun
+ fi
+ __%[1]s_handle_word
+}
+
+`, name, ShellCompNoDescRequestCmd,
+ ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name)))
+}
+
+func writePostscript(buf io.StringWriter, name string) {
+ name = strings.ReplaceAll(name, ":", "__")
+ WriteStringAndCheck(buf, fmt.Sprintf("__start_%s()\n", name))
+ WriteStringAndCheck(buf, fmt.Sprintf(`{
+ local cur prev words cword split
+ declare -A flaghash 2>/dev/null || :
+ declare -A aliashash 2>/dev/null || :
+ if declare -F _init_completion >/dev/null 2>&1; then
+ _init_completion -s || return
+ else
+ __%[1]s_init_completion -n "=" || return
+ fi
+
+ local c=0
+ local flag_parsing_disabled=
+ local flags=()
+ local two_word_flags=()
+ local local_nonpersistent_flags=()
+ local flags_with_completion=()
+ local flags_completion=()
+ local commands=("%[1]s")
+ local command_aliases=()
+ local must_have_one_flag=()
+ local must_have_one_noun=()
+ local has_completion_function=""
+ local last_command=""
+ local nouns=()
+ local noun_aliases=()
+
+ __%[1]s_handle_word
+}
+
+`, name))
+ WriteStringAndCheck(buf, fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then
+ complete -o default -F __start_%s %s
+else
+ complete -o default -o nospace -F __start_%s %s
+fi
+
+`, name, name, name, name))
+ WriteStringAndCheck(buf, "# ex: ts=4 sw=4 et filetype=sh\n")
+}
+
+func writeCommands(buf io.StringWriter, cmd *Command) {
+ WriteStringAndCheck(buf, " commands=()\n")
+ for _, c := range cmd.Commands() {
+ if !c.IsAvailableCommand() && c != cmd.helpCommand {
+ continue
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf(" commands+=(%q)\n", c.Name()))
+ writeCmdAliases(buf, c)
+ }
+ WriteStringAndCheck(buf, "\n")
+}
+
+func writeFlagHandler(buf io.StringWriter, name string, annotations map[string][]string, cmd *Command) {
+ for key, value := range annotations {
+ switch key {
+ case BashCompFilenameExt:
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
+
+ var ext string
+ if len(value) > 0 {
+ ext = fmt.Sprintf("__%s_handle_filename_extension_flag ", cmd.Root().Name()) + strings.Join(value, "|")
+ } else {
+ ext = "_filedir"
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", ext))
+ case BashCompCustom:
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
+
+ if len(value) > 0 {
+ handlers := strings.Join(value, "; ")
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", handlers))
+ } else {
+ WriteStringAndCheck(buf, " flags_completion+=(:)\n")
+ }
+ case BashCompSubdirsInDir:
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
+
+ var ext string
+ if len(value) == 1 {
+ ext = fmt.Sprintf("__%s_handle_subdirs_in_dir_flag ", cmd.Root().Name()) + value[0]
+ } else {
+ ext = "_filedir -d"
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", ext))
+ }
+ }
+}
+
+const cbn = "\")\n"
+
+func writeShortFlag(buf io.StringWriter, flag *pflag.Flag, cmd *Command) {
+ name := flag.Shorthand
+ format := " "
+ if len(flag.NoOptDefVal) == 0 {
+ format += "two_word_"
+ }
+ format += "flags+=(\"-%s" + cbn
+ WriteStringAndCheck(buf, fmt.Sprintf(format, name))
+ writeFlagHandler(buf, "-"+name, flag.Annotations, cmd)
+}
+
+func writeFlag(buf io.StringWriter, flag *pflag.Flag, cmd *Command) {
+ name := flag.Name
+ format := " flags+=(\"--%s"
+ if len(flag.NoOptDefVal) == 0 {
+ format += "="
+ }
+ format += cbn
+ WriteStringAndCheck(buf, fmt.Sprintf(format, name))
+ if len(flag.NoOptDefVal) == 0 {
+ format = " two_word_flags+=(\"--%s" + cbn
+ WriteStringAndCheck(buf, fmt.Sprintf(format, name))
+ }
+ writeFlagHandler(buf, "--"+name, flag.Annotations, cmd)
+}
+
+func writeLocalNonPersistentFlag(buf io.StringWriter, flag *pflag.Flag) {
+ name := flag.Name
+ format := " local_nonpersistent_flags+=(\"--%[1]s" + cbn
+ if len(flag.NoOptDefVal) == 0 {
+ format += " local_nonpersistent_flags+=(\"--%[1]s=" + cbn
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf(format, name))
+ if len(flag.Shorthand) > 0 {
+ WriteStringAndCheck(buf, fmt.Sprintf(" local_nonpersistent_flags+=(\"-%s\")\n", flag.Shorthand))
+ }
+}
+
+// prepareCustomAnnotationsForFlags sets up annotations for Go completions for registered flags.
+func prepareCustomAnnotationsForFlags(cmd *Command) {
+ flagCompletionMutex.RLock()
+ defer flagCompletionMutex.RUnlock()
+ for flag := range flagCompletionFunctions {
+ // Make sure the completion script calls the __*_go_custom_completion function for
+ // every registered flag. We need to do this here (and not when the flag was registered
+ // for completion) so that we can know the root command name for the prefix
+ // of __<prog>_go_custom_completion
+ if flag.Annotations == nil {
+ flag.Annotations = map[string][]string{}
+ }
+ flag.Annotations[BashCompCustom] = []string{fmt.Sprintf("__%[1]s_handle_go_custom_completion", cmd.Root().Name())}
+ }
+}
+
+func writeFlags(buf io.StringWriter, cmd *Command) {
+ prepareCustomAnnotationsForFlags(cmd)
+ WriteStringAndCheck(buf, ` flags=()
+ two_word_flags=()
+ local_nonpersistent_flags=()
+ flags_with_completion=()
+ flags_completion=()
+
+`)
+
+ if cmd.DisableFlagParsing {
+ WriteStringAndCheck(buf, " flag_parsing_disabled=1\n")
+ }
+
+ localNonPersistentFlags := cmd.LocalNonPersistentFlags()
+ cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ if nonCompletableFlag(flag) {
+ return
+ }
+ writeFlag(buf, flag, cmd)
+ if len(flag.Shorthand) > 0 {
+ writeShortFlag(buf, flag, cmd)
+ }
+ // localNonPersistentFlags are used to stop the completion of subcommands when one is set;
+ // if TraverseChildren is true we should still allow subcommands to be completed
+ if localNonPersistentFlags.Lookup(flag.Name) != nil && !cmd.Root().TraverseChildren {
+ writeLocalNonPersistentFlag(buf, flag)
+ }
+ })
+ cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ if nonCompletableFlag(flag) {
+ return
+ }
+ writeFlag(buf, flag, cmd)
+ if len(flag.Shorthand) > 0 {
+ writeShortFlag(buf, flag, cmd)
+ }
+ })
+
+ WriteStringAndCheck(buf, "\n")
+}
+
+func writeRequiredFlag(buf io.StringWriter, cmd *Command) {
+ WriteStringAndCheck(buf, " must_have_one_flag=()\n")
+ flags := cmd.NonInheritedFlags()
+ flags.VisitAll(func(flag *pflag.Flag) {
+ if nonCompletableFlag(flag) {
+ return
+ }
+ if _, ok := flag.Annotations[BashCompOneRequiredFlag]; ok {
+ format := " must_have_one_flag+=(\"--%s"
+ if flag.Value.Type() != "bool" {
+ format += "="
+ }
+ format += cbn
+ WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name))
+
+ if len(flag.Shorthand) > 0 {
+ WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand))
+ }
+ }
+ })
+}
+
+func writeRequiredNouns(buf io.StringWriter, cmd *Command) {
+ WriteStringAndCheck(buf, " must_have_one_noun=()\n")
+ sort.Strings(cmd.ValidArgs)
+ for _, value := range cmd.ValidArgs {
+ // Remove any description that may be included following a tab character.
+ // Descriptions are not supported by bash completion.
+ value = strings.SplitN(value, "\t", 2)[0]
+ WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_noun+=(%q)\n", value))
+ }
+ if cmd.ValidArgsFunction != nil {
+ WriteStringAndCheck(buf, " has_completion_function=1\n")
+ }
+}
+
+func writeCmdAliases(buf io.StringWriter, cmd *Command) {
+ if len(cmd.Aliases) == 0 {
+ return
+ }
+
+ sort.Strings(cmd.Aliases)
+
+ WriteStringAndCheck(buf, fmt.Sprint(` if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then`, "\n"))
+ for _, value := range cmd.Aliases {
+ WriteStringAndCheck(buf, fmt.Sprintf(" command_aliases+=(%q)\n", value))
+ WriteStringAndCheck(buf, fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name()))
+ }
+ WriteStringAndCheck(buf, ` fi`)
+ WriteStringAndCheck(buf, "\n")
+}
+func writeArgAliases(buf io.StringWriter, cmd *Command) {
+ WriteStringAndCheck(buf, " noun_aliases=()\n")
+ sort.Strings(cmd.ArgAliases)
+ for _, value := range cmd.ArgAliases {
+ WriteStringAndCheck(buf, fmt.Sprintf(" noun_aliases+=(%q)\n", value))
+ }
+}
+
+func gen(buf io.StringWriter, cmd *Command) {
+ for _, c := range cmd.Commands() {
+ if !c.IsAvailableCommand() && c != cmd.helpCommand {
+ continue
+ }
+ gen(buf, c)
+ }
+ commandName := cmd.CommandPath()
+ commandName = strings.ReplaceAll(commandName, " ", "_")
+ commandName = strings.ReplaceAll(commandName, ":", "__")
+
+ if cmd.Root() == cmd {
+ WriteStringAndCheck(buf, fmt.Sprintf("_%s_root_command()\n{\n", commandName))
+ } else {
+ WriteStringAndCheck(buf, fmt.Sprintf("_%s()\n{\n", commandName))
+ }
+
+ WriteStringAndCheck(buf, fmt.Sprintf(" last_command=%q\n", commandName))
+ WriteStringAndCheck(buf, "\n")
+ WriteStringAndCheck(buf, " command_aliases=()\n")
+ WriteStringAndCheck(buf, "\n")
+
+ writeCommands(buf, cmd)
+ writeFlags(buf, cmd)
+ writeRequiredFlag(buf, cmd)
+ writeRequiredNouns(buf, cmd)
+ writeArgAliases(buf, cmd)
+ WriteStringAndCheck(buf, "}\n\n")
+}
+
+// GenBashCompletion generates bash completion file and writes to the passed writer.
+func (c *Command) GenBashCompletion(w io.Writer) error {
+ buf := new(bytes.Buffer)
+ writePreamble(buf, c.Name())
+ if len(c.BashCompletionFunction) > 0 {
+ buf.WriteString(c.BashCompletionFunction + "\n")
+ }
+ gen(buf, c)
+ writePostscript(buf, c.Name())
+
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+func nonCompletableFlag(flag *pflag.Flag) bool {
+ return flag.Hidden || len(flag.Deprecated) > 0
+}
+
+// GenBashCompletionFile generates bash completion file.
+func (c *Command) GenBashCompletionFile(filename string) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.GenBashCompletion(outFile)
+}
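+
+// Editor's sketch (hypothetical usage, not upstream code): generation is
+// usually exposed as a subcommand that writes the script to stdout. The
+// command name below is illustrative only.
+//
+//	completionCmd := &Command{
+//		Use: "completion",
+//		RunE: func(cmd *Command, args []string) error {
+//			return cmd.Root().GenBashCompletion(os.Stdout)
+//		},
+//	}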
diff --git a/vendor/github.com/spf13/cobra/bash_completionsV2.go b/vendor/github.com/spf13/cobra/bash_completionsV2.go
new file mode 100644
index 0000000..1cce5c3
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/bash_completionsV2.go
@@ -0,0 +1,396 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+func (c *Command) genBashCompletion(w io.Writer, includeDesc bool) error {
+ buf := new(bytes.Buffer)
+ genBashComp(buf, c.Name(), includeDesc)
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+func genBashComp(buf io.StringWriter, name string, includeDesc bool) {
+ compCmd := ShellCompRequestCmd
+ if !includeDesc {
+ compCmd = ShellCompNoDescRequestCmd
+ }
+
+ WriteStringAndCheck(buf, fmt.Sprintf(`# bash completion V2 for %-36[1]s -*- shell-script -*-
+
+__%[1]s_debug()
+{
+ if [[ -n ${BASH_COMP_DEBUG_FILE-} ]]; then
+ echo "$*" >> "${BASH_COMP_DEBUG_FILE}"
+ fi
+}
+
+# Macs have bash3 for which the bash-completion package doesn't include
+# _init_completion. This is a minimal version of that function.
+__%[1]s_init_completion()
+{
+ COMPREPLY=()
+ _get_comp_words_by_ref "$@" cur prev words cword
+}
+
+# This function calls the %[1]s program to obtain the completion
+# results and the directive. It fills the 'out' and 'directive' vars.
+__%[1]s_get_completion_results() {
+ local requestComp lastParam lastChar args
+
+ # Prepare the command to request completions for the program.
+ # Calling ${words[0]} instead of %[1]s directly allows handling aliases.
+ args=("${words[@]:1}")
+ requestComp="${words[0]} %[2]s ${args[*]}"
+
+ lastParam=${words[$((${#words[@]}-1))]}
+ lastChar=${lastParam:$((${#lastParam}-1)):1}
+ __%[1]s_debug "lastParam ${lastParam}, lastChar ${lastChar}"
+
+ if [[ -z ${cur} && ${lastChar} != = ]]; then
+ # If the last parameter is complete (there is a space following it),
+ # we add an extra empty parameter to indicate this to the Go completion code.
+ __%[1]s_debug "Adding extra empty parameter"
+ requestComp="${requestComp} ''"
+ fi
+
+ # When completing a flag with an = (e.g., %[1]s -n=)
+ # bash focuses on the part after the =, so we need to remove
+ # the flag part from $cur
+ if [[ ${cur} == -*=* ]]; then
+ cur="${cur#*=}"
+ fi
+
+ __%[1]s_debug "Calling ${requestComp}"
+ # Use eval to handle any environment variables and such
+ out=$(eval "${requestComp}" 2>/dev/null)
+
+ # Extract the directive integer at the very end of the output following a colon (:)
+ directive=${out##*:}
+ # Remove the directive
+ out=${out%%:*}
+ if [[ ${directive} == "${out}" ]]; then
+ # There is no directive specified
+ directive=0
+ fi
+ __%[1]s_debug "The completion directive is: ${directive}"
+ __%[1]s_debug "The completions are: ${out}"
+}
+
+__%[1]s_process_completion_results() {
+ local shellCompDirectiveError=%[3]d
+ local shellCompDirectiveNoSpace=%[4]d
+ local shellCompDirectiveNoFileComp=%[5]d
+ local shellCompDirectiveFilterFileExt=%[6]d
+ local shellCompDirectiveFilterDirs=%[7]d
+ local shellCompDirectiveKeepOrder=%[8]d
+
+ if (((directive & shellCompDirectiveError) != 0)); then
+ # Error code. No completion.
+ __%[1]s_debug "Received error from custom completion go code"
+ return
+ else
+ if (((directive & shellCompDirectiveNoSpace) != 0)); then
+ if [[ $(type -t compopt) == builtin ]]; then
+ __%[1]s_debug "Activating no space"
+ compopt -o nospace
+ else
+ __%[1]s_debug "No space directive not supported in this version of bash"
+ fi
+ fi
+ if (((directive & shellCompDirectiveKeepOrder) != 0)); then
+ if [[ $(type -t compopt) == builtin ]]; then
+ # nosort is not supported for bash versions lower than 4.4
+ if [[ ${BASH_VERSINFO[0]} -lt 4 || ( ${BASH_VERSINFO[0]} -eq 4 && ${BASH_VERSINFO[1]} -lt 4 ) ]]; then
+ __%[1]s_debug "No sort directive not supported in this version of bash"
+ else
+ __%[1]s_debug "Activating keep order"
+ compopt -o nosort
+ fi
+ else
+ __%[1]s_debug "No sort directive not supported in this version of bash"
+ fi
+ fi
+ if (((directive & shellCompDirectiveNoFileComp) != 0)); then
+ if [[ $(type -t compopt) == builtin ]]; then
+ __%[1]s_debug "Activating no file completion"
+ compopt +o default
+ else
+ __%[1]s_debug "No file completion directive not supported in this version of bash"
+ fi
+ fi
+ fi
+
+ # Separate activeHelp from normal completions
+ local completions=()
+ local activeHelp=()
+ __%[1]s_extract_activeHelp
+
+ if (((directive & shellCompDirectiveFilterFileExt) != 0)); then
+ # File extension filtering
+ local fullFilter filter filteringCmd
+
+ # Do not use quotes around the $completions variable or else newline
+ # characters will be kept.
+ for filter in ${completions[*]}; do
+ fullFilter+="$filter|"
+ done
+
+ filteringCmd="_filedir $fullFilter"
+ __%[1]s_debug "File filtering command: $filteringCmd"
+ $filteringCmd
+ elif (((directive & shellCompDirectiveFilterDirs) != 0)); then
+ # File completion for directories only
+
+ local subdir
+ subdir=${completions[0]}
+ if [[ -n $subdir ]]; then
+ __%[1]s_debug "Listing directories in $subdir"
+ pushd "$subdir" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return
+ else
+ __%[1]s_debug "Listing directories in ."
+ _filedir -d
+ fi
+ else
+ __%[1]s_handle_completion_types
+ fi
+
+ __%[1]s_handle_special_char "$cur" :
+ __%[1]s_handle_special_char "$cur" =
+
+ # Print the activeHelp statements before we finish
+ if ((${#activeHelp[*]} != 0)); then
+ printf "\n";
+ printf "%%s\n" "${activeHelp[@]}"
+ printf "\n"
+
+ # The prompt format is only available from bash 4.4.
+ # We test if it is available before using it.
+ if (x=${PS1@P}) 2> /dev/null; then
+ printf "%%s" "${PS1@P}${COMP_LINE[@]}"
+ else
+ # Can't print the prompt. Just print the
+ # text the user had typed; it is workable enough.
+ printf "%%s" "${COMP_LINE[@]}"
+ fi
+ fi
+}
+
+# Separate activeHelp lines from real completions.
+# Fills the $activeHelp and $completions arrays.
+__%[1]s_extract_activeHelp() {
+ local activeHelpMarker="%[9]s"
+ local endIndex=${#activeHelpMarker}
+
+ while IFS='' read -r comp; do
+ if [[ ${comp:0:endIndex} == $activeHelpMarker ]]; then
+ comp=${comp:endIndex}
+ __%[1]s_debug "ActiveHelp found: $comp"
+ if [[ -n $comp ]]; then
+ activeHelp+=("$comp")
+ fi
+ else
+ # Not an activeHelp line but a normal completion
+ completions+=("$comp")
+ fi
+ done <<<"${out}"
+}
+
+__%[1]s_handle_completion_types() {
+ __%[1]s_debug "__%[1]s_handle_completion_types: COMP_TYPE is $COMP_TYPE"
+
+ case $COMP_TYPE in
+ 37|42)
+ # Type: menu-complete/menu-complete-backward and insert-completions
+ # If the user requested inserting one completion at a time, or all
+ # completions at once on the command-line, we must remove the descriptions.
+ # https://github.com/spf13/cobra/issues/1508
+ local tab=$'\t' comp
+ while IFS='' read -r comp; do
+ [[ -z $comp ]] && continue
+ # Strip any description
+ comp=${comp%%%%$tab*}
+ # Only consider the completions that match
+ if [[ $comp == "$cur"* ]]; then
+ COMPREPLY+=("$comp")
+ fi
+ done < <(printf "%%s\n" "${completions[@]}")
+ ;;
+
+ *)
+ # Type: complete (normal completion)
+ __%[1]s_handle_standard_completion_case
+ ;;
+ esac
+}
+
+__%[1]s_handle_standard_completion_case() {
+ local tab=$'\t' comp
+
+ # Short circuit to optimize if we don't have descriptions
+ if [[ "${completions[*]}" != *$tab* ]]; then
+ IFS=$'\n' read -ra COMPREPLY -d '' < <(compgen -W "${completions[*]}" -- "$cur")
+ return 0
+ fi
+
+ local longest=0
+ local compline
+ # Look for the longest completion so that we can format things nicely
+ while IFS='' read -r compline; do
+ [[ -z $compline ]] && continue
+ # Strip any description before checking the length
+ comp=${compline%%%%$tab*}
+ # Only consider the completions that match
+ [[ $comp == "$cur"* ]] || continue
+ COMPREPLY+=("$compline")
+ if ((${#comp}>longest)); then
+ longest=${#comp}
+ fi
+ done < <(printf "%%s\n" "${completions[@]}")
+
+ # If there is a single completion left, remove the description text
+ if ((${#COMPREPLY[*]} == 1)); then
+ __%[1]s_debug "COMPREPLY[0]: ${COMPREPLY[0]}"
+ comp="${COMPREPLY[0]%%%%$tab*}"
+ __%[1]s_debug "Removed description from single completion, which is now: ${comp}"
+ COMPREPLY[0]=$comp
+ else # Format the descriptions
+ __%[1]s_format_comp_descriptions $longest
+ fi
+}
+
+__%[1]s_handle_special_char()
+{
+ local comp="$1"
+ local char=$2
+ if [[ "$comp" == *${char}* && "$COMP_WORDBREAKS" == *${char}* ]]; then
+ local word=${comp%%"${comp##*${char}}"}
+ local idx=${#COMPREPLY[*]}
+ while ((--idx >= 0)); do
+ COMPREPLY[idx]=${COMPREPLY[idx]#"$word"}
+ done
+ fi
+}
+
+__%[1]s_format_comp_descriptions()
+{
+ local tab=$'\t'
+ local comp desc maxdesclength
+ local longest=$1
+
+ local i ci
+ for ci in ${!COMPREPLY[*]}; do
+ comp=${COMPREPLY[ci]}
+ # Properly format the description string which follows a tab character if there is one
+ if [[ "$comp" == *$tab* ]]; then
+ __%[1]s_debug "Original comp: $comp"
+ desc=${comp#*$tab}
+ comp=${comp%%%%$tab*}
+
+ # $COLUMNS stores the current shell width.
+ # Remove an extra 4 because we add 2 spaces and 2 parentheses.
+ maxdesclength=$(( COLUMNS - longest - 4 ))
+
+ # Make sure we can fit a description of at least 8 characters
+ # if we are to align the descriptions.
+ if ((maxdesclength > 8)); then
+ # Add the proper number of spaces to align the descriptions
+ for ((i = ${#comp} ; i < longest ; i++)); do
+ comp+=" "
+ done
+ else
+ # Don't pad the descriptions so we can fit more text after the completion
+ maxdesclength=$(( COLUMNS - ${#comp} - 4 ))
+ fi
+
+ # If there is enough space for any description text,
+ # truncate the descriptions that are too long for the shell width
+ if ((maxdesclength > 0)); then
+ if ((${#desc} > maxdesclength)); then
+ desc=${desc:0:$(( maxdesclength - 1 ))}
+ desc+="…"
+ fi
+ comp+=" ($desc)"
+ fi
+ COMPREPLY[ci]=$comp
+ __%[1]s_debug "Final comp: $comp"
+ fi
+ done
+}
+
+__start_%[1]s()
+{
+ local cur prev words cword split
+
+ COMPREPLY=()
+
+ # Call _init_completion from the bash-completion package
+ # to prepare the arguments properly
+ if declare -F _init_completion >/dev/null 2>&1; then
+ _init_completion -n =: || return
+ else
+ __%[1]s_init_completion -n =: || return
+ fi
+
+ __%[1]s_debug
+ __%[1]s_debug "========= starting completion logic =========="
+ __%[1]s_debug "cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}, cword is $cword"
+
+ # The user could have moved the cursor backwards on the command-line.
+ # We need to trigger completion from the $cword location, so we need
+ # to truncate the command-line ($words) up to the $cword location.
+ words=("${words[@]:0:$cword+1}")
+ __%[1]s_debug "Truncated words[*]: ${words[*]},"
+
+ local out directive
+ __%[1]s_get_completion_results
+ __%[1]s_process_completion_results
+}
+
+if [[ $(type -t compopt) = "builtin" ]]; then
+ complete -o default -F __start_%[1]s %[1]s
+else
+ complete -o default -o nospace -F __start_%[1]s %[1]s
+fi
+
+# ex: ts=4 sw=4 et filetype=sh
+`, name, compCmd,
+ ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder,
+ activeHelpMarker))
+}
+
+// GenBashCompletionFileV2 generates a Bash completion file (version 2).
+func (c *Command) GenBashCompletionFileV2(filename string, includeDesc bool) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.GenBashCompletionV2(outFile, includeDesc)
+}
+
+// GenBashCompletionV2 generates Bash completion file version 2
+// and writes it to the passed writer.
+func (c *Command) GenBashCompletionV2(w io.Writer, includeDesc bool) error {
+ return c.genBashCompletion(w, includeDesc)
+}
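+
+// Example (illustrative sketch, not part of upstream cobra): generating the
+// V2 script with descriptions enabled. At completion time the generated
+// script shells out to the hidden __complete command of the program:
+//
+//	rootCmd := &Command{Use: "app"}
+//	if err := rootCmd.GenBashCompletionV2(os.Stdout, true); err != nil {
+//		fmt.Fprintln(os.Stderr, err)
+//	}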
diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go
new file mode 100644
index 0000000..e0b0947
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra.go
@@ -0,0 +1,242 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Commands similar to git, go tools, and other modern CLI tools,
+// inspired by go, go-Commander, gh, and subcommand.
+
+package cobra
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "strconv"
+ "strings"
+ "text/template"
+ "time"
+ "unicode"
+)
+
+var templateFuncs = template.FuncMap{
+ "trim": strings.TrimSpace,
+ "trimRightSpace": trimRightSpace,
+ "trimTrailingWhitespaces": trimRightSpace,
+ "appendIfNotPresent": appendIfNotPresent,
+ "rpad": rpad,
+ "gt": Gt,
+ "eq": Eq,
+}
+
+var initializers []func()
+var finalizers []func()
+
+const (
+ defaultPrefixMatching = false
+ defaultCommandSorting = true
+ defaultCaseInsensitive = false
+ defaultTraverseRunHooks = false
+)
+
+// EnablePrefixMatching allows setting automatic prefix matching. Automatic prefix matching can be a dangerous thing
+// to automatically enable in CLI tools.
+// Set this to true to enable it.
+var EnablePrefixMatching = defaultPrefixMatching
+
+// EnableCommandSorting controls sorting of the slice of commands, which is turned on by default.
+// To disable sorting, set it to false.
+var EnableCommandSorting = defaultCommandSorting
+
+// EnableCaseInsensitive allows case-insensitive command names (case-sensitive by default).
+var EnableCaseInsensitive = defaultCaseInsensitive
+
+// EnableTraverseRunHooks executes persistent pre-run and post-run hooks from all parents.
+// By default this is disabled, which means only the first run hook to be found is executed.
+var EnableTraverseRunHooks = defaultTraverseRunHooks
+
+// MousetrapHelpText enables an information splash screen on Windows
+// if the CLI is started from explorer.exe.
+// To disable the mousetrap, just set this variable to blank string ("").
+// Works only on Microsoft Windows.
+var MousetrapHelpText = `This is a command line tool.
+
+You need to open cmd.exe and run it from there.
+`
+
+// MousetrapDisplayDuration controls how long the MousetrapHelpText message is displayed on Windows
+// if the CLI is started from explorer.exe. Set to 0 to wait for the return key to be pressed.
+// To disable the mousetrap, just set MousetrapHelpText to blank string ("").
+// Works only on Microsoft Windows.
+var MousetrapDisplayDuration = 5 * time.Second
+
+// AddTemplateFunc adds a template function that's available to Usage and Help
+// template generation.
+func AddTemplateFunc(name string, tmplFunc interface{}) {
+ templateFuncs[name] = tmplFunc
+}
+
+// AddTemplateFuncs adds multiple template functions that are available to Usage and
+// Help template generation.
+func AddTemplateFuncs(tmplFuncs template.FuncMap) {
+ for k, v := range tmplFuncs {
+ templateFuncs[k] = v
+ }
+}
+
+// OnInitialize sets the passed functions to be run when each command's
+// Execute method is called.
+func OnInitialize(y ...func()) {
+ initializers = append(initializers, y...)
+}
+
+// OnFinalize sets the passed functions to be run when each command's
+// Execute method terminates.
+func OnFinalize(y ...func()) {
+ finalizers = append(finalizers, y...)
+}
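+
+// Example (illustrative sketch, not part of upstream cobra): initializers run
+// just before a command's Run functions and finalizers run after them, which
+// makes them a natural place to load and flush configuration:
+//
+//	OnInitialize(func() { fmt.Println("loading config") })
+//	OnFinalize(func() { fmt.Println("flushing config") })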
+
+// FIXME Gt is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans,
+// Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as
+// ints and then compared.
+func Gt(a interface{}, b interface{}) bool {
+ var left, right int64
+ av := reflect.ValueOf(a)
+
+ switch av.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ left = int64(av.Len())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ left = av.Int()
+ case reflect.String:
+ left, _ = strconv.ParseInt(av.String(), 10, 64)
+ }
+
+ bv := reflect.ValueOf(b)
+
+ switch bv.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ right = int64(bv.Len())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ right = bv.Int()
+ case reflect.String:
+ right, _ = strconv.ParseInt(bv.String(), 10, 64)
+ }
+
+ return left > right
+}
+
+// FIXME Eq is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic.
+func Eq(a interface{}, b interface{}) bool {
+ av := reflect.ValueOf(a)
+ bv := reflect.ValueOf(b)
+
+ switch av.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ panic("Eq called on unsupported type")
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return av.Int() == bv.Int()
+ case reflect.String:
+ return av.String() == bv.String()
+ }
+ return false
+}
+
+func trimRightSpace(s string) string {
+ return strings.TrimRightFunc(s, unicode.IsSpace)
+}
+
+// FIXME appendIfNotPresent is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s.
+func appendIfNotPresent(s, stringToAppend string) string {
+ if strings.Contains(s, stringToAppend) {
+ return s
+ }
+ return s + " " + stringToAppend
+}
+
+// rpad adds padding to the right of a string.
+func rpad(s string, padding int) string {
+ formattedString := fmt.Sprintf("%%-%ds", padding)
+ return fmt.Sprintf(formattedString, s)
+}
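+
+// For illustration (assumption, not upstream documentation):
+// rpad("get", 7) returns "get    ", i.e. the string left-aligned and padded
+// with spaces to a width of 7, which is how the usage templates align names.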
+
+// tmpl executes the given template text on data, writing the result to w.
+func tmpl(w io.Writer, text string, data interface{}) error {
+ t := template.New("top")
+ t.Funcs(templateFuncs)
+ template.Must(t.Parse(text))
+ return t.Execute(w, data)
+}
+
+// ld compares two strings and returns the Levenshtein distance between them.
+func ld(s, t string, ignoreCase bool) int {
+ if ignoreCase {
+ s = strings.ToLower(s)
+ t = strings.ToLower(t)
+ }
+ d := make([][]int, len(s)+1)
+ for i := range d {
+ d[i] = make([]int, len(t)+1)
+ d[i][0] = i
+ }
+ for j := range d[0] {
+ d[0][j] = j
+ }
+ for j := 1; j <= len(t); j++ {
+ for i := 1; i <= len(s); i++ {
+ if s[i-1] == t[j-1] {
+ d[i][j] = d[i-1][j-1]
+ } else {
+ min := d[i-1][j]
+ if d[i][j-1] < min {
+ min = d[i][j-1]
+ }
+ if d[i-1][j-1] < min {
+ min = d[i-1][j-1]
+ }
+ d[i][j] = min + 1
+ }
+ }
+
+ }
+ return d[len(s)][len(t)]
+}
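+
+// For illustration (assumption, not upstream documentation):
+// ld("serv", "serve", true) returns 1 (a single insertion); this is what lets
+// SuggestionsFor propose "serve" when a user types the unknown command "serv".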
+
+func stringInSlice(a string, list []string) bool {
+ for _, b := range list {
+ if b == a {
+ return true
+ }
+ }
+ return false
+}
+
+// CheckErr prints the msg with the prefix 'Error:' and exits with error code 1. If the msg is nil, it does nothing.
+func CheckErr(msg interface{}) {
+ if msg != nil {
+ fmt.Fprintln(os.Stderr, "Error:", msg)
+ os.Exit(1)
+ }
+}
+
+// WriteStringAndCheck writes a string into a buffer, and checks if the error is not nil.
+func WriteStringAndCheck(b io.StringWriter, s string) {
+ _, err := b.WriteString(s)
+ CheckErr(err)
+}
diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go
new file mode 100644
index 0000000..54748fc
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command.go
@@ -0,0 +1,1896 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces.
+// In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code.
+package cobra
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ flag "github.com/spf13/pflag"
+)
+
+const (
+ FlagSetByCobraAnnotation = "cobra_annotation_flag_set_by_cobra"
+ CommandDisplayNameAnnotation = "cobra_annotation_command_display_name"
+)
+
+// FParseErrWhitelist configures flag parse errors to be ignored.
+type FParseErrWhitelist flag.ParseErrorsWhitelist
+
+// Group is a structure to manage groups for commands.
+type Group struct {
+ ID string
+ Title string
+}
+
+// Command is just that, a command for your application.
+// E.g. 'go run ...' - 'run' is the command. Cobra requires
+// you to define the usage and description as part of your command
+// definition to ensure usability.
+type Command struct {
+ // Use is the one-line usage message.
+ // Recommended syntax is as follows:
+ // [ ] identifies an optional argument. Arguments that are not enclosed in brackets are required.
+ // ... indicates that you can specify multiple values for the previous argument.
+ // | indicates mutually exclusive information. You can use the argument to the left of the separator or the
+ // argument to the right of the separator. You cannot use both arguments in a single use of the command.
+ // { } delimits a set of mutually exclusive arguments when one of the arguments is required. If the arguments are
+ // optional, they are enclosed in brackets ([ ]).
+ // Example: add [-F file | -D dir]... [-f format] profile
+ Use string
+
+ // Aliases is an array of aliases that can be used instead of the first word in Use.
+ Aliases []string
+
+ // SuggestFor is an array of command names for which this command will be suggested -
+ // similar to aliases but only suggests.
+ SuggestFor []string
+
+ // Short is the short description shown in the 'help' output.
+ Short string
+
+ // The group id under which this subcommand is grouped in the 'help' output of its parent.
+ GroupID string
+
+// Long is the long message shown in the 'help <this-command>' output.
+ Long string
+
+ // Example is examples of how to use the command.
+ Example string
+
+// ValidArgs is a list of all valid non-flag arguments that are accepted in shell completions.
+ ValidArgs []string
+ // ValidArgsFunction is an optional function that provides valid non-flag arguments for shell completion.
+ // It is a dynamic version of using ValidArgs.
+ // Only one of ValidArgs and ValidArgsFunction can be used for a command.
+ ValidArgsFunction func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)
+
+ // Expected arguments
+ Args PositionalArgs
+
+// ArgAliases is a list of aliases for ValidArgs.
+ // These are not suggested to the user in the shell completion,
+ // but accepted if entered manually.
+ ArgAliases []string
+
+// BashCompletionFunction defines custom bash functions used by the legacy bash autocompletion generator.
+// For portability with other shells, it is recommended to instead use ValidArgsFunction.
+ BashCompletionFunction string
+
+// Deprecated defines whether this command is deprecated; when set, this string is printed when the command is used.
+ Deprecated string
+
+ // Annotations are key/value pairs that can be used by applications to identify or
+ // group commands or set special options.
+ Annotations map[string]string
+
+ // Version defines the version for this command. If this value is non-empty and the command does not
+ // define a "version" flag, a "version" boolean flag will be added to the command and, if specified,
+ // will print content of the "Version" variable. A shorthand "v" flag will also be added if the
+ // command does not define one.
+ Version string
+
+ // The *Run functions are executed in the following order:
+ // * PersistentPreRun()
+ // * PreRun()
+ // * Run()
+ // * PostRun()
+ // * PersistentPostRun()
+ // All functions get the same args, the arguments after the command name.
+ // The *PreRun and *PostRun functions will only be executed if the Run function of the current
+ // command has been declared.
+ //
+ // PersistentPreRun: children of this command will inherit and execute.
+ PersistentPreRun func(cmd *Command, args []string)
+ // PersistentPreRunE: PersistentPreRun but returns an error.
+ PersistentPreRunE func(cmd *Command, args []string) error
+ // PreRun: children of this command will not inherit.
+ PreRun func(cmd *Command, args []string)
+ // PreRunE: PreRun but returns an error.
+ PreRunE func(cmd *Command, args []string) error
+ // Run: Typically the actual work function. Most commands will only implement this.
+ Run func(cmd *Command, args []string)
+ // RunE: Run but returns an error.
+ RunE func(cmd *Command, args []string) error
+ // PostRun: run after the Run command.
+ PostRun func(cmd *Command, args []string)
+ // PostRunE: PostRun but returns an error.
+ PostRunE func(cmd *Command, args []string) error
+ // PersistentPostRun: children of this command will inherit and execute after PostRun.
+ PersistentPostRun func(cmd *Command, args []string)
+ // PersistentPostRunE: PersistentPostRun but returns an error.
+ PersistentPostRunE func(cmd *Command, args []string) error
+
+ // groups for subcommands
+ commandgroups []*Group
+
+ // args is actual args parsed from flags.
+ args []string
+ // flagErrorBuf contains all error messages from pflag.
+ flagErrorBuf *bytes.Buffer
+ // flags is full set of flags.
+ flags *flag.FlagSet
+ // pflags contains persistent flags.
+ pflags *flag.FlagSet
+ // lflags contains local flags.
+// This field does not represent internal state; it is used as a cache to optimize the LocalFlags function call.
+ lflags *flag.FlagSet
+ // iflags contains inherited flags.
+// This field does not represent internal state; it is used as a cache to optimize the InheritedFlags function call.
+ iflags *flag.FlagSet
+ // parentsPflags is all persistent flags of cmd's parents.
+ parentsPflags *flag.FlagSet
+ // globNormFunc is the global normalization function
+ // that we can use on every pflag set and children commands
+ globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName
+
+ // usageFunc is usage func defined by user.
+ usageFunc func(*Command) error
+ // usageTemplate is usage template defined by user.
+ usageTemplate string
+ // flagErrorFunc is func defined by user and it's called when the parsing of
+ // flags returns an error.
+ flagErrorFunc func(*Command, error) error
+ // helpTemplate is help template defined by user.
+ helpTemplate string
+ // helpFunc is help func defined by user.
+ helpFunc func(*Command, []string)
+ // helpCommand is command with usage 'help'. If it's not defined by user,
+ // cobra uses default help command.
+ helpCommand *Command
+ // helpCommandGroupID is the group id for the helpCommand
+ helpCommandGroupID string
+
+ // completionCommandGroupID is the group id for the completion command
+ completionCommandGroupID string
+
+ // versionTemplate is the version template defined by user.
+ versionTemplate string
+
+ // errPrefix is the error message prefix defined by user.
+ errPrefix string
+
+ // inReader is a reader defined by the user that replaces stdin
+ inReader io.Reader
+ // outWriter is a writer defined by the user that replaces stdout
+ outWriter io.Writer
+ // errWriter is a writer defined by the user that replaces stderr
+ errWriter io.Writer
+
+ // FParseErrWhitelist flag parse errors to be ignored
+ FParseErrWhitelist FParseErrWhitelist
+
+ // CompletionOptions is a set of options to control the handling of shell completion
+ CompletionOptions CompletionOptions
+
+// commandsAreSorted tracks whether the commands slice is sorted.
+ commandsAreSorted bool
+ // commandCalledAs is the name or alias value used to call this command.
+ commandCalledAs struct {
+ name string
+ called bool
+ }
+
+ ctx context.Context
+
+ // commands is the list of commands supported by this program.
+ commands []*Command
+ // parent is a parent command for this command.
+ parent *Command
+ // Max lengths of commands' string lengths for use in padding.
+ commandsMaxUseLen int
+ commandsMaxCommandPathLen int
+ commandsMaxNameLen int
+
+ // TraverseChildren parses flags on all parents before executing child command.
+ TraverseChildren bool
+
+// Hidden defines whether this command is hidden and should NOT show up in the list of available commands.
+ Hidden bool
+
+ // SilenceErrors is an option to quiet errors down stream.
+ SilenceErrors bool
+
+ // SilenceUsage is an option to silence usage when an error occurs.
+ SilenceUsage bool
+
+ // DisableFlagParsing disables the flag parsing.
+ // If this is true all flags will be passed to the command as arguments.
+ DisableFlagParsing bool
+
+// DisableAutoGenTag defines whether the gen tag ("Auto generated by spf13/cobra...")
+// will be printed when generating docs for this command.
+ DisableAutoGenTag bool
+
+ // DisableFlagsInUseLine will disable the addition of [flags] to the usage
+ // line of a command when printing help or generating docs
+ DisableFlagsInUseLine bool
+
+ // DisableSuggestions disables the suggestions based on Levenshtein distance
+ // that go along with 'unknown command' messages.
+ DisableSuggestions bool
+
+ // SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions.
+ // Must be > 0.
+ SuggestionsMinimumDistance int
+}
+
+// Context returns underlying command context. If command was executed
+// with ExecuteContext or the context was set with SetContext, the
+// previously set context will be returned. Otherwise, nil is returned.
+//
+// Notice that a call to Execute and ExecuteC will replace a nil context of
+// a command with a context.Background, so a background context will be
+// returned by Context after one of these functions has been called.
+func (c *Command) Context() context.Context {
+ return c.ctx
+}
+
+// SetContext sets context for the command. This context will be overwritten by
+// Command.ExecuteContext or Command.ExecuteContextC.
+func (c *Command) SetContext(ctx context.Context) {
+ c.ctx = ctx
+}
+
+// SetArgs sets arguments for the command. It is set to os.Args[1:] by default;
+// overriding it is particularly useful when testing.
+func (c *Command) SetArgs(a []string) {
+ c.args = a
+}
+
+// SetOutput sets the destination for usage and error messages.
+// If output is nil, os.Stderr is used.
+// Deprecated: Use SetOut and/or SetErr instead
+func (c *Command) SetOutput(output io.Writer) {
+ c.outWriter = output
+ c.errWriter = output
+}
+
+// SetOut sets the destination for usage messages.
+// If newOut is nil, os.Stdout is used.
+func (c *Command) SetOut(newOut io.Writer) {
+ c.outWriter = newOut
+}
+
+// SetErr sets the destination for error messages.
+// If newErr is nil, os.Stderr is used.
+func (c *Command) SetErr(newErr io.Writer) {
+ c.errWriter = newErr
+}
+
+// SetIn sets the source for input data.
+// If newIn is nil, os.Stdin is used.
+func (c *Command) SetIn(newIn io.Reader) {
+ c.inReader = newIn
+}
+
+// SetUsageFunc sets usage function. Usage can be defined by application.
+func (c *Command) SetUsageFunc(f func(*Command) error) {
+ c.usageFunc = f
+}
+
+// SetUsageTemplate sets usage template. Can be defined by Application.
+func (c *Command) SetUsageTemplate(s string) {
+ c.usageTemplate = s
+}
+
+// SetFlagErrorFunc sets a function to generate an error when flag parsing
+// fails.
+func (c *Command) SetFlagErrorFunc(f func(*Command, error) error) {
+ c.flagErrorFunc = f
+}
+
+// SetHelpFunc sets help function. Can be defined by Application.
+func (c *Command) SetHelpFunc(f func(*Command, []string)) {
+ c.helpFunc = f
+}
+
+// SetHelpCommand sets help command.
+func (c *Command) SetHelpCommand(cmd *Command) {
+ c.helpCommand = cmd
+}
+
+// SetHelpCommandGroupID sets the group id of the help command.
+func (c *Command) SetHelpCommandGroupID(groupID string) {
+ if c.helpCommand != nil {
+ c.helpCommand.GroupID = groupID
+ }
+ // helpCommandGroupID is used if no helpCommand is defined by the user
+ c.helpCommandGroupID = groupID
+}
+
+// SetCompletionCommandGroupID sets the group id of the completion command.
+func (c *Command) SetCompletionCommandGroupID(groupID string) {
+ // completionCommandGroupID is used if no completion command is defined by the user
+ c.Root().completionCommandGroupID = groupID
+}
+
+// SetHelpTemplate sets help template to be used. Application can use it to set custom template.
+func (c *Command) SetHelpTemplate(s string) {
+ c.helpTemplate = s
+}
+
+// SetVersionTemplate sets version template to be used. Application can use it to set custom template.
+func (c *Command) SetVersionTemplate(s string) {
+ c.versionTemplate = s
+}
+
+// SetErrPrefix sets error message prefix to be used. Application can use it to set custom prefix.
+func (c *Command) SetErrPrefix(s string) {
+ c.errPrefix = s
+}
+
+// SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands.
+// The user should not have a cyclic dependency on commands.
+func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) {
+ c.Flags().SetNormalizeFunc(n)
+ c.PersistentFlags().SetNormalizeFunc(n)
+ c.globNormFunc = n
+
+ for _, command := range c.commands {
+ command.SetGlobalNormalizationFunc(n)
+ }
+}
+
+// OutOrStdout returns output to stdout.
+func (c *Command) OutOrStdout() io.Writer {
+ return c.getOut(os.Stdout)
+}
+
+// OutOrStderr returns output to stderr.
+func (c *Command) OutOrStderr() io.Writer {
+ return c.getOut(os.Stderr)
+}
+
+// ErrOrStderr returns output to stderr.
+func (c *Command) ErrOrStderr() io.Writer {
+ return c.getErr(os.Stderr)
+}
+
+// InOrStdin returns input to stdin.
+func (c *Command) InOrStdin() io.Reader {
+ return c.getIn(os.Stdin)
+}
+
+func (c *Command) getOut(def io.Writer) io.Writer {
+ if c.outWriter != nil {
+ return c.outWriter
+ }
+ if c.HasParent() {
+ return c.parent.getOut(def)
+ }
+ return def
+}
+
+func (c *Command) getErr(def io.Writer) io.Writer {
+ if c.errWriter != nil {
+ return c.errWriter
+ }
+ if c.HasParent() {
+ return c.parent.getErr(def)
+ }
+ return def
+}
+
+func (c *Command) getIn(def io.Reader) io.Reader {
+ if c.inReader != nil {
+ return c.inReader
+ }
+ if c.HasParent() {
+ return c.parent.getIn(def)
+ }
+ return def
+}
+
+// UsageFunc returns either the function set by SetUsageFunc for this command
+// or a parent, or it returns a default usage function.
+func (c *Command) UsageFunc() (f func(*Command) error) {
+ if c.usageFunc != nil {
+ return c.usageFunc
+ }
+ if c.HasParent() {
+ return c.Parent().UsageFunc()
+ }
+ return func(c *Command) error {
+ c.mergePersistentFlags()
+ err := tmpl(c.OutOrStderr(), c.UsageTemplate(), c)
+ if err != nil {
+ c.PrintErrln(err)
+ }
+ return err
+ }
+}
+
+// Usage puts out the usage for the command.
+// Used when a user provides invalid input.
+// Can be defined by user by overriding UsageFunc.
+func (c *Command) Usage() error {
+ return c.UsageFunc()(c)
+}
+
+// HelpFunc returns either the function set by SetHelpFunc for this command
+// or a parent, or it returns a function with default help behavior.
+func (c *Command) HelpFunc() func(*Command, []string) {
+ if c.helpFunc != nil {
+ return c.helpFunc
+ }
+ if c.HasParent() {
+ return c.Parent().HelpFunc()
+ }
+ return func(c *Command, a []string) {
+ c.mergePersistentFlags()
+ // The help should be sent to stdout
+ // See https://github.com/spf13/cobra/issues/1002
+ err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c)
+ if err != nil {
+ c.PrintErrln(err)
+ }
+ }
+}
+
+// Help puts out the help for the command.
+// Used when a user calls help [command].
+// Can be defined by user by overriding HelpFunc.
+func (c *Command) Help() error {
+ c.HelpFunc()(c, []string{})
+ return nil
+}
+
+// UsageString returns the usage string.
+func (c *Command) UsageString() string {
+ // Storing normal writers
+ tmpOutput := c.outWriter
+ tmpErr := c.errWriter
+
+ bb := new(bytes.Buffer)
+ c.outWriter = bb
+ c.errWriter = bb
+
+ CheckErr(c.Usage())
+
+ // Setting things back to normal
+ c.outWriter = tmpOutput
+ c.errWriter = tmpErr
+
+ return bb.String()
+}
+
+// FlagErrorFunc returns either the function set by SetFlagErrorFunc for this
+// command or a parent, or it returns a function which returns the original
+// error.
+func (c *Command) FlagErrorFunc() (f func(*Command, error) error) {
+ if c.flagErrorFunc != nil {
+ return c.flagErrorFunc
+ }
+
+ if c.HasParent() {
+ return c.parent.FlagErrorFunc()
+ }
+ return func(c *Command, err error) error {
+ return err
+ }
+}
+
+var minUsagePadding = 25
+
+// UsagePadding returns the padding for the usage.
+func (c *Command) UsagePadding() int {
+ if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen {
+ return minUsagePadding
+ }
+ return c.parent.commandsMaxUseLen
+}
+
+var minCommandPathPadding = 11
+
+// CommandPathPadding returns the padding for the command path.
+func (c *Command) CommandPathPadding() int {
+ if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen {
+ return minCommandPathPadding
+ }
+ return c.parent.commandsMaxCommandPathLen
+}
+
+var minNamePadding = 11
+
+// NamePadding returns padding for the name.
+func (c *Command) NamePadding() int {
+ if c.parent == nil || minNamePadding > c.parent.commandsMaxNameLen {
+ return minNamePadding
+ }
+ return c.parent.commandsMaxNameLen
+}
+
+// UsageTemplate returns usage template for the command.
+func (c *Command) UsageTemplate() string {
+ if c.usageTemplate != "" {
+ return c.usageTemplate
+ }
+
+ if c.HasParent() {
+ return c.parent.UsageTemplate()
+ }
+ return `Usage:{{if .Runnable}}
+ {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
+ {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}
+
+Aliases:
+ {{.NameAndAliases}}{{end}}{{if .HasExample}}
+
+Examples:
+{{.Example}}{{end}}{{if .HasAvailableSubCommands}}{{$cmds := .Commands}}{{if eq (len .Groups) 0}}
+
+Available Commands:{{range $cmds}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
+ {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{else}}{{range $group := .Groups}}
+
+{{.Title}}{{range $cmds}}{{if (and (eq .GroupID $group.ID) (or .IsAvailableCommand (eq .Name "help")))}}
+ {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if not .AllChildCommandsHaveGroup}}
+
+Additional Commands:{{range $cmds}}{{if (and (eq .GroupID "") (or .IsAvailableCommand (eq .Name "help")))}}
+ {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
+
+Flags:
+{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}}
+
+Global Flags:
+{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}}
+
+Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}}
+ {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}}
+
+Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}}
+`
+}
+
+// HelpTemplate returns the help template for the command.
+func (c *Command) HelpTemplate() string {
+ if c.helpTemplate != "" {
+ return c.helpTemplate
+ }
+
+ if c.HasParent() {
+ return c.parent.HelpTemplate()
+ }
+ return `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}}
+
+{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
+}
+
+// VersionTemplate returns the version template for the command.
+func (c *Command) VersionTemplate() string {
+ if c.versionTemplate != "" {
+ return c.versionTemplate
+ }
+
+ if c.HasParent() {
+ return c.parent.VersionTemplate()
+ }
+ return `{{with .Name}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}}
+`
+}
+
+// ErrPrefix returns the error message prefix for the command.
+func (c *Command) ErrPrefix() string {
+ if c.errPrefix != "" {
+ return c.errPrefix
+ }
+
+ if c.HasParent() {
+ return c.parent.ErrPrefix()
+ }
+ return "Error:"
+}
+
+func hasNoOptDefVal(name string, fs *flag.FlagSet) bool {
+ flag := fs.Lookup(name)
+ if flag == nil {
+ return false
+ }
+ return flag.NoOptDefVal != ""
+}
+
+func shortHasNoOptDefVal(name string, fs *flag.FlagSet) bool {
+ if len(name) == 0 {
+ return false
+ }
+
+ flag := fs.ShorthandLookup(name[:1])
+ if flag == nil {
+ return false
+ }
+ return flag.NoOptDefVal != ""
+}
+
+func stripFlags(args []string, c *Command) []string {
+ if len(args) == 0 {
+ return args
+ }
+ c.mergePersistentFlags()
+
+ commands := []string{}
+ flags := c.Flags()
+
+Loop:
+ for len(args) > 0 {
+ s := args[0]
+ args = args[1:]
+ switch {
+ case s == "--":
+ // "--" terminates the flags
+ break Loop
+ case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags):
+ // If '--flag arg' then
+ // delete arg from args.
+ fallthrough // (do the same as below)
+ case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags):
+ // If '-f arg' then
+ // delete 'arg' from args or break the loop if len(args) <= 1.
+ if len(args) <= 1 {
+ break Loop
+ } else {
+ args = args[1:]
+ continue
+ }
+ case s != "" && !strings.HasPrefix(s, "-"):
+ commands = append(commands, s)
+ }
+ }
+
+ return commands
+}
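+
+// For illustration (assumption, not upstream documentation): given a command
+// that defines a "--namespace" string flag, stripFlags reduces
+// []string{"get", "--namespace", "demo", "pods"} to []string{"get", "pods"};
+// flag names and their values are dropped so only candidate subcommand words remain.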
+
+// argsMinusFirstX removes only the first occurrence of x from args. Otherwise, commands that look like
+// openshift admin policy add-role-to-user admin my-user would lose the admin argument (args[4]).
+// Special care needs to be taken not to remove a flag value.
+func (c *Command) argsMinusFirstX(args []string, x string) []string {
+ if len(args) == 0 {
+ return args
+ }
+ c.mergePersistentFlags()
+ flags := c.Flags()
+
+Loop:
+ for pos := 0; pos < len(args); pos++ {
+ s := args[pos]
+ switch {
+ case s == "--":
+ // -- means we have reached the end of the parseable args. Break out of the loop now.
+ break Loop
+ case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags):
+ fallthrough
+ case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags):
+ // This is a flag without a default value, and an equal sign is not used. Increment pos in order to skip
+ // over the next arg, because that is the value of this flag.
+ pos++
+ continue
+ case !strings.HasPrefix(s, "-"):
+ // This is not a flag or a flag value. Check to see if it matches what we're looking for, and if so,
+ // return the args, excluding the one at this position.
+ if s == x {
+ ret := make([]string, 0, len(args)-1)
+ ret = append(ret, args[:pos]...)
+ ret = append(ret, args[pos+1:]...)
+ return ret
+ }
+ }
+ }
+ return args
+}
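+
+// For illustration (assumption, not upstream documentation): with x = "admin",
+// argsMinusFirstX([]string{"policy", "add-role-to-user", "admin", "my-user", "admin"}, "admin")
+// returns []string{"policy", "add-role-to-user", "my-user", "admin"};
+// only the first occurrence is removed, and flag values are skipped by the pos++ above.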
+
+func isFlagArg(arg string) bool {
+ return ((len(arg) >= 3 && arg[0:2] == "--") ||
+ (len(arg) >= 2 && arg[0] == '-' && arg[1] != '-'))
+}
+
+// Find resolves the target command given the args and command tree.
+// Meant to be run on the highest node; it only searches down.
+func (c *Command) Find(args []string) (*Command, []string, error) {
+ var innerfind func(*Command, []string) (*Command, []string)
+
+ innerfind = func(c *Command, innerArgs []string) (*Command, []string) {
+ argsWOflags := stripFlags(innerArgs, c)
+ if len(argsWOflags) == 0 {
+ return c, innerArgs
+ }
+ nextSubCmd := argsWOflags[0]
+
+ cmd := c.findNext(nextSubCmd)
+ if cmd != nil {
+ return innerfind(cmd, c.argsMinusFirstX(innerArgs, nextSubCmd))
+ }
+ return c, innerArgs
+ }
+
+ commandFound, a := innerfind(c, args)
+ if commandFound.Args == nil {
+ return commandFound, a, legacyArgs(commandFound, stripFlags(a, commandFound))
+ }
+ return commandFound, a, nil
+}
+
+func (c *Command) findSuggestions(arg string) string {
+ if c.DisableSuggestions {
+ return ""
+ }
+ if c.SuggestionsMinimumDistance <= 0 {
+ c.SuggestionsMinimumDistance = 2
+ }
+ var sb strings.Builder
+ if suggestions := c.SuggestionsFor(arg); len(suggestions) > 0 {
+ sb.WriteString("\n\nDid you mean this?\n")
+ for _, s := range suggestions {
+ _, _ = fmt.Fprintf(&sb, "\t%v\n", s)
+ }
+ }
+ return sb.String()
+}
+
+func (c *Command) findNext(next string) *Command {
+ matches := make([]*Command, 0)
+ for _, cmd := range c.commands {
+ if commandNameMatches(cmd.Name(), next) || cmd.HasAlias(next) {
+ cmd.commandCalledAs.name = next
+ return cmd
+ }
+ if EnablePrefixMatching && cmd.hasNameOrAliasPrefix(next) {
+ matches = append(matches, cmd)
+ }
+ }
+
+ if len(matches) == 1 {
+ // Temporarily disable gosec G602, which produces a false positive.
+ // See https://github.com/securego/gosec/issues/1005.
+ return matches[0] // #nosec G602
+ }
+
+ return nil
+}
+
+// Traverse the command tree to find the command, and parse args for
+// each parent.
+func (c *Command) Traverse(args []string) (*Command, []string, error) {
+ flags := []string{}
+ inFlag := false
+
+ for i, arg := range args {
+ switch {
+ // A long flag with a space-separated value
+ case strings.HasPrefix(arg, "--") && !strings.Contains(arg, "="):
+ // TODO: this isn't quite right, we should really check ahead for 'true' or 'false'
+ inFlag = !hasNoOptDefVal(arg[2:], c.Flags())
+ flags = append(flags, arg)
+ continue
+ // A short flag with a space-separated value
+ case strings.HasPrefix(arg, "-") && !strings.Contains(arg, "=") && len(arg) == 2 && !shortHasNoOptDefVal(arg[1:], c.Flags()):
+ inFlag = true
+ flags = append(flags, arg)
+ continue
+ // The value for a flag
+ case inFlag:
+ inFlag = false
+ flags = append(flags, arg)
+ continue
+ // A flag without a value, or with an `=` separated value
+ case isFlagArg(arg):
+ flags = append(flags, arg)
+ continue
+ }
+
+ cmd := c.findNext(arg)
+ if cmd == nil {
+ return c, args, nil
+ }
+
+ if err := c.ParseFlags(flags); err != nil {
+ return nil, args, err
+ }
+ return cmd.Traverse(args[i+1:])
+ }
+ return c, args, nil
+}
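+
+// For illustration (assumption, not upstream documentation): with
+// TraverseChildren set on the root, an invocation such as
+// "root --config=dev sub arg" parses --config=dev against the root before
+// descending into "sub", whereas Find defers flag parsing to the final command.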
+
+// SuggestionsFor provides suggestions for the typedName.
+func (c *Command) SuggestionsFor(typedName string) []string {
+ suggestions := []string{}
+ for _, cmd := range c.commands {
+ if cmd.IsAvailableCommand() {
+ levenshteinDistance := ld(typedName, cmd.Name(), true)
+ suggestByLevenshtein := levenshteinDistance <= c.SuggestionsMinimumDistance
+ suggestByPrefix := strings.HasPrefix(strings.ToLower(cmd.Name()), strings.ToLower(typedName))
+ if suggestByLevenshtein || suggestByPrefix {
+ suggestions = append(suggestions, cmd.Name())
+ }
+ for _, explicitSuggestion := range cmd.SuggestFor {
+ if strings.EqualFold(typedName, explicitSuggestion) {
+ suggestions = append(suggestions, cmd.Name())
+ }
+ }
+ }
+ }
+ return suggestions
+}
+
+// VisitParents visits all parents of the command and invokes fn on each parent.
+func (c *Command) VisitParents(fn func(*Command)) {
+ if c.HasParent() {
+ fn(c.Parent())
+ c.Parent().VisitParents(fn)
+ }
+}
+
+// Root finds root command.
+func (c *Command) Root() *Command {
+ if c.HasParent() {
+ return c.Parent().Root()
+ }
+ return c
+}
+
+// ArgsLenAtDash will return the length of c.Flags().Args at the moment
+// when a -- was found during args parsing.
+func (c *Command) ArgsLenAtDash() int {
+ return c.Flags().ArgsLenAtDash()
+}
+
+func (c *Command) execute(a []string) (err error) {
+ if c == nil {
+ return fmt.Errorf("called Execute() on a nil Command")
+ }
+
+ if len(c.Deprecated) > 0 {
+ c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated)
+ }
+
+ // initialize help and version flag at the last point possible to allow for user
+ // overriding
+ c.InitDefaultHelpFlag()
+ c.InitDefaultVersionFlag()
+
+ err = c.ParseFlags(a)
+ if err != nil {
+ return c.FlagErrorFunc()(c, err)
+ }
+
+ // If help is requested, regardless of other flags, we report that help is wanted.
+ // We also report that help is needed if the command isn't runnable.
+ helpVal, err := c.Flags().GetBool("help")
+ if err != nil {
+ // should be impossible to get here as we always declare a help
+ // flag in InitDefaultHelpFlag()
+ c.Println("\"help\" flag declared as non-bool. Please correct your code")
+ return err
+ }
+
+ if helpVal {
+ return flag.ErrHelp
+ }
+
+ // for back-compat, only add version flag behavior if version is defined
+ if c.Version != "" {
+ versionVal, err := c.Flags().GetBool("version")
+ if err != nil {
+ c.Println("\"version\" flag declared as non-bool. Please correct your code")
+ return err
+ }
+ if versionVal {
+ err := tmpl(c.OutOrStdout(), c.VersionTemplate(), c)
+ if err != nil {
+ c.Println(err)
+ }
+ return err
+ }
+ }
+
+ if !c.Runnable() {
+ return flag.ErrHelp
+ }
+
+ c.preRun()
+
+ defer c.postRun()
+
+ argWoFlags := c.Flags().Args()
+ if c.DisableFlagParsing {
+ argWoFlags = a
+ }
+
+ if err := c.ValidateArgs(argWoFlags); err != nil {
+ return err
+ }
+
+ parents := make([]*Command, 0, 5)
+ for p := c; p != nil; p = p.Parent() {
+ if EnableTraverseRunHooks {
+ // When EnableTraverseRunHooks is set:
+ // - Execute all persistent pre-runs from the root parent till this command.
+ // - Execute all persistent post-runs from this command till the root parent.
+ parents = append([]*Command{p}, parents...)
+ } else {
+ // Otherwise, execute only the first found persistent hook.
+ parents = append(parents, p)
+ }
+ }
+ for _, p := range parents {
+ if p.PersistentPreRunE != nil {
+ if err := p.PersistentPreRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ if !EnableTraverseRunHooks {
+ break
+ }
+ } else if p.PersistentPreRun != nil {
+ p.PersistentPreRun(c, argWoFlags)
+ if !EnableTraverseRunHooks {
+ break
+ }
+ }
+ }
+ if c.PreRunE != nil {
+ if err := c.PreRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ } else if c.PreRun != nil {
+ c.PreRun(c, argWoFlags)
+ }
+
+ if err := c.ValidateRequiredFlags(); err != nil {
+ return err
+ }
+ if err := c.ValidateFlagGroups(); err != nil {
+ return err
+ }
+
+ if c.RunE != nil {
+ if err := c.RunE(c, argWoFlags); err != nil {
+ return err
+ }
+ } else {
+ c.Run(c, argWoFlags)
+ }
+ if c.PostRunE != nil {
+ if err := c.PostRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ } else if c.PostRun != nil {
+ c.PostRun(c, argWoFlags)
+ }
+ for p := c; p != nil; p = p.Parent() {
+ if p.PersistentPostRunE != nil {
+ if err := p.PersistentPostRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ if !EnableTraverseRunHooks {
+ break
+ }
+ } else if p.PersistentPostRun != nil {
+ p.PersistentPostRun(c, argWoFlags)
+ if !EnableTraverseRunHooks {
+ break
+ }
+ }
+ }
+
+ return nil
+}
+
+func (c *Command) preRun() {
+ for _, x := range initializers {
+ x()
+ }
+}
+
+func (c *Command) postRun() {
+ for _, x := range finalizers {
+ x()
+ }
+}
+
+// ExecuteContext is the same as Execute(), but sets the ctx on the command.
+// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs
+// functions.
+func (c *Command) ExecuteContext(ctx context.Context) error {
+ c.ctx = ctx
+ return c.Execute()
+}
+
+// Execute uses the args (os.Args[1:] by default)
+// and runs through the command tree, finding appropriate matches
+// for commands and then corresponding flags.
+func (c *Command) Execute() error {
+ _, err := c.ExecuteC()
+ return err
+}
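+
+// Example (illustrative sketch, not part of upstream cobra): a minimal entry
+// point; Execute resolves the subcommand, parses flags, and runs the matched
+// command's Run/RunE:
+//
+//	func main() {
+//		rootCmd := &cobra.Command{
+//			Use: "app",
+//			RunE: func(cmd *cobra.Command, args []string) error {
+//				fmt.Println("hello from app")
+//				return nil
+//			},
+//		}
+//		if err := rootCmd.Execute(); err != nil {
+//			os.Exit(1)
+//		}
+//	}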
+
+// ExecuteContextC is the same as ExecuteC(), but sets the ctx on the command.
+// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs
+// functions.
+func (c *Command) ExecuteContextC(ctx context.Context) (*Command, error) {
+ c.ctx = ctx
+ return c.ExecuteC()
+}
+
+// ExecuteC executes the command.
+func (c *Command) ExecuteC() (cmd *Command, err error) {
+ if c.ctx == nil {
+ c.ctx = context.Background()
+ }
+
+ // Regardless of what command execute is called on, run on Root only
+ if c.HasParent() {
+ return c.Root().ExecuteC()
+ }
+
+ // windows hook
+ if preExecHookFn != nil {
+ preExecHookFn(c)
+ }
+
+ // initialize help at the last point to allow for user overriding
+ c.InitDefaultHelpCmd()
+ // initialize completion at the last point to allow for user overriding
+ c.InitDefaultCompletionCmd()
+
+ // Now that all commands have been created, let's make sure all groups
+ // are properly created also
+ c.checkCommandGroups()
+
+ args := c.args
+
+ // Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155
+ if c.args == nil && filepath.Base(os.Args[0]) != "cobra.test" {
+ args = os.Args[1:]
+ }
+
+ // initialize the hidden command to be used for shell completion
+ c.initCompleteCmd(args)
+
+ var flags []string
+ if c.TraverseChildren {
+ cmd, flags, err = c.Traverse(args)
+ } else {
+ cmd, flags, err = c.Find(args)
+ }
+ if err != nil {
+ // If we found a subcommand and parsing then failed, report the error for that subcommand
+ if cmd != nil {
+ c = cmd
+ }
+ if !c.SilenceErrors {
+ c.PrintErrln(c.ErrPrefix(), err.Error())
+ c.PrintErrf("Run '%v --help' for usage.\n", c.CommandPath())
+ }
+ return c, err
+ }
+
+ cmd.commandCalledAs.called = true
+ if cmd.commandCalledAs.name == "" {
+ cmd.commandCalledAs.name = cmd.Name()
+ }
+
+ // We have to pass global context to children command
+ // if context is present on the parent command.
+ if cmd.ctx == nil {
+ cmd.ctx = c.ctx
+ }
+
+ err = cmd.execute(flags)
+ if err != nil {
+ // Always show help if requested, even if SilenceErrors is in
+ // effect
+ if errors.Is(err, flag.ErrHelp) {
+ cmd.HelpFunc()(cmd, args)
+ return cmd, nil
+ }
+
+ // If root command has SilenceErrors flagged,
+ // all subcommands should respect it
+ if !cmd.SilenceErrors && !c.SilenceErrors {
+ c.PrintErrln(cmd.ErrPrefix(), err.Error())
+ }
+
+ // If root command has SilenceUsage flagged,
+ // all subcommands should respect it
+ if !cmd.SilenceUsage && !c.SilenceUsage {
+ c.Println(cmd.UsageString())
+ }
+ }
+ return cmd, err
+}
+
+func (c *Command) ValidateArgs(args []string) error {
+ if c.Args == nil {
+ return ArbitraryArgs(c, args)
+ }
+ return c.Args(c, args)
+}
+
+// ValidateRequiredFlags validates that all required flags are present, returning an error otherwise.
+func (c *Command) ValidateRequiredFlags() error {
+ if c.DisableFlagParsing {
+ return nil
+ }
+
+ flags := c.Flags()
+ missingFlagNames := []string{}
+ flags.VisitAll(func(pflag *flag.Flag) {
+ requiredAnnotation, found := pflag.Annotations[BashCompOneRequiredFlag]
+ if !found {
+ return
+ }
+ if (requiredAnnotation[0] == "true") && !pflag.Changed {
+ missingFlagNames = append(missingFlagNames, pflag.Name)
+ }
+ })
+
+ if len(missingFlagNames) > 0 {
+ return fmt.Errorf(`required flag(s) "%s" not set`, strings.Join(missingFlagNames, `", "`))
+ }
+ return nil
+}
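+
+// Example (illustrative sketch, not part of upstream cobra): a flag becomes
+// required through the BashCompOneRequiredFlag annotation, typically set via
+// MarkFlagRequired; ValidateRequiredFlags then rejects invocations omitting it:
+//
+//	cmd.Flags().String("name", "", "resource name")
+//	_ = cmd.MarkFlagRequired("name")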
+
+// checkCommandGroups checks if a command has been added to a group that does not exist.
+// If so, we panic because it indicates a coding error that should be corrected.
+func (c *Command) checkCommandGroups() {
+ for _, sub := range c.commands {
+ // if the group is not defined, let the developer know right away
+ if sub.GroupID != "" && !c.ContainsGroup(sub.GroupID) {
+ panic(fmt.Sprintf("group id '%s' is not defined for subcommand '%s'", sub.GroupID, sub.CommandPath()))
+ }
+
+ sub.checkCommandGroups()
+ }
+}
+
+// InitDefaultHelpFlag adds the default help flag to c.
+// It is called automatically when executing c or when calling help and usage.
+// If c already has a help flag, it does nothing.
+func (c *Command) InitDefaultHelpFlag() {
+ c.mergePersistentFlags()
+ if c.Flags().Lookup("help") == nil {
+ usage := "help for "
+ name := c.displayName()
+ if name == "" {
+ usage += "this command"
+ } else {
+ usage += name
+ }
+ c.Flags().BoolP("help", "h", false, usage)
+ _ = c.Flags().SetAnnotation("help", FlagSetByCobraAnnotation, []string{"true"})
+ }
+}
+
+// InitDefaultVersionFlag adds the default version flag to c.
+// It is called automatically when executing c.
+// If c already has a version flag, it does nothing.
+// If c.Version is empty, it does nothing.
+func (c *Command) InitDefaultVersionFlag() {
+ if c.Version == "" {
+ return
+ }
+
+ c.mergePersistentFlags()
+ if c.Flags().Lookup("version") == nil {
+ usage := "version for "
+ if c.Name() == "" {
+ usage += "this command"
+ } else {
+ usage += c.Name()
+ }
+ if c.Flags().ShorthandLookup("v") == nil {
+ c.Flags().BoolP("version", "v", false, usage)
+ } else {
+ c.Flags().Bool("version", false, usage)
+ }
+ _ = c.Flags().SetAnnotation("version", FlagSetByCobraAnnotation, []string{"true"})
+ }
+}
+
+// InitDefaultHelpCmd adds the default help command to c.
+// It is called automatically when executing c or when calling help and usage.
+// If c already has a help command or c has no subcommands, it does nothing.
+func (c *Command) InitDefaultHelpCmd() {
+ if !c.HasSubCommands() {
+ return
+ }
+
+ if c.helpCommand == nil {
+ c.helpCommand = &Command{
+ Use: "help [command]",
+ Short: "Help about any command",
+ Long: `Help provides help for any command in the application.
+Simply type ` + c.displayName() + ` help [path to command] for full details.`,
+ ValidArgsFunction: func(c *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
+ var completions []string
+ cmd, _, e := c.Root().Find(args)
+ if e != nil {
+ return nil, ShellCompDirectiveNoFileComp
+ }
+ if cmd == nil {
+ // Root help command.
+ cmd = c.Root()
+ }
+ for _, subCmd := range cmd.Commands() {
+ if subCmd.IsAvailableCommand() || subCmd == cmd.helpCommand {
+ if strings.HasPrefix(subCmd.Name(), toComplete) {
+ completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short))
+ }
+ }
+ }
+ return completions, ShellCompDirectiveNoFileComp
+ },
+ Run: func(c *Command, args []string) {
+ cmd, _, e := c.Root().Find(args)
+ if cmd == nil || e != nil {
+ c.Printf("Unknown help topic %#q\n", args)
+ CheckErr(c.Root().Usage())
+ } else {
+ cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown
+ cmd.InitDefaultVersionFlag() // make possible 'version' flag to be shown
+ CheckErr(cmd.Help())
+ }
+ },
+ GroupID: c.helpCommandGroupID,
+ }
+ }
+ c.RemoveCommand(c.helpCommand)
+ c.AddCommand(c.helpCommand)
+}
+
+// ResetCommands deletes the parent, subcommands and help command from c.
+func (c *Command) ResetCommands() {
+ c.parent = nil
+ c.commands = nil
+ c.helpCommand = nil
+ c.parentsPflags = nil
+}
+
+// Sorts commands by their names.
+type commandSorterByName []*Command
+
+func (c commandSorterByName) Len() int { return len(c) }
+func (c commandSorterByName) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
+func (c commandSorterByName) Less(i, j int) bool { return c[i].Name() < c[j].Name() }
+
+// Commands returns a sorted slice of child commands.
+func (c *Command) Commands() []*Command {
+ // do not sort commands if they are already sorted or sorting was disabled
+ if EnableCommandSorting && !c.commandsAreSorted {
+ sort.Sort(commandSorterByName(c.commands))
+ c.commandsAreSorted = true
+ }
+ return c.commands
+}
+
+// AddCommand adds one or more commands to this parent command.
+func (c *Command) AddCommand(cmds ...*Command) {
+ for i, x := range cmds {
+ if cmds[i] == c {
+ panic("Command can't be a child of itself")
+ }
+ cmds[i].parent = c
+ // update max lengths
+ usageLen := len(x.Use)
+ if usageLen > c.commandsMaxUseLen {
+ c.commandsMaxUseLen = usageLen
+ }
+ commandPathLen := len(x.CommandPath())
+ if commandPathLen > c.commandsMaxCommandPathLen {
+ c.commandsMaxCommandPathLen = commandPathLen
+ }
+ nameLen := len(x.Name())
+ if nameLen > c.commandsMaxNameLen {
+ c.commandsMaxNameLen = nameLen
+ }
+ // If global normalization function exists, update all children
+ if c.globNormFunc != nil {
+ x.SetGlobalNormalizationFunc(c.globNormFunc)
+ }
+ c.commands = append(c.commands, x)
+ c.commandsAreSorted = false
+ }
+}
+
+// Groups returns a slice of child command groups.
+func (c *Command) Groups() []*Group {
+ return c.commandgroups
+}
+
+// AllChildCommandsHaveGroup returns true if all subcommands are assigned to a group.
+func (c *Command) AllChildCommandsHaveGroup() bool {
+ for _, sub := range c.commands {
+ if (sub.IsAvailableCommand() || sub == c.helpCommand) && sub.GroupID == "" {
+ return false
+ }
+ }
+ return true
+}
+
+// ContainsGroup returns true if groupID exists in the list of command groups.
+func (c *Command) ContainsGroup(groupID string) bool {
+ for _, x := range c.commandgroups {
+ if x.ID == groupID {
+ return true
+ }
+ }
+ return false
+}
+
+// AddGroup adds one or more command groups to this parent command.
+func (c *Command) AddGroup(groups ...*Group) {
+ c.commandgroups = append(c.commandgroups, groups...)
+}
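+
+// Example (not part of upstream Cobra; a minimal sketch with hypothetical
+// names): a group must be registered via AddGroup before any subcommand
+// references it through GroupID, otherwise checkCommandGroups panics.
+//
+//	root := &Command{Use: "app"}
+//	root.AddGroup(&Group{ID: "mgmt", Title: "Management Commands:"})
+//	root.AddCommand(&Command{
+//		Use:     "status",
+//		GroupID: "mgmt",
+//		Run:     func(*Command, []string) {},
+//	})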
+
+// RemoveCommand removes one or more commands from a parent command.
+func (c *Command) RemoveCommand(cmds ...*Command) {
+ commands := []*Command{}
+main:
+ for _, command := range c.commands {
+ for _, cmd := range cmds {
+ if command == cmd {
+ command.parent = nil
+ continue main
+ }
+ }
+ commands = append(commands, command)
+ }
+ c.commands = commands
+ // recompute all lengths
+ c.commandsMaxUseLen = 0
+ c.commandsMaxCommandPathLen = 0
+ c.commandsMaxNameLen = 0
+ for _, command := range c.commands {
+ usageLen := len(command.Use)
+ if usageLen > c.commandsMaxUseLen {
+ c.commandsMaxUseLen = usageLen
+ }
+ commandPathLen := len(command.CommandPath())
+ if commandPathLen > c.commandsMaxCommandPathLen {
+ c.commandsMaxCommandPathLen = commandPathLen
+ }
+ nameLen := len(command.Name())
+ if nameLen > c.commandsMaxNameLen {
+ c.commandsMaxNameLen = nameLen
+ }
+ }
+}
+
+// Print is a convenience method to Print to the defined output, fallback to Stderr if not set.
+func (c *Command) Print(i ...interface{}) {
+ fmt.Fprint(c.OutOrStderr(), i...)
+}
+
+// Println is a convenience method to Println to the defined output, fallback to Stderr if not set.
+func (c *Command) Println(i ...interface{}) {
+ c.Print(fmt.Sprintln(i...))
+}
+
+// Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set.
+func (c *Command) Printf(format string, i ...interface{}) {
+ c.Print(fmt.Sprintf(format, i...))
+}
+
+// PrintErr is a convenience method to Print to the defined Err output, fallback to Stderr if not set.
+func (c *Command) PrintErr(i ...interface{}) {
+ fmt.Fprint(c.ErrOrStderr(), i...)
+}
+
+// PrintErrln is a convenience method to Println to the defined Err output, fallback to Stderr if not set.
+func (c *Command) PrintErrln(i ...interface{}) {
+ c.PrintErr(fmt.Sprintln(i...))
+}
+
+// PrintErrf is a convenience method to Printf to the defined Err output, fallback to Stderr if not set.
+func (c *Command) PrintErrf(format string, i ...interface{}) {
+ c.PrintErr(fmt.Sprintf(format, i...))
+}
+
+// CommandPath returns the full path to this command.
+func (c *Command) CommandPath() string {
+ if c.HasParent() {
+ return c.Parent().CommandPath() + " " + c.Name()
+ }
+ return c.displayName()
+}
+
+func (c *Command) displayName() string {
+ if displayName, ok := c.Annotations[CommandDisplayNameAnnotation]; ok {
+ return displayName
+ }
+ return c.Name()
+}
+
+// UseLine puts out the full usage for a given command (including parents).
+func (c *Command) UseLine() string {
+ var useline string
+ use := strings.Replace(c.Use, c.Name(), c.displayName(), 1)
+ if c.HasParent() {
+ useline = c.parent.CommandPath() + " " + use
+ } else {
+ useline = use
+ }
+ if c.DisableFlagsInUseLine {
+ return useline
+ }
+ if c.HasAvailableFlags() && !strings.Contains(useline, "[flags]") {
+ useline += " [flags]"
+ }
+ return useline
+}
+
+// DebugFlags is used to determine which flags have been assigned to which commands
+// and which persist.
+func (c *Command) DebugFlags() {
+ c.Println("DebugFlags called on", c.Name())
+ var debugflags func(*Command)
+
+ debugflags = func(x *Command) {
+ if x.HasFlags() || x.HasPersistentFlags() {
+ c.Println(x.Name())
+ }
+ if x.HasFlags() {
+ x.flags.VisitAll(func(f *flag.Flag) {
+ if x.HasPersistentFlags() && x.persistentFlag(f.Name) != nil {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [LP]")
+ } else {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]")
+ }
+ })
+ }
+ if x.HasPersistentFlags() {
+ x.pflags.VisitAll(func(f *flag.Flag) {
+ if x.HasFlags() {
+ if x.flags.Lookup(f.Name) == nil {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]")
+ }
+ } else {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]")
+ }
+ })
+ }
+ c.Println(x.flagErrorBuf)
+ if x.HasSubCommands() {
+ for _, y := range x.commands {
+ debugflags(y)
+ }
+ }
+ }
+
+ debugflags(c)
+}
+
+// Name returns the command's name: the first word in the use line.
+func (c *Command) Name() string {
+ name := c.Use
+ i := strings.Index(name, " ")
+ if i >= 0 {
+ name = name[:i]
+ }
+ return name
+}
+
+// HasAlias determines if a given string is an alias of the command.
+func (c *Command) HasAlias(s string) bool {
+ for _, a := range c.Aliases {
+ if commandNameMatches(a, s) {
+ return true
+ }
+ }
+ return false
+}
+
+// CalledAs returns the command name or alias that was used to invoke
+// this command or an empty string if the command has not been called.
+func (c *Command) CalledAs() string {
+ if c.commandCalledAs.called {
+ return c.commandCalledAs.name
+ }
+ return ""
+}
+
+// hasNameOrAliasPrefix returns true if the Name or any of the aliases starts
+// with the given prefix.
+func (c *Command) hasNameOrAliasPrefix(prefix string) bool {
+ if strings.HasPrefix(c.Name(), prefix) {
+ c.commandCalledAs.name = c.Name()
+ return true
+ }
+ for _, alias := range c.Aliases {
+ if strings.HasPrefix(alias, prefix) {
+ c.commandCalledAs.name = alias
+ return true
+ }
+ }
+ return false
+}
+
+// NameAndAliases returns a list of the command name and all aliases
+func (c *Command) NameAndAliases() string {
+ return strings.Join(append([]string{c.Name()}, c.Aliases...), ", ")
+}
+
+// HasExample determines if the command has an example.
+func (c *Command) HasExample() bool {
+ return len(c.Example) > 0
+}
+
+// Runnable determines if the command is itself runnable.
+func (c *Command) Runnable() bool {
+ return c.Run != nil || c.RunE != nil
+}
+
+// HasSubCommands determines if the command has children commands.
+func (c *Command) HasSubCommands() bool {
+ return len(c.commands) > 0
+}
+
+// IsAvailableCommand determines if a command is available as a non-help command
+// (this includes all non deprecated/hidden commands).
+func (c *Command) IsAvailableCommand() bool {
+ if len(c.Deprecated) != 0 || c.Hidden {
+ return false
+ }
+
+ if c.HasParent() && c.Parent().helpCommand == c {
+ return false
+ }
+
+ if c.Runnable() || c.HasAvailableSubCommands() {
+ return true
+ }
+
+ return false
+}
+
+// IsAdditionalHelpTopicCommand determines if a command is an additional
+// help topic command; additional help topic command is determined by the
+// fact that it is NOT runnable/hidden/deprecated, and has no sub commands that
+// are runnable/hidden/deprecated.
+// Concrete example: https://github.com/spf13/cobra/issues/393#issuecomment-282741924.
+func (c *Command) IsAdditionalHelpTopicCommand() bool {
+ // if a command is runnable, deprecated, or hidden it is not a 'help' command
+ if c.Runnable() || len(c.Deprecated) != 0 || c.Hidden {
+ return false
+ }
+
+ // if any non-help sub commands are found, the command is not a 'help' command
+ for _, sub := range c.commands {
+ if !sub.IsAdditionalHelpTopicCommand() {
+ return false
+ }
+ }
+
+ // the command either has no sub commands, or no non-help sub commands
+ return true
+}
+
+// HasHelpSubCommands determines if a command has any available 'help' sub commands
+// that need to be shown in the usage/help default template under 'additional help
+// topics'.
+func (c *Command) HasHelpSubCommands() bool {
+ // return true on the first found available 'help' sub command
+ for _, sub := range c.commands {
+ if sub.IsAdditionalHelpTopicCommand() {
+ return true
+ }
+ }
+
+ // the command either has no sub commands, or no available 'help' sub commands
+ return false
+}
+
+// HasAvailableSubCommands determines if a command has available sub commands that
+// need to be shown in the usage/help default template under 'available commands'.
+func (c *Command) HasAvailableSubCommands() bool {
+ // return true on the first found available (non deprecated/help/hidden)
+ // sub command
+ for _, sub := range c.commands {
+ if sub.IsAvailableCommand() {
+ return true
+ }
+ }
+
+ // the command either has no sub commands, or no available (non deprecated/help/hidden)
+ // sub commands
+ return false
+}
+
+// HasParent determines if the command is a child command.
+func (c *Command) HasParent() bool {
+ return c.parent != nil
+}
+
+// GlobalNormalizationFunc returns the global normalization function or nil if it doesn't exist.
+func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName {
+ return c.globNormFunc
+}
+
+// Flags returns the complete FlagSet that applies
+// to this command (local and persistent declared here and by all parents).
+func (c *Command) Flags() *flag.FlagSet {
+ if c.flags == nil {
+ c.flags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.flags.SetOutput(c.flagErrorBuf)
+ }
+
+ return c.flags
+}
+
+// LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands.
+// This function does not modify the flags of the current command; its purpose is to return the current state.
+func (c *Command) LocalNonPersistentFlags() *flag.FlagSet {
+ persistentFlags := c.PersistentFlags()
+
+ out := flag.NewFlagSet(c.displayName(), flag.ContinueOnError)
+ c.LocalFlags().VisitAll(func(f *flag.Flag) {
+ if persistentFlags.Lookup(f.Name) == nil {
+ out.AddFlag(f)
+ }
+ })
+ return out
+}
+
+// LocalFlags returns the local FlagSet specifically set in the current command.
+// This function does not modify the flags of the current command; its purpose is to return the current state.
+func (c *Command) LocalFlags() *flag.FlagSet {
+ c.mergePersistentFlags()
+
+ if c.lflags == nil {
+ c.lflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.lflags.SetOutput(c.flagErrorBuf)
+ }
+ c.lflags.SortFlags = c.Flags().SortFlags
+ if c.globNormFunc != nil {
+ c.lflags.SetNormalizeFunc(c.globNormFunc)
+ }
+
+ addToLocal := func(f *flag.Flag) {
+ // Add the flag if it is not a parent PFlag, or it shadows a parent PFlag
+ if c.lflags.Lookup(f.Name) == nil && f != c.parentsPflags.Lookup(f.Name) {
+ c.lflags.AddFlag(f)
+ }
+ }
+ c.Flags().VisitAll(addToLocal)
+ c.PersistentFlags().VisitAll(addToLocal)
+ return c.lflags
+}
+
+// InheritedFlags returns all flags which were inherited from parent commands.
+// This function does not modify the flags of the current command; its purpose is to return the current state.
+func (c *Command) InheritedFlags() *flag.FlagSet {
+ c.mergePersistentFlags()
+
+ if c.iflags == nil {
+ c.iflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.iflags.SetOutput(c.flagErrorBuf)
+ }
+
+ local := c.LocalFlags()
+ if c.globNormFunc != nil {
+ c.iflags.SetNormalizeFunc(c.globNormFunc)
+ }
+
+ c.parentsPflags.VisitAll(func(f *flag.Flag) {
+ if c.iflags.Lookup(f.Name) == nil && local.Lookup(f.Name) == nil {
+ c.iflags.AddFlag(f)
+ }
+ })
+ return c.iflags
+}
+
+// NonInheritedFlags returns all flags which were not inherited from parent commands.
+// This function does not modify the flags of the current command; its purpose is to return the current state.
+func (c *Command) NonInheritedFlags() *flag.FlagSet {
+ return c.LocalFlags()
+}
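+
+// Example (not part of upstream Cobra; a minimal sketch with hypothetical
+// names): a persistent flag declared on the root appears in a child's
+// InheritedFlags but not in its LocalNonPersistentFlags.
+//
+//	root := &Command{Use: "app"}
+//	child := &Command{Use: "sub", Run: func(*Command, []string) {}}
+//	root.AddCommand(child)
+//	root.PersistentFlags().Bool("verbose", false, "verbose output")
+//	child.InheritedFlags().Lookup("verbose")          // non-nil
+//	child.LocalNonPersistentFlags().Lookup("verbose") // nil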
+
+// PersistentFlags returns the persistent FlagSet specifically set in the current command.
+func (c *Command) PersistentFlags() *flag.FlagSet {
+ if c.pflags == nil {
+ c.pflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.pflags.SetOutput(c.flagErrorBuf)
+ }
+ return c.pflags
+}
+
+// ResetFlags deletes all flags from command.
+func (c *Command) ResetFlags() {
+ c.flagErrorBuf = new(bytes.Buffer)
+ c.flagErrorBuf.Reset()
+ c.flags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError)
+ c.flags.SetOutput(c.flagErrorBuf)
+ c.pflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError)
+ c.pflags.SetOutput(c.flagErrorBuf)
+
+ c.lflags = nil
+ c.iflags = nil
+ c.parentsPflags = nil
+}
+
+// HasFlags checks if the command contains any flags (local plus persistent from the entire structure).
+func (c *Command) HasFlags() bool {
+ return c.Flags().HasFlags()
+}
+
+// HasPersistentFlags checks if the command contains persistent flags.
+func (c *Command) HasPersistentFlags() bool {
+ return c.PersistentFlags().HasFlags()
+}
+
+// HasLocalFlags checks if the command has flags specifically declared locally.
+func (c *Command) HasLocalFlags() bool {
+ return c.LocalFlags().HasFlags()
+}
+
+// HasInheritedFlags checks if the command has flags inherited from its parent command.
+func (c *Command) HasInheritedFlags() bool {
+ return c.InheritedFlags().HasFlags()
+}
+
+// HasAvailableFlags checks if the command contains any flags (local plus persistent from the entire
+// structure) which are not hidden or deprecated.
+func (c *Command) HasAvailableFlags() bool {
+ return c.Flags().HasAvailableFlags()
+}
+
+// HasAvailablePersistentFlags checks if the command contains persistent flags which are not hidden or deprecated.
+func (c *Command) HasAvailablePersistentFlags() bool {
+ return c.PersistentFlags().HasAvailableFlags()
+}
+
+// HasAvailableLocalFlags checks if the command has flags specifically declared locally which are not hidden
+// or deprecated.
+func (c *Command) HasAvailableLocalFlags() bool {
+ return c.LocalFlags().HasAvailableFlags()
+}
+
+// HasAvailableInheritedFlags checks if the command has flags inherited from its parent command which are
+// not hidden or deprecated.
+func (c *Command) HasAvailableInheritedFlags() bool {
+ return c.InheritedFlags().HasAvailableFlags()
+}
+
+// Flag climbs up the command tree looking for matching flag.
+func (c *Command) Flag(name string) (flag *flag.Flag) {
+ flag = c.Flags().Lookup(name)
+
+ if flag == nil {
+ flag = c.persistentFlag(name)
+ }
+
+ return
+}
+
+// Recursively find matching persistent flag.
+func (c *Command) persistentFlag(name string) (flag *flag.Flag) {
+ if c.HasPersistentFlags() {
+ flag = c.PersistentFlags().Lookup(name)
+ }
+
+ if flag == nil {
+ c.updateParentsPflags()
+ flag = c.parentsPflags.Lookup(name)
+ }
+ return
+}
+
+// ParseFlags parses persistent flag tree and local flags.
+func (c *Command) ParseFlags(args []string) error {
+ if c.DisableFlagParsing {
+ return nil
+ }
+
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ beforeErrorBufLen := c.flagErrorBuf.Len()
+ c.mergePersistentFlags()
+
+ // do it here after merging all flags and just before parse
+ c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist)
+
+ err := c.Flags().Parse(args)
+ // Print warnings if they occurred (e.g. deprecated flag messages).
+ if c.flagErrorBuf.Len()-beforeErrorBufLen > 0 && err == nil {
+ c.Print(c.flagErrorBuf.String())
+ }
+
+ return err
+}
+
+// Parent returns a command's parent command.
+func (c *Command) Parent() *Command {
+ return c.parent
+}
+
+// mergePersistentFlags merges c.PersistentFlags() to c.Flags()
+// and adds missing persistent flags of all parents.
+func (c *Command) mergePersistentFlags() {
+ c.updateParentsPflags()
+ c.Flags().AddFlagSet(c.PersistentFlags())
+ c.Flags().AddFlagSet(c.parentsPflags)
+}
+
+// updateParentsPflags updates c.parentsPflags by adding
+// new persistent flags of all parents.
+// If c.parentsPflags == nil, it creates a new flag set.
+func (c *Command) updateParentsPflags() {
+ if c.parentsPflags == nil {
+ c.parentsPflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError)
+ c.parentsPflags.SetOutput(c.flagErrorBuf)
+ c.parentsPflags.SortFlags = false
+ }
+
+ if c.globNormFunc != nil {
+ c.parentsPflags.SetNormalizeFunc(c.globNormFunc)
+ }
+
+ c.Root().PersistentFlags().AddFlagSet(flag.CommandLine)
+
+ c.VisitParents(func(parent *Command) {
+ c.parentsPflags.AddFlagSet(parent.PersistentFlags())
+ })
+}
+
+// commandNameMatches checks if two command names are equal
+// taking into account case sensitivity according to
+// EnableCaseInsensitive global configuration.
+func commandNameMatches(s string, t string) bool {
+ if EnableCaseInsensitive {
+ return strings.EqualFold(s, t)
+ }
+
+ return s == t
+}
diff --git a/vendor/github.com/spf13/cobra/command_notwin.go b/vendor/github.com/spf13/cobra/command_notwin.go
new file mode 100644
index 0000000..307f0c1
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command_notwin.go
@@ -0,0 +1,20 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows
+// +build !windows
+
+package cobra
+
+var preExecHookFn func(*Command)
diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go
new file mode 100644
index 0000000..adbef39
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command_win.go
@@ -0,0 +1,41 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+// +build windows
+
+package cobra
+
+import (
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/inconshreveable/mousetrap"
+)
+
+var preExecHookFn = preExecHook
+
+func preExecHook(c *Command) {
+ if MousetrapHelpText != "" && mousetrap.StartedByExplorer() {
+ c.Print(MousetrapHelpText)
+ if MousetrapDisplayDuration > 0 {
+ time.Sleep(MousetrapDisplayDuration)
+ } else {
+ c.Println("Press return to continue...")
+ fmt.Scanln()
+ }
+ os.Exit(1)
+ }
+}
diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go
new file mode 100644
index 0000000..c0c08b0
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/completions.go
@@ -0,0 +1,939 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+ "fmt"
+ "os"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/spf13/pflag"
+)
+
+const (
+ // ShellCompRequestCmd is the name of the hidden command that is used to request
+ // completion results from the program. It is used by the shell completion scripts.
+ ShellCompRequestCmd = "__complete"
+ // ShellCompNoDescRequestCmd is the name of the hidden command that is used to request
+ // completion results without their description. It is used by the shell completion scripts.
+ ShellCompNoDescRequestCmd = "__completeNoDesc"
+)
+
+// Global map of flag completion functions. Make sure to use flagCompletionMutex before you read from or write to it.
+var flagCompletionFunctions = map[*pflag.Flag]func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective){}
+
+// lock for reading and writing from flagCompletionFunctions
+var flagCompletionMutex = &sync.RWMutex{}
+
+// ShellCompDirective is a bit map representing the different behaviors the shell
+// can be instructed to have once completions have been provided.
+type ShellCompDirective int
+
+type flagCompError struct {
+ subCommand string
+ flagName string
+}
+
+func (e *flagCompError) Error() string {
+ return "Subcommand '" + e.subCommand + "' does not support flag '" + e.flagName + "'"
+}
+
+const (
+ // ShellCompDirectiveError indicates an error occurred and completions should be ignored.
+ ShellCompDirectiveError ShellCompDirective = 1 << iota
+
+ // ShellCompDirectiveNoSpace indicates that the shell should not add a space
+ // after the completion even if there is a single completion provided.
+ ShellCompDirectiveNoSpace
+
+ // ShellCompDirectiveNoFileComp indicates that the shell should not provide
+ // file completion even when no completion is provided.
+ ShellCompDirectiveNoFileComp
+
+ // ShellCompDirectiveFilterFileExt indicates that the provided completions
+ // should be used as file extension filters.
+ // For flags, using Command.MarkFlagFilename() and Command.MarkPersistentFlagFilename()
+ // is a shortcut to using this directive explicitly. The BashCompFilenameExt
+ // annotation can also be used to obtain the same behavior for flags.
+ ShellCompDirectiveFilterFileExt
+
+ // ShellCompDirectiveFilterDirs indicates that only directory names should
+ // be provided in file completion. To request directory names within another
+ // directory, the returned completions should specify the directory within
+ // which to search. The BashCompSubdirsInDir annotation can be used to
+ // obtain the same behavior but only for flags.
+ ShellCompDirectiveFilterDirs
+
+ // ShellCompDirectiveKeepOrder indicates that the shell should preserve the order
+ // in which the completions are provided
+ ShellCompDirectiveKeepOrder
+
+ // ===========================================================================
+
+ // All directives using iota should be above this one.
+ // For internal use.
+ shellCompDirectiveMaxValue
+
+ // ShellCompDirectiveDefault indicates to let the shell perform its default
+ // behavior after completions have been provided.
+ // This one must be last to avoid messing up the iota count.
+ ShellCompDirectiveDefault ShellCompDirective = 0
+)
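+
+// Example (not part of upstream Cobra; a minimal sketch): the directives are
+// bit flags and may be combined with a bitwise OR when returned from a
+// completion function.
+//
+//	return []string{"json", "yaml"}, ShellCompDirectiveNoFileComp | ShellCompDirectiveKeepOrder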
+
+const (
+ // Constants for the completion command
+ compCmdName = "completion"
+ compCmdNoDescFlagName = "no-descriptions"
+ compCmdNoDescFlagDesc = "disable completion descriptions"
+ compCmdNoDescFlagDefault = false
+)
+
+// CompletionOptions are the options to control shell completion
+type CompletionOptions struct {
+ // DisableDefaultCmd prevents Cobra from creating a default 'completion' command
+ DisableDefaultCmd bool
+ // DisableNoDescFlag prevents Cobra from creating the '--no-descriptions' flag
+ // for shells that support completion descriptions
+ DisableNoDescFlag bool
+ // DisableDescriptions turns off all completion descriptions for shells
+ // that support them
+ DisableDescriptions bool
+ // HiddenDefaultCmd makes the default 'completion' command hidden
+ HiddenDefaultCmd bool
+}
+
+// NoFileCompletions can be used to disable file completion for commands that should
+// not trigger file completions.
+func NoFileCompletions(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
+ return nil, ShellCompDirectiveNoFileComp
+}
+
+// FixedCompletions can be used to create a completion function which always
+// returns the same results.
+func FixedCompletions(choices []string, directive ShellCompDirective) func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
+ return func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
+ return choices, directive
+ }
+}
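+
+// Example (not part of upstream Cobra; a minimal sketch with a hypothetical
+// --level flag): FixedCompletions pairs naturally with
+// RegisterFlagCompletionFunc for flags whose values form a closed set.
+//
+//	cmd.Flags().String("level", "info", "log level")
+//	_ = cmd.RegisterFlagCompletionFunc("level",
+//		FixedCompletions([]string{"debug", "info", "warn", "error"}, ShellCompDirectiveNoFileComp))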
+
+// RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag.
+func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)) error {
+ flag := c.Flag(flagName)
+ if flag == nil {
+ return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' does not exist", flagName)
+ }
+ flagCompletionMutex.Lock()
+ defer flagCompletionMutex.Unlock()
+
+ if _, exists := flagCompletionFunctions[flag]; exists {
+ return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' already registered", flagName)
+ }
+ flagCompletionFunctions[flag] = f
+ return nil
+}
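+
+// Example (not part of upstream Cobra; a minimal sketch with a hypothetical
+// --format flag): a completion function can compute choices dynamically from
+// the partially typed value.
+//
+//	cmd.Flags().String("format", "table", "output format")
+//	_ = cmd.RegisterFlagCompletionFunc("format",
+//		func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
+//			var comps []string
+//			for _, f := range []string{"table", "json", "yaml"} {
+//				if strings.HasPrefix(f, toComplete) {
+//					comps = append(comps, f)
+//				}
+//			}
+//			return comps, ShellCompDirectiveNoFileComp
+//		})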
+
+// GetFlagCompletionFunc returns the completion function for the given flag of the command, if available.
+func (c *Command) GetFlagCompletionFunc(flagName string) (func(*Command, []string, string) ([]string, ShellCompDirective), bool) {
+ flag := c.Flag(flagName)
+ if flag == nil {
+ return nil, false
+ }
+
+ flagCompletionMutex.RLock()
+ defer flagCompletionMutex.RUnlock()
+
+ completionFunc, exists := flagCompletionFunctions[flag]
+ return completionFunc, exists
+}
+
+// Returns a string listing the different directives enabled in the specified parameter
+func (d ShellCompDirective) string() string {
+ var directives []string
+ if d&ShellCompDirectiveError != 0 {
+ directives = append(directives, "ShellCompDirectiveError")
+ }
+ if d&ShellCompDirectiveNoSpace != 0 {
+ directives = append(directives, "ShellCompDirectiveNoSpace")
+ }
+ if d&ShellCompDirectiveNoFileComp != 0 {
+ directives = append(directives, "ShellCompDirectiveNoFileComp")
+ }
+ if d&ShellCompDirectiveFilterFileExt != 0 {
+ directives = append(directives, "ShellCompDirectiveFilterFileExt")
+ }
+ if d&ShellCompDirectiveFilterDirs != 0 {
+ directives = append(directives, "ShellCompDirectiveFilterDirs")
+ }
+ if d&ShellCompDirectiveKeepOrder != 0 {
+ directives = append(directives, "ShellCompDirectiveKeepOrder")
+ }
+ if len(directives) == 0 {
+ directives = append(directives, "ShellCompDirectiveDefault")
+ }
+
+ if d >= shellCompDirectiveMaxValue {
+ return fmt.Sprintf("ERROR: unexpected ShellCompDirective value: %d", d)
+ }
+ return strings.Join(directives, ", ")
+}
+
+// initCompleteCmd adds a special hidden command that can be used to request custom completions.
+func (c *Command) initCompleteCmd(args []string) {
+ completeCmd := &Command{
+ Use: fmt.Sprintf("%s [command-line]", ShellCompRequestCmd),
+ Aliases: []string{ShellCompNoDescRequestCmd},
+ DisableFlagsInUseLine: true,
+ Hidden: true,
+ DisableFlagParsing: true,
+ Args: MinimumNArgs(1),
+ Short: "Request shell completion choices for the specified command-line",
+ Long: fmt.Sprintf("%[2]s is a special command that is used by the shell completion logic\n%[1]s",
+ "to request completion choices for the specified command-line.", ShellCompRequestCmd),
+ Run: func(cmd *Command, args []string) {
+ finalCmd, completions, directive, err := cmd.getCompletions(args)
+ if err != nil {
+ CompErrorln(err.Error())
+ // Keep going for multiple reasons:
+ // 1- There could be some valid completions even though there was an error
+ // 2- Even without completions, we need to print the directive
+ }
+
+ noDescriptions := cmd.CalledAs() == ShellCompNoDescRequestCmd
+ if !noDescriptions {
+ if doDescriptions, err := strconv.ParseBool(getEnvConfig(cmd, configEnvVarSuffixDescriptions)); err == nil {
+ noDescriptions = !doDescriptions
+ }
+ }
+ noActiveHelp := GetActiveHelpConfig(finalCmd) == activeHelpGlobalDisable
+ out := finalCmd.OutOrStdout()
+ for _, comp := range completions {
+ if noActiveHelp && strings.HasPrefix(comp, activeHelpMarker) {
+ // Remove all activeHelp entries if it's disabled.
+ continue
+ }
+ if noDescriptions {
+ // Remove any description that may be included following a tab character.
+ comp = strings.SplitN(comp, "\t", 2)[0]
+ }
+
+ // Make sure we only write the first line to the output.
+ // This is needed if a description contains a linebreak.
+ // Otherwise the shell scripts will interpret the other lines as new flags
+ // and could therefore provide a wrong completion.
+ comp = strings.SplitN(comp, "\n", 2)[0]
+
+ // Finally trim the completion. This is especially important to get rid
+ // of a trailing tab when there is no description following it.
+ // For example, a sub-command without a description should not be completed
+ // with a tab at the end (or else zsh will show a -- following it
+ // although there is no description).
+ comp = strings.TrimSpace(comp)
+
+ // Print each possible completion to the output for the completion script to consume.
+ fmt.Fprintln(out, comp)
+ }
+
+ // As the last printout, print the completion directive for the completion script to parse.
+ // The directive integer must be the last value following a single colon (:).
+ // The completion script expects :<directive>
+ fmt.Fprintf(out, ":%d\n", directive)
+
+ // Print some helpful info to stderr for the user to understand.
+ // Output from stderr must be ignored by the completion script.
+ fmt.Fprintf(finalCmd.ErrOrStderr(), "Completion ended with directive: %s\n", directive.string())
+ },
+ }
+ c.AddCommand(completeCmd)
+ subCmd, _, err := c.Find(args)
+ if err != nil || subCmd.Name() != ShellCompRequestCmd {
+ // Only create this special command if it is actually being called.
+ // This reduces possible side-effects of creating such a command;
+ // for example, having this command would cause problems for a
+ // cobra program that only consists of the root command, since this
+ // command would cause the root command to suddenly have a subcommand.
+ c.RemoveCommand(completeCmd)
+ }
+}
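+
+// Example (not part of upstream Cobra; "myprog" is a hypothetical program
+// name): the shell scripts invoke the hidden command with the current
+// command-line, ending with the partially typed word (possibly empty):
+//
+//	myprog __complete sub ""
+//
+// Each completion is printed on its own line, followed by ":<directive>",
+// e.g. ":4" for ShellCompDirectiveNoFileComp.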
+
+func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDirective, error) {
+ // The last argument, which is not completely typed by the user,
+ // should not be part of the list of arguments
+ toComplete := args[len(args)-1]
+ trimmedArgs := args[:len(args)-1]
+
+ var finalCmd *Command
+ var finalArgs []string
+ var err error
+ // Find the real command for which completion must be performed
+ // check if we need to traverse here to parse local flags on parent commands
+ if c.Root().TraverseChildren {
+ finalCmd, finalArgs, err = c.Root().Traverse(trimmedArgs)
+ } else {
+ // For Root commands that don't specify any value for their Args fields, when we call
+ // Find(), if those Root commands don't have any sub-commands, they will accept arguments.
+ // However, because we have added the __complete sub-command in the current code path, the
+ // call to Find() -> legacyArgs() will return an error if there are any arguments.
+ // To avoid this, we first remove the __complete command to get back to having no sub-commands.
+ rootCmd := c.Root()
+ if len(rootCmd.Commands()) == 1 {
+ rootCmd.RemoveCommand(c)
+ }
+
+ finalCmd, finalArgs, err = rootCmd.Find(trimmedArgs)
+ }
+ if err != nil {
+ // Unable to find the real command. E.g., someInvalidCmd
+ return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("unable to find a command for arguments: %v", trimmedArgs)
+ }
+ finalCmd.ctx = c.ctx
+
+ // These flags are normally added when `execute()` is called on `finalCmd`,
+ // however, when doing completion, we don't call `finalCmd.execute()`.
+ // Let's add the --help and --version flag ourselves but only if the finalCmd
+ // has not disabled flag parsing; if flag parsing is disabled, it is up to the
+ // finalCmd itself to handle the completion of *all* flags.
+ if !finalCmd.DisableFlagParsing {
+ finalCmd.InitDefaultHelpFlag()
+ finalCmd.InitDefaultVersionFlag()
+ }
+
+ // Check if we are doing flag value completion before parsing the flags.
+ // This is important because if we are completing a flag value, we need to also
+ // remove the flag name argument from the list of finalArgs or else the parsing
+ // could fail due to an invalid value (incomplete) for the flag.
+ flag, finalArgs, toComplete, flagErr := checkIfFlagCompletion(finalCmd, finalArgs, toComplete)
+
+ // Check if interspersed is false or -- was set on a previous arg.
+ // This works by counting the arguments. Normally -- is not counted as an arg, but
+ // if -- was already set or interspersed is false and there is already one arg, then
+ // the extra added -- is counted as an arg.
+ flagCompletion := true
+ _ = finalCmd.ParseFlags(append(finalArgs, "--"))
+ newArgCount := finalCmd.Flags().NArg()
+
+ // Parse the flags early so we can check if required flags are set
+ if err = finalCmd.ParseFlags(finalArgs); err != nil {
+ return finalCmd, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error())
+ }
+
+ realArgCount := finalCmd.Flags().NArg()
+ if newArgCount > realArgCount {
+ // don't do flag completion (see above)
+ flagCompletion = false
+ }
+ // Error while attempting to parse flags
+ if flagErr != nil {
+ // If error type is flagCompError and we don't want flagCompletion we should ignore the error
+ if _, ok := flagErr.(*flagCompError); !(ok && !flagCompletion) {
+ return finalCmd, []string{}, ShellCompDirectiveDefault, flagErr
+ }
+ }
+
+ // Look for the --help or --version flags. If they are present,
+ // there should be no further completions.
+ if helpOrVersionFlagPresent(finalCmd) {
+ return finalCmd, []string{}, ShellCompDirectiveNoFileComp, nil
+ }
+
+ // We only remove the flags from the arguments if DisableFlagParsing is not set.
+ // This is important for commands which have requested to do their own flag completion.
+ if !finalCmd.DisableFlagParsing {
+ finalArgs = finalCmd.Flags().Args()
+ }
+
+ if flag != nil && flagCompletion {
+ // Check if we are completing a flag value subject to annotations
+ if validExts, present := flag.Annotations[BashCompFilenameExt]; present {
+ if len(validExts) != 0 {
+ // File completion filtered by extensions
+ return finalCmd, validExts, ShellCompDirectiveFilterFileExt, nil
+ }
+
+ // The annotation requests simple file completion. There is no reason to do
+ // that since it is the default behavior anyway. Let's ignore this annotation
+ // in case the program also registered a completion function for this flag.
+ // Even though it is a mistake on the program's side, let's be nice when we can.
+ }
+
+ if subDir, present := flag.Annotations[BashCompSubdirsInDir]; present {
+ if len(subDir) == 1 {
+ // Directory completion from within a directory
+ return finalCmd, subDir, ShellCompDirectiveFilterDirs, nil
+ }
+ // Directory completion
+ return finalCmd, []string{}, ShellCompDirectiveFilterDirs, nil
+ }
+ }
+
+ var completions []string
+ var directive ShellCompDirective
+
+ // Enforce flag groups before doing flag completions
+ finalCmd.enforceFlagGroupsForCompletion()
+
+ // Note that we want to perform flagname completion even if finalCmd.DisableFlagParsing==true;
+ // doing this allows for completion of persistent flag names even for commands that disable flag parsing.
+ //
+ // When doing completion of a flag name, as soon as an argument starts with
+ // a '-' we know it is a flag. We cannot use isFlagArg() here as it requires
+ // the flag name to be complete
+ if flag == nil && len(toComplete) > 0 && toComplete[0] == '-' && !strings.Contains(toComplete, "=") && flagCompletion {
+ // First check for required flags
+ completions = completeRequireFlags(finalCmd, toComplete)
+
+ // If we have not found any required flags, only then can we show regular flags
+ if len(completions) == 0 {
+ doCompleteFlags := func(flag *pflag.Flag) {
+ if !flag.Changed ||
+ strings.Contains(flag.Value.Type(), "Slice") ||
+ strings.Contains(flag.Value.Type(), "Array") {
+ // If the flag is not already present, or if it can be specified multiple times (Array or Slice)
+ // we suggest it as a completion
+ completions = append(completions, getFlagNameCompletions(flag, toComplete)...)
+ }
+ }
+
+ // We cannot use finalCmd.Flags() because we may not have called ParseFlags() for commands
+ // that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and
+ // non-inherited flags.
+ finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ doCompleteFlags(flag)
+ })
+ // Try to complete non-inherited flags even if DisableFlagParsing==true.
+ // This allows programs to tell Cobra about flags for completion even
+ // if the actual parsing of flags is not done by Cobra.
+ // For instance, Helm uses this to provide flag name completion for
+ // some of its plugins.
+ finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ doCompleteFlags(flag)
+ })
+ }
+
+ directive = ShellCompDirectiveNoFileComp
+ if len(completions) == 1 && strings.HasSuffix(completions[0], "=") {
+ // If there is a single completion, the shell usually adds a space
+ // after the completion. We don't want that if the flag ends with an =
+ directive = ShellCompDirectiveNoSpace
+ }
+
+ if !finalCmd.DisableFlagParsing {
+ // If DisableFlagParsing==false, we have completed the flags as known by Cobra;
+ // we can return what we found.
+ // If DisableFlagParsing==true, Cobra may not be aware of all flags, so we
+ // let the logic continue to see if ValidArgsFunction needs to be called.
+ return finalCmd, completions, directive, nil
+ }
+ } else {
+ directive = ShellCompDirectiveDefault
+ if flag == nil {
+ foundLocalNonPersistentFlag := false
+ // If TraverseChildren is true on the root command we don't check for
+ // local flags because we can use a local flag on a parent command
+ if !finalCmd.Root().TraverseChildren {
+ // Check if there are any local, non-persistent flags on the command-line
+ localNonPersistentFlags := finalCmd.LocalNonPersistentFlags()
+ finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ if localNonPersistentFlags.Lookup(flag.Name) != nil && flag.Changed {
+ foundLocalNonPersistentFlag = true
+ }
+ })
+ }
+
+ // Complete subcommand names, including the help command
+ if len(finalArgs) == 0 && !foundLocalNonPersistentFlag {
+ // We only complete sub-commands if:
+ // - there are no arguments on the command-line and
+ // - there are no local, non-persistent flags on the command-line or TraverseChildren is true
+ for _, subCmd := range finalCmd.Commands() {
+ if subCmd.IsAvailableCommand() || subCmd == finalCmd.helpCommand {
+ if strings.HasPrefix(subCmd.Name(), toComplete) {
+ completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short))
+ }
+ directive = ShellCompDirectiveNoFileComp
+ }
+ }
+ }
+
+ // Complete required flags even without the '-' prefix
+ completions = append(completions, completeRequireFlags(finalCmd, toComplete)...)
+
+ // Always complete ValidArgs, even if we are completing a subcommand name.
+ // This is for commands that have both subcommands and ValidArgs.
+ if len(finalCmd.ValidArgs) > 0 {
+ if len(finalArgs) == 0 {
+ // ValidArgs are only for the first argument
+ for _, validArg := range finalCmd.ValidArgs {
+ if strings.HasPrefix(validArg, toComplete) {
+ completions = append(completions, validArg)
+ }
+ }
+ directive = ShellCompDirectiveNoFileComp
+
+ // If no completions were found within commands or ValidArgs,
+ // see if there are any ArgAliases that should be completed.
+ if len(completions) == 0 {
+ for _, argAlias := range finalCmd.ArgAliases {
+ if strings.HasPrefix(argAlias, toComplete) {
+ completions = append(completions, argAlias)
+ }
+ }
+ }
+ }
+
+ // If there are ValidArgs specified (even if they don't match), we stop completion.
+ // Only one of ValidArgs or ValidArgsFunction can be used for a single command.
+ return finalCmd, completions, directive, nil
+ }
+
+ // Let the logic continue so as to add any ValidArgsFunction completions,
+ // even if we already found sub-commands.
+ // This is for commands that have subcommands but also specify a ValidArgsFunction.
+ }
+ }
+
+ // Find the completion function for the flag or command
+ var completionFn func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)
+ if flag != nil && flagCompletion {
+ flagCompletionMutex.RLock()
+ completionFn = flagCompletionFunctions[flag]
+ flagCompletionMutex.RUnlock()
+ } else {
+ completionFn = finalCmd.ValidArgsFunction
+ }
+ if completionFn != nil {
+ // Go custom completion defined for this flag or command.
+ // Call the registered completion function to get the completions.
+ var comps []string
+ comps, directive = completionFn(finalCmd, finalArgs, toComplete)
+ completions = append(completions, comps...)
+ }
+
+ return finalCmd, completions, directive, nil
+}
+
+func helpOrVersionFlagPresent(cmd *Command) bool {
+ if versionFlag := cmd.Flags().Lookup("version"); versionFlag != nil &&
+ len(versionFlag.Annotations[FlagSetByCobraAnnotation]) > 0 && versionFlag.Changed {
+ return true
+ }
+ if helpFlag := cmd.Flags().Lookup("help"); helpFlag != nil &&
+ len(helpFlag.Annotations[FlagSetByCobraAnnotation]) > 0 && helpFlag.Changed {
+ return true
+ }
+ return false
+}
+
+func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []string {
+ if nonCompletableFlag(flag) {
+ return []string{}
+ }
+
+ var completions []string
+ flagName := "--" + flag.Name
+ if strings.HasPrefix(flagName, toComplete) {
+ // Flag without the =
+ completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage))
+
+ // Why suggest both long forms: --flag and --flag= ?
+ // This forces the user to *always* have to type either an = or a space after the flag name.
+ // Let's be nice and avoid making users have to do that.
+ // Since boolean flags and shortname flags don't show the = form, let's go that route and never show it.
+ // The = form will still work, we just won't suggest it.
+ // This also makes the list of suggested flags shorter as we avoid all the = forms.
+ //
+ // if len(flag.NoOptDefVal) == 0 {
+ // // Flag requires a value, so it can be suffixed with =
+ // flagName += "="
+ // completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage))
+ // }
+ }
+
+ flagName = "-" + flag.Shorthand
+ if len(flag.Shorthand) > 0 && strings.HasPrefix(flagName, toComplete) {
+ completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage))
+ }
+
+ return completions
+}
+
+func completeRequireFlags(finalCmd *Command, toComplete string) []string {
+ var completions []string
+
+ doCompleteRequiredFlags := func(flag *pflag.Flag) {
+ if _, present := flag.Annotations[BashCompOneRequiredFlag]; present {
+ if !flag.Changed {
+ // If the flag is not already present, we suggest it as a completion
+ completions = append(completions, getFlagNameCompletions(flag, toComplete)...)
+ }
+ }
+ }
+
+ // We cannot use finalCmd.Flags() because we may not have called ParseFlags() for commands
+ // that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and
+ // non-inherited flags.
+ finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ doCompleteRequiredFlags(flag)
+ })
+ finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ doCompleteRequiredFlags(flag)
+ })
+
+ return completions
+}
+
+func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*pflag.Flag, []string, string, error) {
+ if finalCmd.DisableFlagParsing {
+ // We only do flag completion if we are allowed to parse flags
+ // This is important for commands which have requested to do their own flag completion.
+ return nil, args, lastArg, nil
+ }
+
+ var flagName string
+ trimmedArgs := args
+ flagWithEqual := false
+ orgLastArg := lastArg
+
+ // When doing completion of a flag name, as soon as an argument starts with
+ // a '-' we know it is a flag. We cannot use isFlagArg() here as that function
+ // requires the flag name to be complete
+ if len(lastArg) > 0 && lastArg[0] == '-' {
+ if index := strings.Index(lastArg, "="); index >= 0 {
+ // Flag with an =
+ if strings.HasPrefix(lastArg[:index], "--") {
+ // Flag has full name
+ flagName = lastArg[2:index]
+ } else {
+ // Flag is shorthand
+ // We have to get the last shorthand flag name
+ // e.g. `-asd` => d to provide the correct completion
+ // https://github.com/spf13/cobra/issues/1257
+ flagName = lastArg[index-1 : index]
+ }
+ lastArg = lastArg[index+1:]
+ flagWithEqual = true
+ } else {
+ // Normal flag completion
+ return nil, args, lastArg, nil
+ }
+ }
+
+ if len(flagName) == 0 {
+ if len(args) > 0 {
+ prevArg := args[len(args)-1]
+ if isFlagArg(prevArg) {
+ // Only consider the case where the flag does not contain an =.
+ // If the flag contains an = it means it has already been fully processed,
+ // so we don't need to deal with it here.
+ if index := strings.Index(prevArg, "="); index < 0 {
+ if strings.HasPrefix(prevArg, "--") {
+ // Flag has full name
+ flagName = prevArg[2:]
+ } else {
+ // Flag is shorthand
+ // We have to get the last shorthand flag name
+ // e.g. `-asd` => d to provide the correct completion
+ // https://github.com/spf13/cobra/issues/1257
+ flagName = prevArg[len(prevArg)-1:]
+ }
+ // Remove the uncompleted flag or else there could be an error created
+ // for an invalid value for that flag
+ trimmedArgs = args[:len(args)-1]
+ }
+ }
+ }
+ }
+
+ if len(flagName) == 0 {
+ // Not doing flag completion
+ return nil, trimmedArgs, lastArg, nil
+ }
+
+ flag := findFlag(finalCmd, flagName)
+ if flag == nil {
+ // Flag not supported by this command; the interspersed option might be set, so return the original args
+ return nil, args, orgLastArg, &flagCompError{subCommand: finalCmd.Name(), flagName: flagName}
+ }
+
+ if !flagWithEqual {
+ if len(flag.NoOptDefVal) != 0 {
+ // We had assumed we were dealing with a two-word flag, but the flag is a boolean flag.
+ // In that case, there is no value following it, so we are not really doing flag completion.
+ // Reset everything to do noun completion.
+ trimmedArgs = args
+ flag = nil
+ }
+ }
+
+ return flag, trimmedArgs, lastArg, nil
+}
+
+// InitDefaultCompletionCmd adds a default 'completion' command to c.
+// This function will do nothing if any of the following is true:
+// 1- the feature has been explicitly disabled by the program,
+// 2- c has no subcommands (to avoid creating one),
+// 3- c already has a 'completion' command provided by the program.
+func (c *Command) InitDefaultCompletionCmd() {
+ if c.CompletionOptions.DisableDefaultCmd || !c.HasSubCommands() {
+ return
+ }
+
+ for _, cmd := range c.commands {
+ if cmd.Name() == compCmdName || cmd.HasAlias(compCmdName) {
+ // A completion command is already available
+ return
+ }
+ }
+
+ haveNoDescFlag := !c.CompletionOptions.DisableNoDescFlag && !c.CompletionOptions.DisableDescriptions
+
+ completionCmd := &Command{
+ Use: compCmdName,
+ Short: "Generate the autocompletion script for the specified shell",
+ Long: fmt.Sprintf(`Generate the autocompletion script for %[1]s for the specified shell.
+See each sub-command's help for details on how to use the generated script.
+`, c.Root().Name()),
+ Args: NoArgs,
+ ValidArgsFunction: NoFileCompletions,
+ Hidden: c.CompletionOptions.HiddenDefaultCmd,
+ GroupID: c.completionCommandGroupID,
+ }
+ c.AddCommand(completionCmd)
+
+ out := c.OutOrStdout()
+ noDesc := c.CompletionOptions.DisableDescriptions
+ shortDesc := "Generate the autocompletion script for %s"
+ bash := &Command{
+ Use: "bash",
+ Short: fmt.Sprintf(shortDesc, "bash"),
+ Long: fmt.Sprintf(`Generate the autocompletion script for the bash shell.
+
+This script depends on the 'bash-completion' package.
+If it is not installed already, you can install it via your OS's package manager.
+
+To load completions in your current shell session:
+
+ source <(%[1]s completion bash)
+
+To load completions for every new session, execute once:
+
+#### Linux:
+
+ %[1]s completion bash > /etc/bash_completion.d/%[1]s
+
+#### macOS:
+
+ %[1]s completion bash > $(brew --prefix)/etc/bash_completion.d/%[1]s
+
+You will need to start a new shell for this setup to take effect.
+`, c.Root().Name()),
+ Args: NoArgs,
+ DisableFlagsInUseLine: true,
+ ValidArgsFunction: NoFileCompletions,
+ RunE: func(cmd *Command, args []string) error {
+ return cmd.Root().GenBashCompletionV2(out, !noDesc)
+ },
+ }
+ if haveNoDescFlag {
+ bash.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc)
+ }
+
+ zsh := &Command{
+ Use: "zsh",
+ Short: fmt.Sprintf(shortDesc, "zsh"),
+ Long: fmt.Sprintf(`Generate the autocompletion script for the zsh shell.
+
+If shell completion is not already enabled in your environment you will need
+to enable it. You can execute the following once:
+
+ echo "autoload -U compinit; compinit" >> ~/.zshrc
+
+To load completions in your current shell session:
+
+ source <(%[1]s completion zsh)
+
+To load completions for every new session, execute once:
+
+#### Linux:
+
+ %[1]s completion zsh > "${fpath[1]}/_%[1]s"
+
+#### macOS:
+
+ %[1]s completion zsh > $(brew --prefix)/share/zsh/site-functions/_%[1]s
+
+You will need to start a new shell for this setup to take effect.
+`, c.Root().Name()),
+ Args: NoArgs,
+ ValidArgsFunction: NoFileCompletions,
+ RunE: func(cmd *Command, args []string) error {
+ if noDesc {
+ return cmd.Root().GenZshCompletionNoDesc(out)
+ }
+ return cmd.Root().GenZshCompletion(out)
+ },
+ }
+ if haveNoDescFlag {
+ zsh.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc)
+ }
+
+ fish := &Command{
+ Use: "fish",
+ Short: fmt.Sprintf(shortDesc, "fish"),
+ Long: fmt.Sprintf(`Generate the autocompletion script for the fish shell.
+
+To load completions in your current shell session:
+
+ %[1]s completion fish | source
+
+To load completions for every new session, execute once:
+
+ %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish
+
+You will need to start a new shell for this setup to take effect.
+`, c.Root().Name()),
+ Args: NoArgs,
+ ValidArgsFunction: NoFileCompletions,
+ RunE: func(cmd *Command, args []string) error {
+ return cmd.Root().GenFishCompletion(out, !noDesc)
+ },
+ }
+ if haveNoDescFlag {
+ fish.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc)
+ }
+
+ powershell := &Command{
+ Use: "powershell",
+ Short: fmt.Sprintf(shortDesc, "powershell"),
+ Long: fmt.Sprintf(`Generate the autocompletion script for powershell.
+
+To load completions in your current shell session:
+
+ %[1]s completion powershell | Out-String | Invoke-Expression
+
+To load completions for every new session, add the output of the above command
+to your powershell profile.
+`, c.Root().Name()),
+ Args: NoArgs,
+ ValidArgsFunction: NoFileCompletions,
+ RunE: func(cmd *Command, args []string) error {
+ if noDesc {
+ return cmd.Root().GenPowerShellCompletion(out)
+ }
+ return cmd.Root().GenPowerShellCompletionWithDesc(out)
+
+ },
+ }
+ if haveNoDescFlag {
+ powershell.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc)
+ }
+
+ completionCmd.AddCommand(bash, zsh, fish, powershell)
+}
+
+func findFlag(cmd *Command, name string) *pflag.Flag {
+ flagSet := cmd.Flags()
+ if len(name) == 1 {
+ // First convert the short flag into a long flag
+ // as the cmd.Flag() search only accepts long flags
+ if short := flagSet.ShorthandLookup(name); short != nil {
+ name = short.Name
+ } else {
+ set := cmd.InheritedFlags()
+ if short = set.ShorthandLookup(name); short != nil {
+ name = short.Name
+ } else {
+ return nil
+ }
+ }
+ }
+ return cmd.Flag(name)
+}
+
+// CompDebug prints the specified string to the same file as where the
+// completion script prints its logs.
+// Note that completion printouts should never be on stdout as they would
+// be wrongly interpreted as actual completion choices by the completion script.
+func CompDebug(msg string, printToStdErr bool) {
+ msg = fmt.Sprintf("[Debug] %s", msg)
+
+ // Such logs are only printed when the user has set the environment
+ // variable BASH_COMP_DEBUG_FILE to the path of some file to be used.
+ if path := os.Getenv("BASH_COMP_DEBUG_FILE"); path != "" {
+ f, err := os.OpenFile(path,
+ os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+ if err == nil {
+ defer f.Close()
+ WriteStringAndCheck(f, msg)
+ }
+ }
+
+ if printToStdErr {
+ // Must print to stderr for this not to be read by the completion script.
+ fmt.Fprint(os.Stderr, msg)
+ }
+}
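+
+// Example (not part of upstream Cobra; a minimal sketch with a hypothetical
+// log path): debug output only appears once BASH_COMP_DEBUG_FILE points at a
+// writable file.
+//
+//	os.Setenv("BASH_COMP_DEBUG_FILE", "/tmp/comp-debug.log")
+//	CompDebugln("computing completions", false) // appended to /tmp/comp-debug.log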
+
+// CompDebugln prints the specified string with a newline at the end
+// to the same file as where the completion script prints its logs.
+// Such logs are only printed when the user has set the environment
+// variable BASH_COMP_DEBUG_FILE to the path of some file to be used.
+func CompDebugln(msg string, printToStdErr bool) {
+ CompDebug(fmt.Sprintf("%s\n", msg), printToStdErr)
+}
+
+// CompError prints the specified completion message to stderr.
+func CompError(msg string) {
+ msg = fmt.Sprintf("[Error] %s", msg)
+ CompDebug(msg, true)
+}
+
+// CompErrorln prints the specified completion message to stderr with a newline at the end.
+func CompErrorln(msg string) {
+ CompError(fmt.Sprintf("%s\n", msg))
+}
+
+// These values should not be changed: users will be using them explicitly.
+const (
+ configEnvVarGlobalPrefix = "COBRA"
+ configEnvVarSuffixDescriptions = "COMPLETION_DESCRIPTIONS"
+)
+
+var configEnvVarPrefixSubstRegexp = regexp.MustCompile(`[^A-Z0-9_]`)
+
+// configEnvVar returns the name of the program-specific configuration environment
+// variable. It has the format <PROGRAM>_<SUFFIX> where <PROGRAM> is the name of the
+// root command in upper case, with all non-ASCII-alphanumeric characters replaced by `_`.
+func configEnvVar(name, suffix string) string {
+ // This format should not be changed: users will be using it explicitly.
+ v := strings.ToUpper(fmt.Sprintf("%s_%s", name, suffix))
+ v = configEnvVarPrefixSubstRegexp.ReplaceAllString(v, "_")
+ return v
+}
+
+// getEnvConfig returns the value of the configuration environment variable
+// <PROGRAM>_<SUFFIX> where <PROGRAM> is the name of the root command in upper
+// case, with all non-ASCII-alphanumeric characters replaced by `_`.
+// If the value is empty or not set, the value of the environment variable
+// COBRA_<SUFFIX> is returned instead.
+func getEnvConfig(cmd *Command, suffix string) string {
+ v := os.Getenv(configEnvVar(cmd.Root().Name(), suffix))
+ if v == "" {
+ v = os.Getenv(configEnvVar(configEnvVarGlobalPrefix, suffix))
+ }
+ return v
+}
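+
+// For illustration (a worked example, not additional vendored code): for a
+// root command named "my-prog", getEnvConfig(cmd, configEnvVarSuffixDescriptions)
+// first reads MY_PROG_COMPLETION_DESCRIPTIONS and, when that variable is
+// empty or unset, falls back to COBRA_COMPLETION_DESCRIPTIONS.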
diff --git a/vendor/github.com/spf13/cobra/fish_completions.go b/vendor/github.com/spf13/cobra/fish_completions.go
new file mode 100644
index 0000000..12d61b6
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/fish_completions.go
@@ -0,0 +1,292 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+func genFishComp(buf io.StringWriter, name string, includeDesc bool) {
+ // Variables should not contain a '-' or ':' character
+ nameForVar := name
+ nameForVar = strings.ReplaceAll(nameForVar, "-", "_")
+ nameForVar = strings.ReplaceAll(nameForVar, ":", "_")
+
+ compCmd := ShellCompRequestCmd
+ if !includeDesc {
+ compCmd = ShellCompNoDescRequestCmd
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf("# fish completion for %-36s -*- shell-script -*-\n", name))
+ WriteStringAndCheck(buf, fmt.Sprintf(`
+function __%[1]s_debug
+ set -l file "$BASH_COMP_DEBUG_FILE"
+ if test -n "$file"
+ echo "$argv" >> $file
+ end
+end
+
+function __%[1]s_perform_completion
+ __%[1]s_debug "Starting __%[1]s_perform_completion"
+
+ # Extract all args except the last one
+ set -l args (commandline -opc)
+ # Extract the last arg and escape it in case it is a space
+ set -l lastArg (string escape -- (commandline -ct))
+
+ __%[1]s_debug "args: $args"
+ __%[1]s_debug "last arg: $lastArg"
+
+ # Disable ActiveHelp which is not supported for fish shell
+ set -l requestComp "%[10]s=0 $args[1] %[3]s $args[2..-1] $lastArg"
+
+ __%[1]s_debug "Calling $requestComp"
+ set -l results (eval $requestComp 2> /dev/null)
+
+ # Some programs may output extra empty lines after the directive.
+ # Let's ignore them or else it will break completion.
+ # Ref: https://github.com/spf13/cobra/issues/1279
+ for line in $results[-1..1]
+ if test (string trim -- $line) = ""
+ # Found an empty line, remove it
+ set results $results[1..-2]
+ else
+ # Found non-empty line, we have our proper output
+ break
+ end
+ end
+
+ set -l comps $results[1..-2]
+ set -l directiveLine $results[-1]
+
+ # For Fish, when completing a flag with an = (e.g., -n=)
+ # completions must be prefixed with the flag
+ set -l flagPrefix (string match -r -- '-.*=' "$lastArg")
+
+ __%[1]s_debug "Comps: $comps"
+ __%[1]s_debug "DirectiveLine: $directiveLine"
+ __%[1]s_debug "flagPrefix: $flagPrefix"
+
+ for comp in $comps
+ printf "%%s%%s\n" "$flagPrefix" "$comp"
+ end
+
+ printf "%%s\n" "$directiveLine"
+end
+
+# This function limits calls to __%[1]s_perform_completion by caching the result in $__%[1]s_perform_completion_once_result
+function __%[1]s_perform_completion_once
+ __%[1]s_debug "Starting __%[1]s_perform_completion_once"
+
+ if test -n "$__%[1]s_perform_completion_once_result"
+ __%[1]s_debug "Seems like a valid result already exists, skipping __%[1]s_perform_completion"
+ return 0
+ end
+
+ set --global __%[1]s_perform_completion_once_result (__%[1]s_perform_completion)
+ if test -z "$__%[1]s_perform_completion_once_result"
+ __%[1]s_debug "No completions, probably due to a failure"
+ return 1
+ end
+
+ __%[1]s_debug "Performed completions and set __%[1]s_perform_completion_once_result"
+ return 0
+end
+
+# this function is used to clear the $__%[1]s_perform_completion_once_result variable after completions are run
+function __%[1]s_clear_perform_completion_once_result
+ __%[1]s_debug ""
+ __%[1]s_debug "========= clearing previously set __%[1]s_perform_completion_once_result variable =========="
+ set --erase __%[1]s_perform_completion_once_result
+ __%[1]s_debug "Successfully erased the variable __%[1]s_perform_completion_once_result"
+end
+
+function __%[1]s_requires_order_preservation
+ __%[1]s_debug ""
+ __%[1]s_debug "========= checking if order preservation is required =========="
+
+ __%[1]s_perform_completion_once
+ if test -z "$__%[1]s_perform_completion_once_result"
+ __%[1]s_debug "Error determining if order preservation is required"
+ return 1
+ end
+
+ set -l directive (string sub --start 2 $__%[1]s_perform_completion_once_result[-1])
+ __%[1]s_debug "Directive is: $directive"
+
+ set -l shellCompDirectiveKeepOrder %[9]d
+ set -l keeporder (math (math --scale 0 $directive / $shellCompDirectiveKeepOrder) %% 2)
+ __%[1]s_debug "Keeporder is: $keeporder"
+
+ if test $keeporder -ne 0
+ __%[1]s_debug "This does require order preservation"
+ return 0
+ end
+
+ __%[1]s_debug "This doesn't require order preservation"
+ return 1
+end
+
+
+# This function does two things:
+# - Obtain the completions and store them in the global __%[1]s_comp_results
+# - Return false if file completion should be performed
+function __%[1]s_prepare_completions
+ __%[1]s_debug ""
+ __%[1]s_debug "========= starting completion logic =========="
+
+ # Start fresh
+ set --erase __%[1]s_comp_results
+
+ __%[1]s_perform_completion_once
+ __%[1]s_debug "Completion results: $__%[1]s_perform_completion_once_result"
+
+ if test -z "$__%[1]s_perform_completion_once_result"
+ __%[1]s_debug "No completion, probably due to a failure"
+ # Might as well do file completion, in case it helps
+ return 1
+ end
+
+ set -l directive (string sub --start 2 $__%[1]s_perform_completion_once_result[-1])
+ set --global __%[1]s_comp_results $__%[1]s_perform_completion_once_result[1..-2]
+
+ __%[1]s_debug "Completions are: $__%[1]s_comp_results"
+ __%[1]s_debug "Directive is: $directive"
+
+ set -l shellCompDirectiveError %[4]d
+ set -l shellCompDirectiveNoSpace %[5]d
+ set -l shellCompDirectiveNoFileComp %[6]d
+ set -l shellCompDirectiveFilterFileExt %[7]d
+ set -l shellCompDirectiveFilterDirs %[8]d
+
+ if test -z "$directive"
+ set directive 0
+ end
+
+ set -l compErr (math (math --scale 0 $directive / $shellCompDirectiveError) %% 2)
+ if test $compErr -eq 1
+ __%[1]s_debug "Received error directive: aborting."
+ # Might as well do file completion, in case it helps
+ return 1
+ end
+
+ set -l filefilter (math (math --scale 0 $directive / $shellCompDirectiveFilterFileExt) %% 2)
+ set -l dirfilter (math (math --scale 0 $directive / $shellCompDirectiveFilterDirs) %% 2)
+ if test $filefilter -eq 1; or test $dirfilter -eq 1
+ __%[1]s_debug "File extension filtering or directory filtering not supported"
+ # Do full file completion instead
+ return 1
+ end
+
+ set -l nospace (math (math --scale 0 $directive / $shellCompDirectiveNoSpace) %% 2)
+ set -l nofiles (math (math --scale 0 $directive / $shellCompDirectiveNoFileComp) %% 2)
+
+ __%[1]s_debug "nospace: $nospace, nofiles: $nofiles"
+
+ # If we want to prevent a space, or if file completion is NOT disabled,
+ # we need to count the number of valid completions.
+ # To do so, we will filter on the prefix, since the completions we have
+ # received may not already be filtered, so as to allow fish to match on
+ # criteria other than the prefix.
+ if test $nospace -ne 0; or test $nofiles -eq 0
+ set -l prefix (commandline -t | string escape --style=regex)
+ __%[1]s_debug "prefix: $prefix"
+
+ set -l completions (string match -r -- "^$prefix.*" $__%[1]s_comp_results)
+ set --global __%[1]s_comp_results $completions
+ __%[1]s_debug "Filtered completions are: $__%[1]s_comp_results"
+
+ # Important not to quote the variable for count to work
+ set -l numComps (count $__%[1]s_comp_results)
+ __%[1]s_debug "numComps: $numComps"
+
+ if test $numComps -eq 1; and test $nospace -ne 0
+ # We must first split on \t to get rid of the descriptions to be
+ # able to check what the actual completion will be.
+ # We don't need descriptions anyway since there is only a single
+ # real completion which the shell will expand immediately.
+ set -l split (string split --max 1 \t $__%[1]s_comp_results[1])
+
+ # Fish won't add a space if the completion ends with any
+ # of the following characters: @=/:.,
+ set -l lastChar (string sub -s -1 -- $split)
+ if not string match -r -q "[@=/:.,]" -- "$lastChar"
+ # In other cases, to support the "nospace" directive we trick the shell
+ # by outputting an extra, longer completion.
+ __%[1]s_debug "Adding second completion to perform nospace directive"
+ set --global __%[1]s_comp_results $split[1] $split[1].
+ __%[1]s_debug "Completions are now: $__%[1]s_comp_results"
+ end
+ end
+
+ if test $numComps -eq 0; and test $nofiles -eq 0
+ # To be consistent with bash and zsh, we only trigger file
+ # completion when there are no other completions
+ __%[1]s_debug "Requesting file completion"
+ return 1
+ end
+ end
+
+ return 0
+end
+
+# Since Fish completions are only loaded once the user triggers them, we trigger them ourselves
+# so we can properly delete any completions provided by another script.
+# Only do this if the program can be found, or else fish may print some errors; besides,
+# the existing completions will only be loaded if the program can be found.
+if type -q "%[2]s"
+ # The space after the program name is essential to trigger completion for the program
+ # and not completion of the program name itself.
+ # Also, we use '> /dev/null 2>&1' since '&>' is not supported in older versions of fish.
+ complete --do-complete "%[2]s " > /dev/null 2>&1
+end
+
+# Remove any pre-existing completions for the program since we will be handling all of them.
+complete -c %[2]s -e
+
+# this will get called after the two calls below and clear the $__%[1]s_perform_completion_once_result global
+complete -c %[2]s -n '__%[1]s_clear_perform_completion_once_result'
+# The call to __%[1]s_prepare_completions will setup __%[1]s_comp_results
+# which provides the program's completion choices.
+# If this doesn't require order preservation, we don't use the -k flag
+complete -c %[2]s -n 'not __%[1]s_requires_order_preservation && __%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results'
+# otherwise we use the -k flag
+complete -k -c %[2]s -n '__%[1]s_requires_order_preservation && __%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results'
+`, nameForVar, name, compCmd,
+ ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpEnvVar(name)))
+}
+
+// GenFishCompletion generates fish completion file and writes to the passed writer.
+func (c *Command) GenFishCompletion(w io.Writer, includeDesc bool) error {
+ buf := new(bytes.Buffer)
+ genFishComp(buf, c.Name(), includeDesc)
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+// GenFishCompletionFile generates fish completion file.
+func (c *Command) GenFishCompletionFile(filename string, includeDesc bool) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.GenFishCompletion(outFile, includeDesc)
+}
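+
+// Illustrative sketch (not upstream cobra code; rootCmd is a hypothetical
+// root command): callers typically wire these generators up as follows.
+//
+//	// Write the fish script, with descriptions, to a file:
+//	if err := rootCmd.GenFishCompletionFile("completions/myprog.fish", true); err != nil {
+//		log.Fatal(err)
+//	}
+//	// Or stream it, without descriptions, to stdout:
+//	_ = rootCmd.GenFishCompletion(os.Stdout, false)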
diff --git a/vendor/github.com/spf13/cobra/flag_groups.go b/vendor/github.com/spf13/cobra/flag_groups.go
new file mode 100644
index 0000000..560612f
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/flag_groups.go
@@ -0,0 +1,290 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ flag "github.com/spf13/pflag"
+)
+
+const (
+ requiredAsGroupAnnotation = "cobra_annotation_required_if_others_set"
+ oneRequiredAnnotation = "cobra_annotation_one_required"
+ mutuallyExclusiveAnnotation = "cobra_annotation_mutually_exclusive"
+)
+
+// MarkFlagsRequiredTogether marks the given flags with annotations so that Cobra errors
+// if the command is invoked with a subset (but not all) of the given flags.
+func (c *Command) MarkFlagsRequiredTogether(flagNames ...string) {
+ c.mergePersistentFlags()
+ for _, v := range flagNames {
+ f := c.Flags().Lookup(v)
+ if f == nil {
+ panic(fmt.Sprintf("Failed to find flag %q and mark it as being required in a flag group", v))
+ }
+ if err := c.Flags().SetAnnotation(v, requiredAsGroupAnnotation, append(f.Annotations[requiredAsGroupAnnotation], strings.Join(flagNames, " "))); err != nil {
+ // Only errs if the flag isn't found.
+ panic(err)
+ }
+ }
+}
+
+// MarkFlagsOneRequired marks the given flags with annotations so that Cobra errors
+// if the command is invoked without at least one flag from the given set of flags.
+func (c *Command) MarkFlagsOneRequired(flagNames ...string) {
+ c.mergePersistentFlags()
+ for _, v := range flagNames {
+ f := c.Flags().Lookup(v)
+ if f == nil {
+ panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a one-required flag group", v))
+ }
+ if err := c.Flags().SetAnnotation(v, oneRequiredAnnotation, append(f.Annotations[oneRequiredAnnotation], strings.Join(flagNames, " "))); err != nil {
+ // Only errs if the flag isn't found.
+ panic(err)
+ }
+ }
+}
+
+// MarkFlagsMutuallyExclusive marks the given flags with annotations so that Cobra errors
+// if the command is invoked with more than one flag from the given set of flags.
+func (c *Command) MarkFlagsMutuallyExclusive(flagNames ...string) {
+ c.mergePersistentFlags()
+ for _, v := range flagNames {
+ f := c.Flags().Lookup(v)
+ if f == nil {
+ panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a mutually exclusive flag group", v))
+ }
+ // Each time this is called is a single new entry; this allows it to be a member of multiple groups if needed.
+ if err := c.Flags().SetAnnotation(v, mutuallyExclusiveAnnotation, append(f.Annotations[mutuallyExclusiveAnnotation], strings.Join(flagNames, " "))); err != nil {
+ panic(err)
+ }
+ }
+}
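+
+// Illustrative sketch (not upstream cobra code; the flag names are
+// hypothetical): the three helpers above combine naturally on one command.
+//
+//	cmd.Flags().String("username", "", "user name")
+//	cmd.Flags().String("password", "", "password")
+//	cmd.Flags().String("token", "", "API token")
+//	cmd.MarkFlagsRequiredTogether("username", "password") // both or neither
+//	cmd.MarkFlagsOneRequired("password", "token")         // at least one
+//	cmd.MarkFlagsMutuallyExclusive("password", "token")   // not both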
+
+// ValidateFlagGroups validates the mutuallyExclusive/oneRequired/requiredAsGroup logic and returns the
+// first error encountered.
+func (c *Command) ValidateFlagGroups() error {
+ if c.DisableFlagParsing {
+ return nil
+ }
+
+ flags := c.Flags()
+
+ // groupStatus format is the list of flags as a unique ID,
+ // then a map of each flag name and whether it is set or not.
+ groupStatus := map[string]map[string]bool{}
+ oneRequiredGroupStatus := map[string]map[string]bool{}
+ mutuallyExclusiveGroupStatus := map[string]map[string]bool{}
+ flags.VisitAll(func(pflag *flag.Flag) {
+ processFlagForGroupAnnotation(flags, pflag, requiredAsGroupAnnotation, groupStatus)
+ processFlagForGroupAnnotation(flags, pflag, oneRequiredAnnotation, oneRequiredGroupStatus)
+ processFlagForGroupAnnotation(flags, pflag, mutuallyExclusiveAnnotation, mutuallyExclusiveGroupStatus)
+ })
+
+ if err := validateRequiredFlagGroups(groupStatus); err != nil {
+ return err
+ }
+ if err := validateOneRequiredFlagGroups(oneRequiredGroupStatus); err != nil {
+ return err
+ }
+ if err := validateExclusiveFlagGroups(mutuallyExclusiveGroupStatus); err != nil {
+ return err
+ }
+ return nil
+}
+
+func hasAllFlags(fs *flag.FlagSet, flagnames ...string) bool {
+ for _, fname := range flagnames {
+ f := fs.Lookup(fname)
+ if f == nil {
+ return false
+ }
+ }
+ return true
+}
+
+func processFlagForGroupAnnotation(flags *flag.FlagSet, pflag *flag.Flag, annotation string, groupStatus map[string]map[string]bool) {
+ groupInfo, found := pflag.Annotations[annotation]
+ if found {
+ for _, group := range groupInfo {
+ if groupStatus[group] == nil {
+ flagnames := strings.Split(group, " ")
+
+ // Only consider this flag group at all if all the flags are defined.
+ if !hasAllFlags(flags, flagnames...) {
+ continue
+ }
+
+ groupStatus[group] = make(map[string]bool, len(flagnames))
+ for _, name := range flagnames {
+ groupStatus[group][name] = false
+ }
+ }
+
+ groupStatus[group][pflag.Name] = pflag.Changed
+ }
+ }
+}
+
+func validateRequiredFlagGroups(data map[string]map[string]bool) error {
+ keys := sortedKeys(data)
+ for _, flagList := range keys {
+ flagnameAndStatus := data[flagList]
+
+ unset := []string{}
+ for flagname, isSet := range flagnameAndStatus {
+ if !isSet {
+ unset = append(unset, flagname)
+ }
+ }
+ if len(unset) == len(flagnameAndStatus) || len(unset) == 0 {
+ continue
+ }
+
+ // Sort values, so they can be tested/scripted against consistently.
+ sort.Strings(unset)
+ return fmt.Errorf("if any flags in the group [%v] are set they must all be set; missing %v", flagList, unset)
+ }
+
+ return nil
+}
+
+func validateOneRequiredFlagGroups(data map[string]map[string]bool) error {
+ keys := sortedKeys(data)
+ for _, flagList := range keys {
+ flagnameAndStatus := data[flagList]
+ var set []string
+ for flagname, isSet := range flagnameAndStatus {
+ if isSet {
+ set = append(set, flagname)
+ }
+ }
+ if len(set) >= 1 {
+ continue
+ }
+
+ // Sort values, so they can be tested/scripted against consistently.
+ sort.Strings(set)
+ return fmt.Errorf("at least one of the flags in the group [%v] is required", flagList)
+ }
+ return nil
+}
+
+func validateExclusiveFlagGroups(data map[string]map[string]bool) error {
+ keys := sortedKeys(data)
+ for _, flagList := range keys {
+ flagnameAndStatus := data[flagList]
+ var set []string
+ for flagname, isSet := range flagnameAndStatus {
+ if isSet {
+ set = append(set, flagname)
+ }
+ }
+ if len(set) == 0 || len(set) == 1 {
+ continue
+ }
+
+ // Sort values, so they can be tested/scripted against consistently.
+ sort.Strings(set)
+ return fmt.Errorf("if any flags in the group [%v] are set none of the others can be; %v were all set", flagList, set)
+ }
+ return nil
+}
+
+func sortedKeys(m map[string]map[string]bool) []string {
+ keys := make([]string, len(m))
+ i := 0
+ for k := range m {
+ keys[i] = k
+ i++
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+// enforceFlagGroupsForCompletion will do the following:
+// - when a flag in a group is present, other flags in the group will be marked required
+// - when none of the flags in a one-required group are present, all flags in the group will be marked required
+// - when a flag in a mutually exclusive group is present, other flags in the group will be marked as hidden
+// This allows the standard completion logic to behave appropriately for flag groups.
+func (c *Command) enforceFlagGroupsForCompletion() {
+ if c.DisableFlagParsing {
+ return
+ }
+
+ flags := c.Flags()
+ groupStatus := map[string]map[string]bool{}
+ oneRequiredGroupStatus := map[string]map[string]bool{}
+ mutuallyExclusiveGroupStatus := map[string]map[string]bool{}
+ c.Flags().VisitAll(func(pflag *flag.Flag) {
+ processFlagForGroupAnnotation(flags, pflag, requiredAsGroupAnnotation, groupStatus)
+ processFlagForGroupAnnotation(flags, pflag, oneRequiredAnnotation, oneRequiredGroupStatus)
+ processFlagForGroupAnnotation(flags, pflag, mutuallyExclusiveAnnotation, mutuallyExclusiveGroupStatus)
+ })
+
+ // If a flag that is part of a group is present, we make all the other flags
+ // of that group required so that the shell completion suggests them automatically
+ for flagList, flagnameAndStatus := range groupStatus {
+ for _, isSet := range flagnameAndStatus {
+ if isSet {
+ // One of the flags of the group is set, mark the other ones as required
+ for _, fName := range strings.Split(flagList, " ") {
+ _ = c.MarkFlagRequired(fName)
+ }
+ }
+ }
+ }
+
+ // If none of the flags of a one-required group are present, we make all the flags
+ // of that group required so that the shell completion suggests them automatically
+ for flagList, flagnameAndStatus := range oneRequiredGroupStatus {
+ isSet := false
+
+ for _, isSet = range flagnameAndStatus {
+ if isSet {
+ break
+ }
+ }
+
+ // None of the flags of the group are set, mark all flags in the group
+ // as required
+ if !isSet {
+ for _, fName := range strings.Split(flagList, " ") {
+ _ = c.MarkFlagRequired(fName)
+ }
+ }
+ }
+
+ // If a flag that is mutually exclusive to others is present, we hide the other
+ // flags of that group so the shell completion does not suggest them
+ for flagList, flagnameAndStatus := range mutuallyExclusiveGroupStatus {
+ for flagName, isSet := range flagnameAndStatus {
+ if isSet {
+ // One of the flags of the mutually exclusive group is set, mark the other ones as hidden
+ // Don't mark the flag that is already set as hidden because it may be an
+ // array or slice flag and therefore must continue being suggested
+ for _, fName := range strings.Split(flagList, " ") {
+ if fName != flagName {
+ flag := c.Flags().Lookup(fName)
+ flag.Hidden = true
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go
new file mode 100644
index 0000000..a830b7b
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/powershell_completions.go
@@ -0,0 +1,325 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The generated scripts require PowerShell v5.0+ (which comes with Windows 10,
+// but can be downloaded separately for Windows 7 or 8.1).
+
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+func genPowerShellComp(buf io.StringWriter, name string, includeDesc bool) {
+ // Variables should not contain a '-' or ':' character
+ nameForVar := name
+ nameForVar = strings.ReplaceAll(nameForVar, "-", "_")
+ nameForVar = strings.ReplaceAll(nameForVar, ":", "_")
+
+ compCmd := ShellCompRequestCmd
+ if !includeDesc {
+ compCmd = ShellCompNoDescRequestCmd
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf(`# powershell completion for %-36[1]s -*- shell-script -*-
+
+function __%[1]s_debug {
+ if ($env:BASH_COMP_DEBUG_FILE) {
+ "$args" | Out-File -Append -FilePath "$env:BASH_COMP_DEBUG_FILE"
+ }
+}
+
+filter __%[1]s_escapeStringWithSpecialChars {
+`+" $_ -replace '\\s|#|@|\\$|;|,|''|\\{|\\}|\\(|\\)|\"|`|\\||<|>|&','`$&'"+`
+}
+
+[scriptblock]${__%[2]sCompleterBlock} = {
+ param(
+ $WordToComplete,
+ $CommandAst,
+ $CursorPosition
+ )
+
+ # Get the current command line and convert into a string
+ $Command = $CommandAst.CommandElements
+ $Command = "$Command"
+
+ __%[1]s_debug ""
+ __%[1]s_debug "========= starting completion logic =========="
+ __%[1]s_debug "WordToComplete: $WordToComplete Command: $Command CursorPosition: $CursorPosition"
+
+ # The user could have moved the cursor backwards on the command-line.
+ # We need to trigger completion from the $CursorPosition location, so we need
+ # to truncate the command-line ($Command) up to the $CursorPosition location.
+ # Make sure the $Command is longer than the $CursorPosition before we truncate.
+ # This happens because the $Command does not include the last space.
+ if ($Command.Length -gt $CursorPosition) {
+ $Command=$Command.Substring(0,$CursorPosition)
+ }
+ __%[1]s_debug "Truncated command: $Command"
+
+ $ShellCompDirectiveError=%[4]d
+ $ShellCompDirectiveNoSpace=%[5]d
+ $ShellCompDirectiveNoFileComp=%[6]d
+ $ShellCompDirectiveFilterFileExt=%[7]d
+ $ShellCompDirectiveFilterDirs=%[8]d
+ $ShellCompDirectiveKeepOrder=%[9]d
+
+ # Prepare the command to request completions for the program.
+ # Split the command at the first space to separate the program and arguments.
+ $Program,$Arguments = $Command.Split(" ",2)
+
+ $RequestComp="$Program %[3]s $Arguments"
+ __%[1]s_debug "RequestComp: $RequestComp"
+
+ # We cannot use $WordToComplete because it
+ # has the wrong value if the cursor was moved,
+ # so use the last argument instead
+ if ($WordToComplete -ne "" ) {
+ $WordToComplete = $Arguments.Split(" ")[-1]
+ }
+ __%[1]s_debug "New WordToComplete: $WordToComplete"
+
+
+ # Check for flag with equal sign
+ $IsEqualFlag = ($WordToComplete -Like "--*=*" )
+ if ( $IsEqualFlag ) {
+ __%[1]s_debug "Completing equal sign flag"
+ # Remove the flag part
+ $Flag,$WordToComplete = $WordToComplete.Split("=",2)
+ }
+
+ if ( $WordToComplete -eq "" -And ( -Not $IsEqualFlag )) {
+ # If the last parameter is complete (there is a space following it)
+ # We add an extra empty parameter so we can indicate this to the go method.
+ __%[1]s_debug "Adding extra empty parameter"
+ # PowerShell 7.2+ changed the way arguments are passed to executables,
+ # so for pre-7.2 or when Legacy argument passing is enabled we need to use
+`+" # `\"`\" to pass an empty argument, a \"\" or '' does not work!!!"+`
+ if ($PSVersionTable.PsVersion -lt [version]'7.2.0' -or
+ ($PSVersionTable.PsVersion -lt [version]'7.3.0' -and -not [ExperimentalFeature]::IsEnabled("PSNativeCommandArgumentPassing")) -or
+ (($PSVersionTable.PsVersion -ge [version]'7.3.0' -or [ExperimentalFeature]::IsEnabled("PSNativeCommandArgumentPassing")) -and
+ $PSNativeCommandArgumentPassing -eq 'Legacy')) {
+`+" $RequestComp=\"$RequestComp\" + ' `\"`\"'"+`
+ } else {
+ $RequestComp="$RequestComp" + ' ""'
+ }
+ }
+
+ __%[1]s_debug "Calling $RequestComp"
+ # First disable ActiveHelp which is not supported for Powershell
+ ${env:%[10]s}=0
+
+ # Call the command, store the output in $Out, and redirect stderr and stdout to null
+ # $Out is an array containing one line per element
+ Invoke-Expression -OutVariable out "$RequestComp" 2>&1 | Out-Null
+
+ # get directive from last line
+ [int]$Directive = $Out[-1].TrimStart(':')
+ if ($Directive -eq "") {
+ # There is no directive specified
+ $Directive = 0
+ }
+ __%[1]s_debug "The completion directive is: $Directive"
+
+ # remove directive (last element) from out
+ $Out = $Out | Where-Object { $_ -ne $Out[-1] }
+ __%[1]s_debug "The completions are: $Out"
+
+ if (($Directive -band $ShellCompDirectiveError) -ne 0 ) {
+ # Error code. No completion.
+ __%[1]s_debug "Received error from custom completion go code"
+ return
+ }
+
+ $Longest = 0
+ [Array]$Values = $Out | ForEach-Object {
+ #Split the output in name and description
+`+" $Name, $Description = $_.Split(\"`t\",2)"+`
+ __%[1]s_debug "Name: $Name Description: $Description"
+
+ # Look for the longest completion so that we can format things nicely
+ if ($Longest -lt $Name.Length) {
+ $Longest = $Name.Length
+ }
+
+ # Set the description to a one space string if there is none set.
+ # This is needed because the CompletionResult does not accept an empty string as argument
+ if (-Not $Description) {
+ $Description = " "
+ }
+ @{Name="$Name";Description="$Description"}
+ }
+
+
+ $Space = " "
+ if (($Directive -band $ShellCompDirectiveNoSpace) -ne 0 ) {
+ # remove the space here
+ __%[1]s_debug "ShellCompDirectiveNoSpace is called"
+ $Space = ""
+ }
+
+ if ((($Directive -band $ShellCompDirectiveFilterFileExt) -ne 0 ) -or
+ (($Directive -band $ShellCompDirectiveFilterDirs) -ne 0 )) {
+ __%[1]s_debug "ShellCompDirectiveFilterFileExt ShellCompDirectiveFilterDirs are not supported"
+
+ # return here to prevent the completion of the extensions
+ return
+ }
+
+ $Values = $Values | Where-Object {
+ # filter the result
+ $_.Name -like "$WordToComplete*"
+
+ # Join the flag back if we have an equal sign flag
+ if ( $IsEqualFlag ) {
+ __%[1]s_debug "Join the equal sign flag back to the completion value"
+ $_.Name = $Flag + "=" + $_.Name
+ }
+ }
+
+ # we sort the values in ascending order by name if keep order isn't passed
+ if (($Directive -band $ShellCompDirectiveKeepOrder) -eq 0 ) {
+ $Values = $Values | Sort-Object -Property Name
+ }
+
+ if (($Directive -band $ShellCompDirectiveNoFileComp) -ne 0 ) {
+ __%[1]s_debug "ShellCompDirectiveNoFileComp is called"
+
+ if ($Values.Length -eq 0) {
+ # Just print an empty string here so the
+ # shell does not start to complete paths.
+ # We cannot use CompletionResult here because
+ # it does not accept an empty string as argument.
+ ""
+ return
+ }
+ }
+
+ # Get the current mode
+ $Mode = (Get-PSReadLineKeyHandler | Where-Object {$_.Key -eq "Tab" }).Function
+ __%[1]s_debug "Mode: $Mode"
+
+ $Values | ForEach-Object {
+
+ # store temporary because switch will overwrite $_
+ $comp = $_
+
+ # PowerShell supports three different completion modes
+ # - TabCompleteNext (default windows style - on each key press the next option is displayed)
+ # - Complete (works like bash)
+ # - MenuComplete (works like zsh)
+ # You set the mode with Set-PSReadLineKeyHandler -Key Tab -Function
+
+ # CompletionResult Arguments:
+ # 1) CompletionText text to be used as the auto completion result
+ # 2) ListItemText text to be displayed in the suggestion list
+ # 3) ResultType type of completion result
+ # 4) ToolTip text for the tooltip with details about the object
+
+ switch ($Mode) {
+
+ # bash like
+ "Complete" {
+
+ if ($Values.Length -eq 1) {
+ __%[1]s_debug "Only one completion left"
+
+ # insert space after value
+ [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)")
+
+ } else {
+ # Add the proper number of spaces to align the descriptions
+ while($comp.Name.Length -lt $Longest) {
+ $comp.Name = $comp.Name + " "
+ }
+
+ # Check for empty description and only add parentheses if needed
+ if ($($comp.Description) -eq " " ) {
+ $Description = ""
+ } else {
+ $Description = " ($($comp.Description))"
+ }
+
+ [System.Management.Automation.CompletionResult]::new("$($comp.Name)$Description", "$($comp.Name)$Description", 'ParameterValue', "$($comp.Description)")
+ }
+ }
+
+ # zsh like
+ "MenuComplete" {
+ # insert space after value
+ # MenuComplete will automatically show the ToolTip of
+ # the highlighted value at the bottom of the suggestions.
+ [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)")
+ }
+
+ # TabCompleteNext and in case we get something unknown
+ Default {
+ # Like MenuComplete but we don't want to add a space here because
+ # the user needs to press space anyway to get the completion.
+ # Description will not be shown because that's not possible with TabCompleteNext
+ [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars), "$($comp.Name)", 'ParameterValue', "$($comp.Description)")
+ }
+ }
+
+ }
+}
+
+Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock ${__%[2]sCompleterBlock}
+`, name, nameForVar, compCmd,
+ ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpEnvVar(name)))
+}
+
+func (c *Command) genPowerShellCompletion(w io.Writer, includeDesc bool) error {
+ buf := new(bytes.Buffer)
+ genPowerShellComp(buf, c.Name(), includeDesc)
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+func (c *Command) genPowerShellCompletionFile(filename string, includeDesc bool) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.genPowerShellCompletion(outFile, includeDesc)
+}
+
+// GenPowerShellCompletionFile generates powershell completion file without descriptions.
+func (c *Command) GenPowerShellCompletionFile(filename string) error {
+ return c.genPowerShellCompletionFile(filename, false)
+}
+
+// GenPowerShellCompletion generates powershell completion file without descriptions
+// and writes it to the passed writer.
+func (c *Command) GenPowerShellCompletion(w io.Writer) error {
+ return c.genPowerShellCompletion(w, false)
+}
+
+// GenPowerShellCompletionFileWithDesc generates powershell completion file with descriptions.
+func (c *Command) GenPowerShellCompletionFileWithDesc(filename string) error {
+ return c.genPowerShellCompletionFile(filename, true)
+}
+
+// GenPowerShellCompletionWithDesc generates powershell completion file with descriptions
+// and writes it to the passed writer.
+func (c *Command) GenPowerShellCompletionWithDesc(w io.Writer) error {
+ return c.genPowerShellCompletion(w, true)
+}
diff --git a/vendor/github.com/spf13/cobra/shell_completions.go b/vendor/github.com/spf13/cobra/shell_completions.go
new file mode 100644
index 0000000..b035742
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/shell_completions.go
@@ -0,0 +1,98 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+ "github.com/spf13/pflag"
+)
+
+// MarkFlagRequired instructs the various shell completion implementations to
+// prioritize the named flag when performing completion,
+// and causes your command to report an error if invoked without the flag.
+func (c *Command) MarkFlagRequired(name string) error {
+ return MarkFlagRequired(c.Flags(), name)
+}
+
+// MarkPersistentFlagRequired instructs the various shell completion implementations to
+// prioritize the named persistent flag when performing completion,
+// and causes your command to report an error if invoked without the flag.
+func (c *Command) MarkPersistentFlagRequired(name string) error {
+ return MarkFlagRequired(c.PersistentFlags(), name)
+}
+
+// MarkFlagRequired instructs the various shell completion implementations to
+// prioritize the named flag when performing completion,
+// and causes your command to report an error if invoked without the flag.
+func MarkFlagRequired(flags *pflag.FlagSet, name string) error {
+ return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"})
+}
+
+// MarkFlagFilename instructs the various shell completion implementations to
+// limit completions for the named flag to the specified file extensions.
+func (c *Command) MarkFlagFilename(name string, extensions ...string) error {
+ return MarkFlagFilename(c.Flags(), name, extensions...)
+}
+
+// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists.
+// The bash completion script will call the bash function f for the flag.
+//
+// This will only work for bash completion.
+// It is recommended to instead use c.RegisterFlagCompletionFunc(...) which allows
+// to register a Go function which will work across all shells.
+func (c *Command) MarkFlagCustom(name string, f string) error {
+ return MarkFlagCustom(c.Flags(), name, f)
+}
+
+// MarkPersistentFlagFilename instructs the various shell completion
+// implementations to limit completions for the named persistent flag to the
+// specified file extensions.
+func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error {
+ return MarkFlagFilename(c.PersistentFlags(), name, extensions...)
+}
+
+// MarkFlagFilename instructs the various shell completion implementations to
+// limit completions for the named flag to the specified file extensions.
+func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error {
+ return flags.SetAnnotation(name, BashCompFilenameExt, extensions)
+}
+
+// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists.
+// The bash completion script will call the bash function f for the flag.
+//
+// This will only work for bash completion.
+// It is recommended to instead use c.RegisterFlagCompletionFunc(...) which allows
+// to register a Go function which will work across all shells.
+func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error {
+ return flags.SetAnnotation(name, BashCompCustom, []string{f})
+}
+
+// MarkFlagDirname instructs the various shell completion implementations to
+// limit completions for the named flag to directory names.
+func (c *Command) MarkFlagDirname(name string) error {
+ return MarkFlagDirname(c.Flags(), name)
+}
+
+// MarkPersistentFlagDirname instructs the various shell completion
+// implementations to limit completions for the named persistent flag to
+// directory names.
+func (c *Command) MarkPersistentFlagDirname(name string) error {
+ return MarkFlagDirname(c.PersistentFlags(), name)
+}
+
+// MarkFlagDirname instructs the various shell completion implementations to
+// limit completions for the named flag to directory names.
+func MarkFlagDirname(flags *pflag.FlagSet, name string) error {
+ return flags.SetAnnotation(name, BashCompSubdirsInDir, []string{})
+}
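+
+// Illustrative sketch (not upstream cobra code; the flag names are
+// hypothetical): restricting completion for one flag to YAML files and for
+// another to directory names.
+//
+//	cmd.Flags().String("config", "", "config file")
+//	_ = cmd.MarkFlagFilename("config", "yaml", "yml")
+//	cmd.Flags().String("out-dir", "", "output directory")
+//	_ = cmd.MarkFlagDirname("out-dir")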
diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go
new file mode 100644
index 0000000..1856e4c
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/zsh_completions.go
@@ -0,0 +1,308 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+// GenZshCompletionFile generates zsh completion file including descriptions.
+func (c *Command) GenZshCompletionFile(filename string) error {
+ return c.genZshCompletionFile(filename, true)
+}
+
+// GenZshCompletion generates zsh completion file including descriptions
+// and writes it to the passed writer.
+func (c *Command) GenZshCompletion(w io.Writer) error {
+ return c.genZshCompletion(w, true)
+}
+
+// GenZshCompletionFileNoDesc generates zsh completion file without descriptions.
+func (c *Command) GenZshCompletionFileNoDesc(filename string) error {
+ return c.genZshCompletionFile(filename, false)
+}
+
+// GenZshCompletionNoDesc generates zsh completion file without descriptions
+// and writes it to the passed writer.
+func (c *Command) GenZshCompletionNoDesc(w io.Writer) error {
+ return c.genZshCompletion(w, false)
+}
+
+// MarkZshCompPositionalArgumentFile only worked for zsh and its behavior was
+// not consistent with Bash completion. It has therefore been disabled.
+// Instead, when no other completion is specified, file completion is done by
+// default for every argument. One can disable file completion on a per-argument
+// basis by using ValidArgsFunction and ShellCompDirectiveNoFileComp.
+// To achieve file extension filtering, one can use ValidArgsFunction and
+// ShellCompDirectiveFilterFileExt.
+//
+// Deprecated
+func (c *Command) MarkZshCompPositionalArgumentFile(argPosition int, patterns ...string) error {
+ return nil
+}
+
+// MarkZshCompPositionalArgumentWords only worked for zsh. It has therefore
+// been disabled.
+// To achieve the same behavior across all shells, one can use
+// ValidArgs (for the first argument only) or ValidArgsFunction for
+// any argument (can include the first one also).
+//
+// Deprecated
+func (c *Command) MarkZshCompPositionalArgumentWords(argPosition int, words ...string) error {
+ return nil
+}
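+
+// Illustrative sketch (not upstream cobra code): the replacement the
+// deprecation notes above point to, filtering the first positional argument
+// to YAML files and disabling file completion for later arguments.
+//
+//	cmd.ValidArgsFunction = func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
+//		if len(args) == 0 {
+//			return []string{"yaml", "yml"}, ShellCompDirectiveFilterFileExt
+//		}
+//		return nil, ShellCompDirectiveNoFileComp
+//	}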
+
+func (c *Command) genZshCompletionFile(filename string, includeDesc bool) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.genZshCompletion(outFile, includeDesc)
+}
+
+func (c *Command) genZshCompletion(w io.Writer, includeDesc bool) error {
+ buf := new(bytes.Buffer)
+ genZshComp(buf, c.Name(), includeDesc)
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+func genZshComp(buf io.StringWriter, name string, includeDesc bool) {
+ compCmd := ShellCompRequestCmd
+ if !includeDesc {
+ compCmd = ShellCompNoDescRequestCmd
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf(`#compdef %[1]s
+compdef _%[1]s %[1]s
+
+# zsh completion for %-36[1]s -*- shell-script -*-
+
+__%[1]s_debug()
+{
+ local file="$BASH_COMP_DEBUG_FILE"
+ if [[ -n ${file} ]]; then
+ echo "$*" >> "${file}"
+ fi
+}
+
+_%[1]s()
+{
+ local shellCompDirectiveError=%[3]d
+ local shellCompDirectiveNoSpace=%[4]d
+ local shellCompDirectiveNoFileComp=%[5]d
+ local shellCompDirectiveFilterFileExt=%[6]d
+ local shellCompDirectiveFilterDirs=%[7]d
+ local shellCompDirectiveKeepOrder=%[8]d
+
+ local lastParam lastChar flagPrefix requestComp out directive comp lastComp noSpace keepOrder
+ local -a completions
+
+ __%[1]s_debug "\n========= starting completion logic =========="
+ __%[1]s_debug "CURRENT: ${CURRENT}, words[*]: ${words[*]}"
+
+ # The user could have moved the cursor backwards on the command-line.
+ # We need to trigger completion from the $CURRENT location, so we need
+ # to truncate the command-line ($words) up to the $CURRENT location.
+ # (We cannot use $CURSOR as its value does not work when a command is an alias.)
+ words=("${=words[1,CURRENT]}")
+ __%[1]s_debug "Truncated words[*]: ${words[*]},"
+
+ lastParam=${words[-1]}
+ lastChar=${lastParam[-1]}
+ __%[1]s_debug "lastParam: ${lastParam}, lastChar: ${lastChar}"
+
+ # For zsh, when completing a flag with an = (e.g., %[1]s -n=)
+ # completions must be prefixed with the flag
+ setopt local_options BASH_REMATCH
+ if [[ "${lastParam}" =~ '-.*=' ]]; then
+ # We are dealing with a flag with an =
+ flagPrefix="-P ${BASH_REMATCH}"
+ fi
+
+ # Prepare the command to obtain completions
+ requestComp="${words[1]} %[2]s ${words[2,-1]}"
+ if [ "${lastChar}" = "" ]; then
+ # If the last parameter is complete (there is a space following it)
+ # We add an extra empty parameter so we can indicate this to the go completion code.
+ __%[1]s_debug "Adding extra empty parameter"
+ requestComp="${requestComp} \"\""
+ fi
+
+ __%[1]s_debug "About to call: eval ${requestComp}"
+
+ # Use eval to handle any environment variables and such
+ out=$(eval ${requestComp} 2>/dev/null)
+ __%[1]s_debug "completion output: ${out}"
+
+ # Extract the directive integer following a : from the last line
+ local lastLine
+ while IFS='\n' read -r line; do
+ lastLine=${line}
+ done < <(printf "%%s\n" "${out[@]}")
+ __%[1]s_debug "last line: ${lastLine}"
+
+ if [ "${lastLine[1]}" = : ]; then
+ directive=${lastLine[2,-1]}
+ # Remove the directive including the : and the newline
+ local suffix
+ (( suffix=${#lastLine}+2))
+ out=${out[1,-$suffix]}
+ else
+ # There is no directive specified. Leave $out as is.
+ __%[1]s_debug "No directive found. Setting to default"
+ directive=0
+ fi
+
+ __%[1]s_debug "directive: ${directive}"
+ __%[1]s_debug "completions: ${out}"
+ __%[1]s_debug "flagPrefix: ${flagPrefix}"
+
+ if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then
+ __%[1]s_debug "Completion received error. Ignoring completions."
+ return
+ fi
+
+ local activeHelpMarker="%[9]s"
+ local endIndex=${#activeHelpMarker}
+ local startIndex=$((${#activeHelpMarker}+1))
+ local hasActiveHelp=0
+ while IFS='\n' read -r comp; do
+ # Check if this is an activeHelp statement (i.e., prefixed with $activeHelpMarker)
+ if [ "${comp[1,$endIndex]}" = "$activeHelpMarker" ];then
+ __%[1]s_debug "ActiveHelp found: $comp"
+ comp="${comp[$startIndex,-1]}"
+ if [ -n "$comp" ]; then
+ compadd -x "${comp}"
+ __%[1]s_debug "ActiveHelp will need delimiter"
+ hasActiveHelp=1
+ fi
+
+ continue
+ fi
+
+ if [ -n "$comp" ]; then
+ # If requested, completions are returned with a description.
+ # The description is preceded by a TAB character.
+ # For zsh's _describe, we need to use a : instead of a TAB.
+ # We first need to escape any : as part of the completion itself.
+ comp=${comp//:/\\:}
+
+ local tab="$(printf '\t')"
+ comp=${comp//$tab/:}
+
+ __%[1]s_debug "Adding completion: ${comp}"
+ completions+=${comp}
+ lastComp=$comp
+ fi
+ done < <(printf "%%s\n" "${out[@]}")
+
+ # Add a delimiter after the activeHelp statements, but only if:
+ # - there are completions following the activeHelp statements, or
+ # - file completion will be performed (so there will be choices after the activeHelp)
+ if [ $hasActiveHelp -eq 1 ]; then
+ if [ ${#completions} -ne 0 ] || [ $((directive & shellCompDirectiveNoFileComp)) -eq 0 ]; then
+ __%[1]s_debug "Adding activeHelp delimiter"
+ compadd -x "--"
+ hasActiveHelp=0
+ fi
+ fi
+
+ if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then
+ __%[1]s_debug "Activating nospace."
+ noSpace="-S ''"
+ fi
+
+ if [ $((directive & shellCompDirectiveKeepOrder)) -ne 0 ]; then
+ __%[1]s_debug "Activating keep order."
+ keepOrder="-V"
+ fi
+
+ if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then
+ # File extension filtering
+ local filteringCmd
+ filteringCmd='_files'
+ for filter in ${completions[@]}; do
+ if [ ${filter[1]} != '*' ]; then
+ # zsh requires a glob pattern to do file filtering
+ filter="\*.$filter"
+ fi
+ filteringCmd+=" -g $filter"
+ done
+ filteringCmd+=" ${flagPrefix}"
+
+ __%[1]s_debug "File filtering command: $filteringCmd"
+ _arguments '*:filename:'"$filteringCmd"
+ elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then
+ # File completion for directories only
+ local subdir
+ subdir="${completions[1]}"
+ if [ -n "$subdir" ]; then
+ __%[1]s_debug "Listing directories in $subdir"
+ pushd "${subdir}" >/dev/null 2>&1
+ else
+ __%[1]s_debug "Listing directories in ."
+ fi
+
+ local result
+ _arguments '*:dirname:_files -/'" ${flagPrefix}"
+ result=$?
+ if [ -n "$subdir" ]; then
+ popd >/dev/null 2>&1
+ fi
+ return $result
+ else
+ __%[1]s_debug "Calling _describe"
+ if eval _describe $keepOrder "completions" completions $flagPrefix $noSpace; then
+ __%[1]s_debug "_describe found some completions"
+
+ # Return the success of having called _describe
+ return 0
+ else
+ __%[1]s_debug "_describe did not find completions."
+ __%[1]s_debug "Checking if we should do file completion."
+ if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then
+ __%[1]s_debug "deactivating file completion"
+
+ # We must return an error code here to let zsh know that there were no
+ # completions found by _describe; this is what will trigger other
+ # matching algorithms to attempt to find completions.
+ # For example zsh can match letters in the middle of words.
+ return 1
+ else
+ # Perform file completion
+ __%[1]s_debug "Activating file completion"
+
+ # We must return the result of this command, so it must be the
+ # last command, or else we must store its result to return it.
+ _arguments '*:filename:_files'" ${flagPrefix}"
+ fi
+ fi
+ fi
+}
+
+# don't run the completion function when being source-ed or eval-ed
+if [ "$funcstack[1]" = "_%[1]s" ]; then
+ _%[1]s
+fi
+`, name, compCmd,
+ ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder,
+ activeHelpMarker))
+}
diff --git a/vendor/github.com/spf13/pflag/.gitignore b/vendor/github.com/spf13/pflag/.gitignore
new file mode 100644
index 0000000..c3da290
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/.gitignore
@@ -0,0 +1,2 @@
+.idea/*
+
diff --git a/vendor/github.com/spf13/pflag/.travis.yml b/vendor/github.com/spf13/pflag/.travis.yml
new file mode 100644
index 0000000..00d04cb
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/.travis.yml
@@ -0,0 +1,22 @@
+sudo: false
+
+language: go
+
+go:
+ - 1.9.x
+ - 1.10.x
+ - 1.11.x
+ - tip
+
+matrix:
+ allow_failures:
+ - go: tip
+
+install:
+ - go get golang.org/x/lint/golint
+ - export PATH=$GOPATH/bin:$PATH
+ - go install ./...
+
+script:
+ - verify/all.sh -v
+ - go test ./...
diff --git a/vendor/github.com/spf13/pflag/LICENSE b/vendor/github.com/spf13/pflag/LICENSE
new file mode 100644
index 0000000..63ed1cf
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 Alex Ogier. All rights reserved.
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md
new file mode 100644
index 0000000..7eacc5b
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/README.md
@@ -0,0 +1,296 @@
+[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag)
+[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/pflag)](https://goreportcard.com/report/github.com/spf13/pflag)
+[![GoDoc](https://godoc.org/github.com/spf13/pflag?status.svg)](https://godoc.org/github.com/spf13/pflag)
+
+## Description
+
+pflag is a drop-in replacement for Go's flag package, implementing
+POSIX/GNU-style --flags.
+
+pflag is compatible with the [GNU extensions to the POSIX recommendations
+for command-line options][1]. For a more precise description, see the
+"Command-line flag syntax" section below.
+
+[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
+
+pflag is available under the same style of BSD license as the Go language,
+which can be found in the LICENSE file.
+
+## Installation
+
+pflag is available using the standard `go get` command.
+
+Install by running:
+
+ go get github.com/spf13/pflag
+
+Run tests by running:
+
+ go test github.com/spf13/pflag
+
+## Usage
+
+pflag is a drop-in replacement for Go's native flag package. If you import
+pflag under the name "flag" then all code should continue to function
+with no changes.
+
+``` go
+import flag "github.com/spf13/pflag"
+```
+
+There is one exception to this: if you directly instantiate the Flag struct
+there is one more field "Shorthand" that you will need to set.
+Most code never instantiates this struct directly, and instead uses
+functions such as String(), BoolVar(), and Var(), and is therefore
+unaffected.
+
+Define flags using flag.String(), Bool(), Int(), etc.
+
+This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
+
+``` go
+var ip *int = flag.Int("flagname", 1234, "help message for flagname")
+```
+
+If you like, you can bind the flag to a variable using the Var() functions.
+
+``` go
+var flagvar int
+func init() {
+ flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
+}
+```
+
+Or you can create custom flags that satisfy the Value interface (with
+pointer receivers) and couple them to flag parsing by
+
+``` go
+flag.Var(&flagVal, "name", "help message for flagname")
+```
+
+For such flags, the default value is just the initial value of the variable.
+
+After all flags are defined, call
+
+``` go
+flag.Parse()
+```
+
+to parse the command line into the defined flags.
+
+Flags may then be used directly. If you're using the flags themselves,
+they are all pointers; if you bind to variables, they're values.
+
+``` go
+fmt.Println("ip has value ", *ip)
+fmt.Println("flagvar has value ", flagvar)
+```
+
+There are helper functions available to get the value stored in a Flag if you have a FlagSet but find
+it difficult to keep up with all of the pointers in your code.
+If you have a pflag.FlagSet with a flag called 'flagname' of type int you
+can use GetInt() to get the int value. But notice that 'flagname' must exist
+and it must be an int. GetString("flagname") will fail.
+
+``` go
+i, err := flagset.GetInt("flagname")
+```
+
+After parsing, the arguments following the flags are available as the
+slice flag.Args() or individually as flag.Arg(i).
+The arguments are indexed from 0 through flag.NArg()-1.
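+
+For example, a minimal sketch of inspecting the leftover arguments:
+
+``` go
+fmt.Printf("%d positional args: %v\n", flag.NArg(), flag.Args())
+if flag.NArg() > 0 {
+	fmt.Println("first arg:", flag.Arg(0))
+}
+```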
+
+The pflag package also defines some new functions that are not in flag,
+that give one-letter shorthands for flags. You can use these by appending
+'P' to the name of any function that defines a flag.
+
+``` go
+var ip = flag.IntP("flagname", "f", 1234, "help message")
+var flagvar bool
+func init() {
+ flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
+}
+flag.VarP(&flagVal, "varname", "v", "help message")
+```
+
+Shorthand letters can be used with single dashes on the command line.
+Boolean shorthand flags can be combined with other shorthand flags.
+
+The default set of command-line flags is controlled by
+top-level functions. The FlagSet type allows one to define
+independent sets of flags, such as to implement subcommands
+in a command-line interface. The methods of FlagSet are
+analogous to the top-level functions for the command-line
+flag set.
+
+## Setting no option default values for flags
+
+After you create a flag it is possible to set the pflag.NoOptDefVal for
+the given flag. Doing this changes the meaning of the flag slightly. If
+a flag has a NoOptDefVal and the flag is set on the command line without
+an option, the flag will be set to the NoOptDefVal. For example, given:
+
+``` go
+var ip = flag.IntP("flagname", "f", 1234, "help message")
+flag.Lookup("flagname").NoOptDefVal = "4321"
+```
+
+Would result in something like
+
+| Parsed Arguments | Resulting Value |
+| ------------- | ------------- |
+| --flagname=1357 | ip=1357 |
+| --flagname | ip=4321 |
+| [nothing] | ip=1234 |
+
+## Command line flag syntax
+
+```
+--flag // boolean flags, or flags with no option default values
+--flag x // only on flags without a default value
+--flag=x
+```
+
+Unlike the flag package, a single dash before an option means something
+different than a double dash. Single dashes signify a series of shorthand
+letters for flags. All but the last shorthand letter must be boolean flags
+or flags where the 'no option default value' is set.
+
+```
+// boolean or flags where the 'no option default value' is set
+-f
+-f=true
+-abc
+but
+-b true is INVALID
+
+// non-boolean and flags without a 'no option default value'
+-n 1234
+-n=1234
+-n1234
+
+// mixed
+-abcs "hello"
+-absd="hello"
+-abcs1234
+```
+
+Flag parsing stops after the terminator "--". Unlike the flag package,
+flags can be interspersed with arguments anywhere on the command line
+before this terminator.
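+
+For example (a sketch, assuming a boolean flag --verbose and a program
+named myprog):
+
+```
+myprog one --verbose two -- --not-a-flag
+// --verbose is parsed as a flag; Args() is ["one", "two", "--not-a-flag"]
+```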
+
+Integer flags accept 1234, 0664, 0x1234 and may be negative.
+Boolean flags (in their long form) accept 1, 0, t, f, true, false,
+TRUE, FALSE, True, False.
+Duration flags accept any input valid for time.ParseDuration.
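+
+For example (using a hypothetical "timeout" flag), any of --timeout=90s,
+--timeout=1.5m or --timeout 250ms would be accepted for:
+
+``` go
+var timeout = flag.Duration("timeout", 5*time.Second, "request timeout")
+```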
+
+## Mutating or "Normalizing" Flag names
+
+It is possible to set a custom flag name 'normalization function'. It allows flag names to be mutated, both when created in the code and when used on the command line, into some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow.
+
+**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag
+
+``` go
+func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
+ from := []string{"-", "_"}
+ to := "."
+ for _, sep := range from {
+ name = strings.Replace(name, sep, to, -1)
+ }
+ return pflag.NormalizedName(name)
+}
+
+myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc)
+```
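+
+With that function installed, all three spellings resolve to the same flag
+(a sketch; "my-flag" is a hypothetical flag on `myFlagSet`):
+
+``` go
+myFlagSet.String("my-flag", "", "demo flag")
+myFlagSet.Parse([]string{"--my_flag=value"}) // sets the same flag as --my-flag
+```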
+
+**Example #2**: You want to alias two flags. aka --old-flag-name == --new-flag-name
+
+``` go
+func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
+ switch name {
+ case "old-flag-name":
+ name = "new-flag-name"
+ }
+ return pflag.NormalizedName(name)
+}
+
+myFlagSet.SetNormalizeFunc(aliasNormalizeFunc)
+```
+
+## Deprecating a flag or its shorthand
+It is possible to deprecate a flag, or just its shorthand. Deprecating a flag/shorthand hides it from help text and prints a usage message when the deprecated flag/shorthand is used.
+
+**Example #1**: You want to deprecate a flag named "badflag" as well as inform the users what flag they should use instead.
+```go
+// deprecate a flag by specifying its name and a usage message
+flags.MarkDeprecated("badflag", "please use --good-flag instead")
+```
+This hides "badflag" from help text, and prints `Flag --badflag has been deprecated, please use --good-flag instead` when "badflag" is used.
+
+**Example #2**: You want to keep a flag named "noshorthandflag" but deprecate its shorthand "n".
+```go
+// deprecate a flag shorthand by specifying its flag name and a usage message
+flags.MarkShorthandDeprecated("noshorthandflag", "please use --noshorthandflag only")
+```
+This hides the shorthand "n" from help text, and prints `Flag shorthand -n has been deprecated, please use --noshorthandflag only` when the shorthand "n" is used.
+
+Note that the usage message is essential here; it must not be empty.
+
+## Hidden flags
+It is possible to mark a flag as hidden, meaning it will still function as normal, but it will not show up in usage/help text.
+
+**Example**: You have a flag named "secretFlag" that you need for internal use only, and you don't want it to show up in the help text or have its usage text available.
+```go
+// hide a flag by specifying its name
+flags.MarkHidden("secretFlag")
+```
+
+## Disable sorting of flags
+`pflag` allows you to disable sorting of flags for help and usage messages.
+
+**Example**:
+```go
+flags.BoolP("verbose", "v", false, "verbose output")
+flags.String("coolflag", "yeaah", "it's a really cool flag")
+flags.Int("usefulflag", 777, "sometimes it's very useful")
+flags.SortFlags = false
+flags.PrintDefaults()
+```
+**Output**:
+```
+  -v, --verbose           verbose output
+      --coolflag string   it's a really cool flag (default "yeaah")
+      --usefulflag int    sometimes it's very useful (default 777)
+```
+
+
+## Supporting Go flags when using pflag
+In order to support flags defined using Go's `flag` package, they must be added to the `pflag` flagset. This is usually necessary
+to support flags defined by third-party dependencies (e.g. `golang/glog`).
+
+**Example**: You want to add the Go flags to the `CommandLine` flagset
+```go
+import (
+ goflag "flag"
+ flag "github.com/spf13/pflag"
+)
+
+var ip *int = flag.Int("flagname", 1234, "help message for flagname")
+
+func main() {
+ flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
+ flag.Parse()
+}
+```
+
+## More info
+
+You can see the full reference documentation of the pflag package
+[at godoc.org][3], or through Go's standard documentation system by
+running `godoc -http=:6060` and browsing to
+[http://localhost:6060/pkg/github.com/spf13/pflag][2] after
+installation.
+
+[2]: http://localhost:6060/pkg/github.com/spf13/pflag
+[3]: http://godoc.org/github.com/spf13/pflag
diff --git a/vendor/github.com/spf13/pflag/bool.go b/vendor/github.com/spf13/pflag/bool.go
new file mode 100644
index 0000000..c4c5c0b
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bool.go
@@ -0,0 +1,94 @@
+package pflag
+
+import "strconv"
+
+// optional interface to indicate boolean flags that can be
+// supplied without "=value" text
+type boolFlag interface {
+ Value
+ IsBoolFlag() bool
+}
+
+// -- bool Value
+type boolValue bool
+
+func newBoolValue(val bool, p *bool) *boolValue {
+ *p = val
+ return (*boolValue)(p)
+}
+
+func (b *boolValue) Set(s string) error {
+ v, err := strconv.ParseBool(s)
+ *b = boolValue(v)
+ return err
+}
+
+func (b *boolValue) Type() string {
+ return "bool"
+}
+
+func (b *boolValue) String() string { return strconv.FormatBool(bool(*b)) }
+
+func (b *boolValue) IsBoolFlag() bool { return true }
+
+func boolConv(sval string) (interface{}, error) {
+ return strconv.ParseBool(sval)
+}
+
+// GetBool returns the bool value of a flag with the given name
+func (f *FlagSet) GetBool(name string) (bool, error) {
+ val, err := f.getFlagType(name, "bool", boolConv)
+ if err != nil {
+ return false, err
+ }
+ return val.(bool), nil
+}
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) {
+ f.BoolVarP(p, name, "", value, usage)
+}
+
+// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
+ flag := f.VarPF(newBoolValue(value, p), name, shorthand, usage)
+ flag.NoOptDefVal = "true"
+}
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func BoolVar(p *bool, name string, value bool, usage string) {
+ BoolVarP(p, name, "", value, usage)
+}
+
+// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
+func BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
+ flag := CommandLine.VarPF(newBoolValue(value, p), name, shorthand, usage)
+ flag.NoOptDefVal = "true"
+}
+
+// Bool defines a bool flag with specified name, default value, and usage string.
+// The return value is the address of a bool variable that stores the value of the flag.
+func (f *FlagSet) Bool(name string, value bool, usage string) *bool {
+ return f.BoolP(name, "", value, usage)
+}
+
+// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool {
+ p := new(bool)
+ f.BoolVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Bool defines a bool flag with specified name, default value, and usage string.
+// The return value is the address of a bool variable that stores the value of the flag.
+func Bool(name string, value bool, usage string) *bool {
+ return BoolP(name, "", value, usage)
+}
+
+// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash.
+func BoolP(name, shorthand string, value bool, usage string) *bool {
+ b := CommandLine.BoolP(name, shorthand, value, usage)
+ return b
+}
diff --git a/vendor/github.com/spf13/pflag/bool_slice.go b/vendor/github.com/spf13/pflag/bool_slice.go
new file mode 100644
index 0000000..3731370
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bool_slice.go
@@ -0,0 +1,185 @@
+package pflag
+
+import (
+ "io"
+ "strconv"
+ "strings"
+)
+
+// -- boolSlice Value
+type boolSliceValue struct {
+ value *[]bool
+ changed bool
+}
+
+func newBoolSliceValue(val []bool, p *[]bool) *boolSliceValue {
+ bsv := new(boolSliceValue)
+ bsv.value = p
+ *bsv.value = val
+ return bsv
+}
+
+// Set converts, and assigns, the comma-separated boolean argument string representation as the []bool value of this flag.
+// If Set is called on a flag that already has a []bool assigned, the newly converted values will be appended.
+func (s *boolSliceValue) Set(val string) error {
+
+ // remove all quote characters
+ rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "")
+
+ // read flag arguments with CSV parser
+ boolStrSlice, err := readAsCSV(rmQuote.Replace(val))
+ if err != nil && err != io.EOF {
+ return err
+ }
+
+ // parse boolean values into slice
+ out := make([]bool, 0, len(boolStrSlice))
+ for _, boolStr := range boolStrSlice {
+ b, err := strconv.ParseBool(strings.TrimSpace(boolStr))
+ if err != nil {
+ return err
+ }
+ out = append(out, b)
+ }
+
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+
+ s.changed = true
+
+ return nil
+}
+
+// Type returns a string that uniquely represents this flag's type.
+func (s *boolSliceValue) Type() string {
+ return "boolSlice"
+}
+
+// String defines a "native" format for this boolean slice flag value.
+func (s *boolSliceValue) String() string {
+
+ boolStrSlice := make([]string, len(*s.value))
+ for i, b := range *s.value {
+ boolStrSlice[i] = strconv.FormatBool(b)
+ }
+
+ out, _ := writeAsCSV(boolStrSlice)
+
+ return "[" + out + "]"
+}
+
+func (s *boolSliceValue) fromString(val string) (bool, error) {
+ return strconv.ParseBool(val)
+}
+
+func (s *boolSliceValue) toString(val bool) string {
+ return strconv.FormatBool(val)
+}
+
+func (s *boolSliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *boolSliceValue) Replace(val []string) error {
+ out := make([]bool, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *boolSliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func boolSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []bool{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]bool, len(ss))
+ for i, t := range ss {
+ var err error
+ out[i], err = strconv.ParseBool(t)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return out, nil
+}
+
+// GetBoolSlice returns the []bool value of a flag with the given name.
+func (f *FlagSet) GetBoolSlice(name string) ([]bool, error) {
+ val, err := f.getFlagType(name, "boolSlice", boolSliceConv)
+ if err != nil {
+ return []bool{}, err
+ }
+ return val.([]bool), nil
+}
+
+// BoolSliceVar defines a boolSlice flag with specified name, default value, and usage string.
+// The argument p points to a []bool variable in which to store the value of the flag.
+func (f *FlagSet) BoolSliceVar(p *[]bool, name string, value []bool, usage string) {
+ f.VarP(newBoolSliceValue(value, p), name, "", usage)
+}
+
+// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) {
+ f.VarP(newBoolSliceValue(value, p), name, shorthand, usage)
+}
+
+// BoolSliceVar defines a []bool flag with specified name, default value, and usage string.
+// The argument p points to a []bool variable in which to store the value of the flag.
+func BoolSliceVar(p *[]bool, name string, value []bool, usage string) {
+ CommandLine.VarP(newBoolSliceValue(value, p), name, "", usage)
+}
+
+// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) {
+ CommandLine.VarP(newBoolSliceValue(value, p), name, shorthand, usage)
+}
+
+// BoolSlice defines a []bool flag with specified name, default value, and usage string.
+// The return value is the address of a []bool variable that stores the value of the flag.
+func (f *FlagSet) BoolSlice(name string, value []bool, usage string) *[]bool {
+ p := []bool{}
+ f.BoolSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool {
+ p := []bool{}
+ f.BoolSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// BoolSlice defines a []bool flag with specified name, default value, and usage string.
+// The return value is the address of a []bool variable that stores the value of the flag.
+func BoolSlice(name string, value []bool, usage string) *[]bool {
+ return CommandLine.BoolSliceP(name, "", value, usage)
+}
+
+// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash.
+func BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool {
+ return CommandLine.BoolSliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/bytes.go b/vendor/github.com/spf13/pflag/bytes.go
new file mode 100644
index 0000000..67d5304
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bytes.go
@@ -0,0 +1,209 @@
+package pflag
+
+import (
+ "encoding/base64"
+ "encoding/hex"
+ "fmt"
+ "strings"
+)
+
+// BytesHex adapts []byte for use as a flag. Value of flag is HEX encoded
+type bytesHexValue []byte
+
+// String implements pflag.Value.String.
+func (bytesHex bytesHexValue) String() string {
+ return fmt.Sprintf("%X", []byte(bytesHex))
+}
+
+// Set implements pflag.Value.Set.
+func (bytesHex *bytesHexValue) Set(value string) error {
+ bin, err := hex.DecodeString(strings.TrimSpace(value))
+
+ if err != nil {
+ return err
+ }
+
+ *bytesHex = bin
+
+ return nil
+}
+
+// Type implements pflag.Value.Type.
+func (*bytesHexValue) Type() string {
+ return "bytesHex"
+}
+
+func newBytesHexValue(val []byte, p *[]byte) *bytesHexValue {
+ *p = val
+ return (*bytesHexValue)(p)
+}
+
+func bytesHexConv(sval string) (interface{}, error) {
+
+ bin, err := hex.DecodeString(sval)
+
+ if err == nil {
+ return bin, nil
+ }
+
+ return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err)
+}
+
+// GetBytesHex returns the []byte value of a flag with the given name
+func (f *FlagSet) GetBytesHex(name string) ([]byte, error) {
+ val, err := f.getFlagType(name, "bytesHex", bytesHexConv)
+
+ if err != nil {
+ return []byte{}, err
+ }
+
+ return val.([]byte), nil
+}
+
+// BytesHexVar defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func (f *FlagSet) BytesHexVar(p *[]byte, name string, value []byte, usage string) {
+ f.VarP(newBytesHexValue(value, p), name, "", usage)
+}
+
+// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+ f.VarP(newBytesHexValue(value, p), name, shorthand, usage)
+}
+
+// BytesHexVar defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func BytesHexVar(p *[]byte, name string, value []byte, usage string) {
+ CommandLine.VarP(newBytesHexValue(value, p), name, "", usage)
+}
+
+// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash.
+func BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+ CommandLine.VarP(newBytesHexValue(value, p), name, shorthand, usage)
+}
+
+// BytesHex defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func (f *FlagSet) BytesHex(name string, value []byte, usage string) *[]byte {
+ p := new([]byte)
+ f.BytesHexVarP(p, name, "", value, usage)
+ return p
+}
+
+// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesHexP(name, shorthand string, value []byte, usage string) *[]byte {
+ p := new([]byte)
+ f.BytesHexVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// BytesHex defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func BytesHex(name string, value []byte, usage string) *[]byte {
+ return CommandLine.BytesHexP(name, "", value, usage)
+}
+
+// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash.
+func BytesHexP(name, shorthand string, value []byte, usage string) *[]byte {
+ return CommandLine.BytesHexP(name, shorthand, value, usage)
+}
+
+// BytesBase64 adapts []byte for use as a flag. Value of flag is Base64 encoded
+type bytesBase64Value []byte
+
+// String implements pflag.Value.String.
+func (bytesBase64 bytesBase64Value) String() string {
+ return base64.StdEncoding.EncodeToString([]byte(bytesBase64))
+}
+
+// Set implements pflag.Value.Set.
+func (bytesBase64 *bytesBase64Value) Set(value string) error {
+ bin, err := base64.StdEncoding.DecodeString(strings.TrimSpace(value))
+
+ if err != nil {
+ return err
+ }
+
+ *bytesBase64 = bin
+
+ return nil
+}
+
+// Type implements pflag.Value.Type.
+func (*bytesBase64Value) Type() string {
+ return "bytesBase64"
+}
+
+func newBytesBase64Value(val []byte, p *[]byte) *bytesBase64Value {
+ *p = val
+ return (*bytesBase64Value)(p)
+}
+
+func bytesBase64ValueConv(sval string) (interface{}, error) {
+
+ bin, err := base64.StdEncoding.DecodeString(sval)
+ if err == nil {
+ return bin, nil
+ }
+
+ return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err)
+}
+
+// GetBytesBase64 returns the []byte value of a flag with the given name
+func (f *FlagSet) GetBytesBase64(name string) ([]byte, error) {
+ val, err := f.getFlagType(name, "bytesBase64", bytesBase64ValueConv)
+
+ if err != nil {
+ return []byte{}, err
+ }
+
+ return val.([]byte), nil
+}
+
+// BytesBase64Var defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func (f *FlagSet) BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
+ f.VarP(newBytesBase64Value(value, p), name, "", usage)
+}
+
+// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+ f.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
+}
+
+// BytesBase64Var defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
+ CommandLine.VarP(newBytesBase64Value(value, p), name, "", usage)
+}
+
+// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
+func BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+ CommandLine.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
+}
+
+// BytesBase64 defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func (f *FlagSet) BytesBase64(name string, value []byte, usage string) *[]byte {
+ p := new([]byte)
+ f.BytesBase64VarP(p, name, "", value, usage)
+ return p
+}
+
+// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
+ p := new([]byte)
+ f.BytesBase64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// BytesBase64 defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func BytesBase64(name string, value []byte, usage string) *[]byte {
+ return CommandLine.BytesBase64P(name, "", value, usage)
+}
+
+// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
+func BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
+ return CommandLine.BytesBase64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go
new file mode 100644
index 0000000..a0b2679
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/count.go
@@ -0,0 +1,96 @@
+package pflag
+
+import "strconv"
+
+// -- count Value
+type countValue int
+
+func newCountValue(val int, p *int) *countValue {
+ *p = val
+ return (*countValue)(p)
+}
+
+func (i *countValue) Set(s string) error {
+ // "+1" means that no specific value was passed, so increment
+ if s == "+1" {
+ *i = countValue(*i + 1)
+ return nil
+ }
+ v, err := strconv.ParseInt(s, 0, 0)
+ *i = countValue(v)
+ return err
+}
+
+func (i *countValue) Type() string {
+ return "count"
+}
+
+func (i *countValue) String() string { return strconv.Itoa(int(*i)) }
+
+func countConv(sval string) (interface{}, error) {
+ i, err := strconv.Atoi(sval)
+ if err != nil {
+ return nil, err
+ }
+ return i, nil
+}
+
+// GetCount returns the int value of a flag with the given name
+func (f *FlagSet) GetCount(name string) (int, error) {
+ val, err := f.getFlagType(name, "count", countConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int), nil
+}
+
+// CountVar defines a count flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line
+func (f *FlagSet) CountVar(p *int, name string, usage string) {
+ f.CountVarP(p, name, "", usage)
+}
+
+// CountVarP is like CountVar, but takes a shorthand for the flag name.
+func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) {
+ flag := f.VarPF(newCountValue(0, p), name, shorthand, usage)
+ flag.NoOptDefVal = "+1"
+}
+
+// CountVar is like FlagSet.CountVar, only the flag is placed on the CommandLine instead of a given flag set
+func CountVar(p *int, name string, usage string) {
+ CommandLine.CountVar(p, name, usage)
+}
+
+// CountVarP is like CountVar, but takes a shorthand for the flag name.
+func CountVarP(p *int, name, shorthand string, usage string) {
+ CommandLine.CountVarP(p, name, shorthand, usage)
+}
+
+// Count defines a count flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line
+func (f *FlagSet) Count(name string, usage string) *int {
+ p := new(int)
+ f.CountVarP(p, name, "", usage)
+ return p
+}
+
+// CountP is like Count, but takes a shorthand for the flag name.
+func (f *FlagSet) CountP(name, shorthand string, usage string) *int {
+ p := new(int)
+ f.CountVarP(p, name, shorthand, usage)
+ return p
+}
+
+// Count defines a count flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line
+func Count(name string, usage string) *int {
+ return CommandLine.CountP(name, "", usage)
+}
+
+// CountP is like Count, but takes a shorthand for the flag name.
+func CountP(name, shorthand string, usage string) *int {
+ return CommandLine.CountP(name, shorthand, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/duration.go b/vendor/github.com/spf13/pflag/duration.go
new file mode 100644
index 0000000..e9debef
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/duration.go
@@ -0,0 +1,86 @@
+package pflag
+
+import (
+ "time"
+)
+
+// -- time.Duration Value
+type durationValue time.Duration
+
+func newDurationValue(val time.Duration, p *time.Duration) *durationValue {
+ *p = val
+ return (*durationValue)(p)
+}
+
+func (d *durationValue) Set(s string) error {
+ v, err := time.ParseDuration(s)
+ *d = durationValue(v)
+ return err
+}
+
+func (d *durationValue) Type() string {
+ return "duration"
+}
+
+func (d *durationValue) String() string { return (*time.Duration)(d).String() }
+
+func durationConv(sval string) (interface{}, error) {
+ return time.ParseDuration(sval)
+}
+
+// GetDuration returns the duration value of a flag with the given name
+func (f *FlagSet) GetDuration(name string) (time.Duration, error) {
+ val, err := f.getFlagType(name, "duration", durationConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(time.Duration), nil
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
+ f.VarP(newDurationValue(value, p), name, "", usage)
+}
+
+// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
+ f.VarP(newDurationValue(value, p), name, shorthand, usage)
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+func DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
+ CommandLine.VarP(newDurationValue(value, p), name, "", usage)
+}
+
+// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
+func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
+ CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage)
+}
+
+// Duration defines a time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a time.Duration variable that stores the value of the flag.
+func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration {
+ p := new(time.Duration)
+ f.DurationVarP(p, name, "", value, usage)
+ return p
+}
+
+// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
+ p := new(time.Duration)
+ f.DurationVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Duration defines a time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a time.Duration variable that stores the value of the flag.
+func Duration(name string, value time.Duration, usage string) *time.Duration {
+ return CommandLine.DurationP(name, "", value, usage)
+}
+
+// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash.
+func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
+ return CommandLine.DurationP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/duration_slice.go b/vendor/github.com/spf13/pflag/duration_slice.go
new file mode 100644
index 0000000..badadda
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/duration_slice.go
@@ -0,0 +1,166 @@
+package pflag
+
+import (
+ "fmt"
+ "strings"
+ "time"
+)
+
+// -- durationSlice Value
+type durationSliceValue struct {
+ value *[]time.Duration
+ changed bool
+}
+
+func newDurationSliceValue(val []time.Duration, p *[]time.Duration) *durationSliceValue {
+ dsv := new(durationSliceValue)
+ dsv.value = p
+ *dsv.value = val
+ return dsv
+}
+
+func (s *durationSliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]time.Duration, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = time.ParseDuration(d)
+ if err != nil {
+ return err
+ }
+
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *durationSliceValue) Type() string {
+ return "durationSlice"
+}
+
+func (s *durationSliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%s", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *durationSliceValue) fromString(val string) (time.Duration, error) {
+ return time.ParseDuration(val)
+}
+
+func (s *durationSliceValue) toString(val time.Duration) string {
+ return fmt.Sprintf("%s", val)
+}
+
+func (s *durationSliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *durationSliceValue) Replace(val []string) error {
+ out := make([]time.Duration, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *durationSliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func durationSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []time.Duration{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]time.Duration, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = time.ParseDuration(d)
+ if err != nil {
+ return nil, err
+ }
+
+ }
+ return out, nil
+}
+
+// GetDurationSlice returns the []time.Duration value of a flag with the given name
+func (f *FlagSet) GetDurationSlice(name string) ([]time.Duration, error) {
+ val, err := f.getFlagType(name, "durationSlice", durationSliceConv)
+ if err != nil {
+ return []time.Duration{}, err
+ }
+ return val.([]time.Duration), nil
+}
+
+// DurationSliceVar defines a durationSlice flag with specified name, default value, and usage string.
+// The argument p points to a []time.Duration variable in which to store the value of the flag.
+func (f *FlagSet) DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) {
+ f.VarP(newDurationSliceValue(value, p), name, "", usage)
+}
+
+// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) {
+ f.VarP(newDurationSliceValue(value, p), name, shorthand, usage)
+}
+
+// DurationSliceVar defines a []time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a []time.Duration variable in which to store the value of the flag.
+func DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) {
+ CommandLine.VarP(newDurationSliceValue(value, p), name, "", usage)
+}
+
+// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) {
+ CommandLine.VarP(newDurationSliceValue(value, p), name, shorthand, usage)
+}
+
+// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a []time.Duration variable that stores the value of the flag.
+func (f *FlagSet) DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration {
+ p := []time.Duration{}
+ f.DurationSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration {
+ p := []time.Duration{}
+ f.DurationSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a []time.Duration variable that stores the value of the flag.
+func DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration {
+ return CommandLine.DurationSliceP(name, "", value, usage)
+}
+
+// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash.
+func DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration {
+ return CommandLine.DurationSliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go
new file mode 100644
index 0000000..24a5036
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/flag.go
@@ -0,0 +1,1239 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package pflag is a drop-in replacement for Go's flag package, implementing
+POSIX/GNU-style --flags.
+
+pflag is compatible with the GNU extensions to the POSIX recommendations
+for command-line options. See
+http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
+
+Usage:
+
+pflag is a drop-in replacement for Go's native flag package. If you import
+pflag under the name "flag" then all code should continue to function
+with no changes.
+
+ import flag "github.com/spf13/pflag"
+
+There is one exception to this: if you directly instantiate the Flag struct
+there is one more field "Shorthand" that you will need to set.
+Most code never instantiates this struct directly, and instead uses
+functions such as String(), BoolVar(), and Var(), and is therefore
+unaffected.
+
+Define flags using flag.String(), Bool(), Int(), etc.
+
+This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
+ var ip = flag.Int("flagname", 1234, "help message for flagname")
+If you like, you can bind the flag to a variable using the Var() functions.
+ var flagvar int
+ func init() {
+ flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
+ }
+Or you can create custom flags that satisfy the Value interface (with
+pointer receivers) and couple them to flag parsing by
+ flag.Var(&flagVal, "name", "help message for flagname")
+For such flags, the default value is just the initial value of the variable.
+
+After all flags are defined, call
+ flag.Parse()
+to parse the command line into the defined flags.
+
+Flags may then be used directly. If you're using the flags themselves,
+they are all pointers; if you bind to variables, they're values.
+ fmt.Println("ip has value ", *ip)
+ fmt.Println("flagvar has value ", flagvar)
+
+After parsing, the arguments after the flag are available as the
+slice flag.Args() or individually as flag.Arg(i).
+The arguments are indexed from 0 through flag.NArg()-1.
+
+The pflag package also defines some new functions that are not in flag,
+that give one-letter shorthands for flags. You can use these by appending
+'P' to the name of any function that defines a flag.
+ var ip = flag.IntP("flagname", "f", 1234, "help message")
+ var flagvar bool
+ func init() {
+ flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
+ }
+ flag.VarP(&flagVal, "varname", "v", "help message")
+Shorthand letters can be used with single dashes on the command line.
+Boolean shorthand flags can be combined with other shorthand flags.
+
+Command line flag syntax:
+ --flag // boolean flags, or flags with no option default values
+ --flag=x
+
+Unlike the flag package, a single dash before an option means something
+different than a double dash. Single dashes signify a series of shorthand
+letters for flags. All but the last shorthand letter must be boolean flags
+or flags with a 'no option default value'.
+ // boolean flags
+ -f
+ -abc
+ // non-boolean flags
+ -n 1234
+ -Ifile
+ // mixed
+ -abcs "hello"
+ -abcn1234
+
+Flag parsing stops after the terminator "--". Unlike the flag package,
+flags can be interspersed with arguments anywhere on the command line
+before this terminator.
+
+Integer flags accept 1234, 0664, 0x1234 and may be negative.
+Boolean flags (in their long form) accept 1, 0, t, f, true, false,
+TRUE, FALSE, True, False.
+Duration flags accept any input valid for time.ParseDuration.
+
+The default set of command-line flags is controlled by
+top-level functions. The FlagSet type allows one to define
+independent sets of flags, such as to implement subcommands
+in a command-line interface. The methods of FlagSet are
+analogous to the top-level functions for the command-line
+flag set.
+*/
+package pflag
+
+import (
+ "bytes"
+ "errors"
+ goflag "flag"
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "strings"
+)
+
+// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined.
+var ErrHelp = errors.New("pflag: help requested")
+
+// ErrorHandling defines how to handle flag parsing errors.
+type ErrorHandling int
+
+const (
+ // ContinueOnError will return an error from Parse() if an error is found
+ ContinueOnError ErrorHandling = iota
+ // ExitOnError will call os.Exit(2) if an error is found when parsing
+ ExitOnError
+ // PanicOnError will panic() if an error is found when parsing flags
+ PanicOnError
+)
+
+// ParseErrorsWhitelist defines the parsing errors that can be ignored
+type ParseErrorsWhitelist struct {
+ // UnknownFlags will ignore unknown flag errors and continue parsing the rest of the flags
+ UnknownFlags bool
+}
+
+// NormalizedName is a flag name that has been normalized according to rules
+// for the FlagSet (e.g. making '-' and '_' equivalent).
+type NormalizedName string
+
+// A FlagSet represents a set of defined flags.
+type FlagSet struct {
+ // Usage is the function called when an error occurs while parsing flags.
+ // The field is a function (not a method) that may be changed to point to
+ // a custom error handler.
+ Usage func()
+
+ // SortFlags indicates whether flags should be sorted in
+ // help/usage messages.
+ SortFlags bool
+
+ // ParseErrorsWhitelist is used to configure a whitelist of errors
+ ParseErrorsWhitelist ParseErrorsWhitelist
+
+ name string
+ parsed bool
+ actual map[NormalizedName]*Flag
+ orderedActual []*Flag
+ sortedActual []*Flag
+ formal map[NormalizedName]*Flag
+ orderedFormal []*Flag
+ sortedFormal []*Flag
+ shorthands map[byte]*Flag
+ args []string // arguments after flags
+ argsLenAtDash int // len(args) when a '--' was located when parsing, or -1 if no --
+ errorHandling ErrorHandling
+ output io.Writer // nil means stderr; use out() accessor
+ interspersed bool // allow interspersed option/non-option args
+ normalizeNameFunc func(f *FlagSet, name string) NormalizedName
+
+ addedGoFlagSets []*goflag.FlagSet
+}
+
+// A Flag represents the state of a flag.
+type Flag struct {
+ Name string // name as it appears on command line
+ Shorthand string // one-letter abbreviated flag
+ Usage string // help message
+ Value Value // value as set
+ DefValue string // default value (as text); for usage message
+ Changed bool // True if the user set the value (false if left to the default)
+ NoOptDefVal string // default value (as text); if the flag is on the command line without any options
+ Deprecated string // If this flag is deprecated, this string is the message suggesting what to use instead
+ Hidden bool // used by cobra.Command to allow flags to be hidden from help/usage text
+ ShorthandDeprecated string // If the shorthand of this flag is deprecated, this string is the new or now thing to use
+ Annotations map[string][]string // used by cobra.Command bash autocomplete code
+}
+
+// Value is the interface to the dynamic value stored in a flag.
+// (The default value is represented as a string.)
+type Value interface {
+ String() string
+ Set(string) error
+ Type() string
+}
+
+// SliceValue is a secondary interface to all flags which hold a list
+// of values. This allows full control over the value of list flags,
+// and avoids complicated marshalling and unmarshalling to csv.
+type SliceValue interface {
+ // Append adds the specified value to the end of the flag value list.
+ Append(string) error
+ // Replace will fully overwrite any data currently in the flag value list.
+ Replace([]string) error
+ // GetSlice returns the flag value list as an array of strings.
+ GetSlice() []string
+}
+
+// sortFlags returns the flags as a slice in lexicographical sorted order.
+func sortFlags(flags map[NormalizedName]*Flag) []*Flag {
+ list := make(sort.StringSlice, len(flags))
+ i := 0
+ for k := range flags {
+ list[i] = string(k)
+ i++
+ }
+ list.Sort()
+ result := make([]*Flag, len(list))
+ for i, name := range list {
+ result[i] = flags[NormalizedName(name)]
+ }
+ return result
+}
+
+// SetNormalizeFunc allows you to add a function which can translate flag names.
+// Flags added to the FlagSet will be translated, and when anything tries to
+// look up a flag, that name will also be translated. So it would be possible to create
+// a flag named "getURL" and have it translated to "geturl". A user could then pass
+// "--getUrl" which may also be translated to "geturl" and everything will work.
+func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) {
+ f.normalizeNameFunc = n
+ f.sortedFormal = f.sortedFormal[:0]
+ for fname, flag := range f.formal {
+ nname := f.normalizeFlagName(flag.Name)
+ if fname == nname {
+ continue
+ }
+ flag.Name = string(nname)
+ delete(f.formal, fname)
+ f.formal[nname] = flag
+ if _, set := f.actual[fname]; set {
+ delete(f.actual, fname)
+ f.actual[nname] = flag
+ }
+ }
+}
+
+// GetNormalizeFunc returns the previously set NormalizeFunc, or a function
+// which does no translation if none was set.
+func (f *FlagSet) GetNormalizeFunc() func(f *FlagSet, name string) NormalizedName {
+ if f.normalizeNameFunc != nil {
+ return f.normalizeNameFunc
+ }
+ return func(f *FlagSet, name string) NormalizedName { return NormalizedName(name) }
+}
+
+func (f *FlagSet) normalizeFlagName(name string) NormalizedName {
+ n := f.GetNormalizeFunc()
+ return n(f, name)
+}
+
+func (f *FlagSet) out() io.Writer {
+ if f.output == nil {
+ return os.Stderr
+ }
+ return f.output
+}
+
+// SetOutput sets the destination for usage and error messages.
+// If output is nil, os.Stderr is used.
+func (f *FlagSet) SetOutput(output io.Writer) {
+ f.output = output
+}
+
+// VisitAll visits the flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits all flags, even those not set.
+func (f *FlagSet) VisitAll(fn func(*Flag)) {
+ if len(f.formal) == 0 {
+ return
+ }
+
+ var flags []*Flag
+ if f.SortFlags {
+ if len(f.formal) != len(f.sortedFormal) {
+ f.sortedFormal = sortFlags(f.formal)
+ }
+ flags = f.sortedFormal
+ } else {
+ flags = f.orderedFormal
+ }
+
+ for _, flag := range flags {
+ fn(flag)
+ }
+}
+
+// HasFlags returns a bool to indicate if the FlagSet has any flags defined.
+func (f *FlagSet) HasFlags() bool {
+ return len(f.formal) > 0
+}
+
+// HasAvailableFlags returns a bool to indicate if the FlagSet has any flags
+// that are not hidden.
+func (f *FlagSet) HasAvailableFlags() bool {
+ for _, flag := range f.formal {
+ if !flag.Hidden {
+ return true
+ }
+ }
+ return false
+}
+
+// VisitAll visits the command-line flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits all flags, even those not set.
+func VisitAll(fn func(*Flag)) {
+ CommandLine.VisitAll(fn)
+}
+
+// Visit visits the flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits only those flags that have been set.
+func (f *FlagSet) Visit(fn func(*Flag)) {
+ if len(f.actual) == 0 {
+ return
+ }
+
+ var flags []*Flag
+ if f.SortFlags {
+ if len(f.actual) != len(f.sortedActual) {
+ f.sortedActual = sortFlags(f.actual)
+ }
+ flags = f.sortedActual
+ } else {
+ flags = f.orderedActual
+ }
+
+ for _, flag := range flags {
+ fn(flag)
+ }
+}
+
+// Visit visits the command-line flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits only those flags that have been set.
+func Visit(fn func(*Flag)) {
+ CommandLine.Visit(fn)
+}
+
+// Lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (f *FlagSet) Lookup(name string) *Flag {
+ return f.lookup(f.normalizeFlagName(name))
+}
+
+// ShorthandLookup returns the Flag structure of the shorthand flag,
+// returning nil if none exists.
+// It panics if len(name) > 1.
+func (f *FlagSet) ShorthandLookup(name string) *Flag {
+ if name == "" {
+ return nil
+ }
+ if len(name) > 1 {
+ msg := fmt.Sprintf("cannot look up shorthand which is more than one ASCII character: %q", name)
+ fmt.Fprint(f.out(), msg)
+ panic(msg)
+ }
+ c := name[0]
+ return f.shorthands[c]
+}
+
+// lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (f *FlagSet) lookup(name NormalizedName) *Flag {
+ return f.formal[name]
+}
+
+// getFlagType looks up the named flag, verifies it has the expected type, and returns its value converted via convFunc
+func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) {
+ flag := f.Lookup(name)
+ if flag == nil {
+ err := fmt.Errorf("flag accessed but not defined: %s", name)
+ return nil, err
+ }
+
+ if flag.Value.Type() != ftype {
+ err := fmt.Errorf("trying to get %s value of flag of type %s", ftype, flag.Value.Type())
+ return nil, err
+ }
+
+ sval := flag.Value.String()
+ result, err := convFunc(sval)
+ if err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+
+// ArgsLenAtDash will return the length of f.Args at the moment when a -- was
+// found during arg parsing. This allows your program to know which args were
+// before the -- and which came after.
+func (f *FlagSet) ArgsLenAtDash() int {
+ return f.argsLenAtDash
+}
+
+// MarkDeprecated indicates that a flag is deprecated in your program. It will
+// continue to function but will not show up in help or usage messages. Using
+// this flag will also print the given usageMessage.
+func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error {
+ flag := f.Lookup(name)
+ if flag == nil {
+ return fmt.Errorf("flag %q does not exist", name)
+ }
+ if usageMessage == "" {
+ return fmt.Errorf("deprecated message for flag %q must be set", name)
+ }
+ flag.Deprecated = usageMessage
+ flag.Hidden = true
+ return nil
+}
+
+// MarkShorthandDeprecated will mark the shorthand of a flag deprecated in your
+// program. It will continue to function but will not show up in help or usage
+// messages. Using this flag will also print the given usageMessage.
+func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error {
+ flag := f.Lookup(name)
+ if flag == nil {
+ return fmt.Errorf("flag %q does not exist", name)
+ }
+ if usageMessage == "" {
+ return fmt.Errorf("deprecated message for flag %q must be set", name)
+ }
+ flag.ShorthandDeprecated = usageMessage
+ return nil
+}
+
+// MarkHidden sets a flag to 'hidden' in your program. It will continue to
+// function but will not show up in help or usage messages.
+func (f *FlagSet) MarkHidden(name string) error {
+ flag := f.Lookup(name)
+ if flag == nil {
+ return fmt.Errorf("flag %q does not exist", name)
+ }
+ flag.Hidden = true
+ return nil
+}
+
+// Lookup returns the Flag structure of the named command-line flag,
+// returning nil if none exists.
+func Lookup(name string) *Flag {
+ return CommandLine.Lookup(name)
+}
+
+// ShorthandLookup returns the Flag structure of the shorthand flag,
+// returning nil if none exists.
+func ShorthandLookup(name string) *Flag {
+ return CommandLine.ShorthandLookup(name)
+}
+
+// Set sets the value of the named flag.
+func (f *FlagSet) Set(name, value string) error {
+ normalName := f.normalizeFlagName(name)
+ flag, ok := f.formal[normalName]
+ if !ok {
+ return fmt.Errorf("no such flag -%v", name)
+ }
+
+ err := flag.Value.Set(value)
+ if err != nil {
+ var flagName string
+ if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
+ flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name)
+ } else {
+ flagName = fmt.Sprintf("--%s", flag.Name)
+ }
+ return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err)
+ }
+
+ if !flag.Changed {
+ if f.actual == nil {
+ f.actual = make(map[NormalizedName]*Flag)
+ }
+ f.actual[normalName] = flag
+ f.orderedActual = append(f.orderedActual, flag)
+
+ flag.Changed = true
+ }
+
+ if flag.Deprecated != "" {
+ fmt.Fprintf(f.out(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
+ }
+ return nil
+}
+
+// SetAnnotation allows one to set arbitrary annotations on a flag in the FlagSet.
+// This is sometimes used by spf13/cobra programs which want to generate additional
+// bash completion information.
+func (f *FlagSet) SetAnnotation(name, key string, values []string) error {
+ normalName := f.normalizeFlagName(name)
+ flag, ok := f.formal[normalName]
+ if !ok {
+ return fmt.Errorf("no such flag -%v", name)
+ }
+ if flag.Annotations == nil {
+ flag.Annotations = map[string][]string{}
+ }
+ flag.Annotations[key] = values
+ return nil
+}
+
+// Changed returns true if the flag was explicitly set during Parse() and false
+// otherwise
+func (f *FlagSet) Changed(name string) bool {
+ flag := f.Lookup(name)
+ // If a flag doesn't exist, it wasn't changed....
+ if flag == nil {
+ return false
+ }
+ return flag.Changed
+}
+
+// Set sets the value of the named command-line flag.
+func Set(name, value string) error {
+ return CommandLine.Set(name, value)
+}
+
+// PrintDefaults prints, to standard error unless configured
+// otherwise, the default values of all defined flags in the set.
+func (f *FlagSet) PrintDefaults() {
+ usages := f.FlagUsages()
+ fmt.Fprint(f.out(), usages)
+}
+
+// defaultIsZeroValue returns true if the default value for this flag represents
+// a zero value.
+func (f *Flag) defaultIsZeroValue() bool {
+ switch f.Value.(type) {
+ case boolFlag:
+ return f.DefValue == "false"
+ case *durationValue:
+ // Beginning in Go 1.7, duration zero values are "0s"
+ return f.DefValue == "0" || f.DefValue == "0s"
+ case *intValue, *int8Value, *int16Value, *int32Value, *int64Value, *uintValue, *uint8Value, *uint16Value, *uint32Value, *uint64Value, *countValue, *float32Value, *float64Value:
+ return f.DefValue == "0"
+ case *stringValue:
+ return f.DefValue == ""
+ case *ipValue, *ipMaskValue, *ipNetValue:
+ return f.DefValue == ""
+ case *intSliceValue, *stringSliceValue, *stringArrayValue:
+ return f.DefValue == "[]"
+ default:
+ switch f.Value.String() {
+ case "false":
+ return true
+ case "":
+ return true
+ case "":
+ return true
+ case "0":
+ return true
+ }
+ return false
+ }
+}
+
+// UnquoteUsage extracts a back-quoted name from the usage
+// string for a flag and returns it and the un-quoted usage.
+// Given "a `name` to show" it returns ("name", "a name to show").
+// If there are no back quotes, the name is an educated guess of the
+// type of the flag's value, or the empty string if the flag is boolean.
+func UnquoteUsage(flag *Flag) (name string, usage string) {
+ // Look for a back-quoted name, but avoid the strings package.
+ usage = flag.Usage
+ for i := 0; i < len(usage); i++ {
+ if usage[i] == '`' {
+ for j := i + 1; j < len(usage); j++ {
+ if usage[j] == '`' {
+ name = usage[i+1 : j]
+ usage = usage[:i] + name + usage[j+1:]
+ return name, usage
+ }
+ }
+ break // Only one back quote; use type name.
+ }
+ }
+
+ name = flag.Value.Type()
+ switch name {
+ case "bool":
+ name = ""
+ case "float64":
+ name = "float"
+ case "int64":
+ name = "int"
+ case "uint64":
+ name = "uint"
+ case "stringSlice":
+ name = "strings"
+ case "intSlice":
+ name = "ints"
+ case "uintSlice":
+ name = "uints"
+ case "boolSlice":
+ name = "bools"
+ }
+
+ return
+}
+
+// Splits the string `s` on whitespace into an initial substring up to
+// `i` runes in length and the remainder. Will go `slop` over `i` if
+// that encompasses the entire string (which allows the caller to
+// avoid short orphan words on the final line).
+func wrapN(i, slop int, s string) (string, string) {
+ if i+slop > len(s) {
+ return s, ""
+ }
+
+ w := strings.LastIndexAny(s[:i], " \t\n")
+ if w <= 0 {
+ return s, ""
+ }
+ nlPos := strings.LastIndex(s[:i], "\n")
+ if nlPos > 0 && nlPos < w {
+ return s[:nlPos], s[nlPos+1:]
+ }
+ return s[:w], s[w+1:]
+}
+
+// Wraps the string `s` to a maximum width `w` with leading indent
+// `i`. The first line is not indented (this is assumed to be done by
+// caller). Pass `w` == 0 to do no wrapping
+func wrap(i, w int, s string) string {
+ if w == 0 {
+ return strings.Replace(s, "\n", "\n"+strings.Repeat(" ", i), -1)
+ }
+
+ // space between indent i and end of line width w into which
+ // we should wrap the text.
+ wrap := w - i
+
+ var r, l string
+
+ // Not enough space for sensible wrapping. Wrap as a block on
+ // the next line instead.
+ if wrap < 24 {
+ i = 16
+ wrap = w - i
+ r += "\n" + strings.Repeat(" ", i)
+ }
+ // If still not enough space then don't even try to wrap.
+ if wrap < 24 {
+ return strings.Replace(s, "\n", r, -1)
+ }
+
+ // Try to avoid short orphan words on the final line, by
+ // allowing wrapN to go a bit over if that would fit in the
+ // remainder of the line.
+ slop := 5
+ wrap = wrap - slop
+
+ // Handle first line, which is indented by the caller (or the
+ // special case above)
+ l, s = wrapN(wrap, slop, s)
+ r = r + strings.Replace(l, "\n", "\n"+strings.Repeat(" ", i), -1)
+
+ // Now wrap the rest
+ for s != "" {
+ var t string
+
+ t, s = wrapN(wrap, slop, s)
+ r = r + "\n" + strings.Repeat(" ", i) + strings.Replace(t, "\n", "\n"+strings.Repeat(" ", i), -1)
+ }
+
+ return r
+
+}
+
+// FlagUsagesWrapped returns a string containing the usage information
+// for all flags in the FlagSet. Wrapped to `cols` columns (0 for no
+// wrapping)
+func (f *FlagSet) FlagUsagesWrapped(cols int) string {
+ buf := new(bytes.Buffer)
+
+ lines := make([]string, 0, len(f.formal))
+
+ maxlen := 0
+ f.VisitAll(func(flag *Flag) {
+ if flag.Hidden {
+ return
+ }
+
+ line := ""
+ if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
+ line = fmt.Sprintf(" -%s, --%s", flag.Shorthand, flag.Name)
+ } else {
+ line = fmt.Sprintf(" --%s", flag.Name)
+ }
+
+ varname, usage := UnquoteUsage(flag)
+ if varname != "" {
+ line += " " + varname
+ }
+ if flag.NoOptDefVal != "" {
+ switch flag.Value.Type() {
+ case "string":
+ line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal)
+ case "bool":
+ if flag.NoOptDefVal != "true" {
+ line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
+ }
+ case "count":
+ if flag.NoOptDefVal != "+1" {
+ line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
+ }
+ default:
+ line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
+ }
+ }
+
+ // This special character will be replaced with spacing once the
+ // correct alignment is calculated
+ line += "\x00"
+ if len(line) > maxlen {
+ maxlen = len(line)
+ }
+
+ line += usage
+ if !flag.defaultIsZeroValue() {
+ if flag.Value.Type() == "string" {
+ line += fmt.Sprintf(" (default %q)", flag.DefValue)
+ } else {
+ line += fmt.Sprintf(" (default %s)", flag.DefValue)
+ }
+ }
+ if len(flag.Deprecated) != 0 {
+ line += fmt.Sprintf(" (DEPRECATED: %s)", flag.Deprecated)
+ }
+
+ lines = append(lines, line)
+ })
+
+ for _, line := range lines {
+ sidx := strings.Index(line, "\x00")
+ spacing := strings.Repeat(" ", maxlen-sidx)
+ // maxlen + 2 comes from + 1 for the \x00 and + 1 for the (deliberate) off-by-one in maxlen-sidx
+ fmt.Fprintln(buf, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:]))
+ }
+
+ return buf.String()
+}
+
+// FlagUsages returns a string containing the usage information for all flags in
+// the FlagSet
+func (f *FlagSet) FlagUsages() string {
+ return f.FlagUsagesWrapped(0)
+}
+
+// PrintDefaults prints to standard error the default values of all defined command-line flags.
+func PrintDefaults() {
+ CommandLine.PrintDefaults()
+}
+
+// defaultUsage is the default function to print a usage message.
+func defaultUsage(f *FlagSet) {
+ fmt.Fprintf(f.out(), "Usage of %s:\n", f.name)
+ f.PrintDefaults()
+}
+
+// NOTE: Usage is not just defaultUsage(CommandLine)
+// because it serves (via godoc flag Usage) as the example
+// for how to write your own usage function.
+
+// Usage prints to standard error a usage message documenting all defined command-line flags.
+// The function is a variable that may be changed to point to a custom function.
+// By default it prints a simple header and calls PrintDefaults; for details about the
+// format of the output and how to control it, see the documentation for PrintDefaults.
+var Usage = func() {
+ fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
+ PrintDefaults()
+}
+
+// NFlag returns the number of flags that have been set.
+func (f *FlagSet) NFlag() int { return len(f.actual) }
+
+// NFlag returns the number of command-line flags that have been set.
+func NFlag() int { return len(CommandLine.actual) }
+
+// Arg returns the i'th argument. Arg(0) is the first remaining argument
+// after flags have been processed.
+func (f *FlagSet) Arg(i int) string {
+ if i < 0 || i >= len(f.args) {
+ return ""
+ }
+ return f.args[i]
+}
+
+// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument
+// after flags have been processed.
+func Arg(i int) string {
+ return CommandLine.Arg(i)
+}
+
+// NArg is the number of arguments remaining after flags have been processed.
+func (f *FlagSet) NArg() int { return len(f.args) }
+
+// NArg is the number of arguments remaining after flags have been processed.
+func NArg() int { return len(CommandLine.args) }
+
+// Args returns the non-flag arguments.
+func (f *FlagSet) Args() []string { return f.args }
+
+// Args returns the non-flag command-line arguments.
+func Args() []string { return CommandLine.args }
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func (f *FlagSet) Var(value Value, name string, usage string) {
+ f.VarP(value, name, "", usage)
+}
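+
+// As an illustrative sketch of the custom-Value pattern described above
+// (csvValue is hypothetical, not part of this package):
+//
+//	type csvValue []string
+//
+//	func (c *csvValue) String() string     { return strings.Join(*c, ",") }
+//	func (c *csvValue) Set(s string) error { *c = strings.Split(s, ","); return nil }
+//	func (c *csvValue) Type() string       { return "csv" }
+//
+//	var fields csvValue
+//	fs.Var(&fields, "fields", "comma-separated list of fields")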
+
+// VarPF is like VarP, but returns the flag created
+func (f *FlagSet) VarPF(value Value, name, shorthand, usage string) *Flag {
+ // Remember the default value as a string; it won't change.
+ flag := &Flag{
+ Name: name,
+ Shorthand: shorthand,
+ Usage: usage,
+ Value: value,
+ DefValue: value.String(),
+ }
+ f.AddFlag(flag)
+ return flag
+}
+
+// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) VarP(value Value, name, shorthand, usage string) {
+ f.VarPF(value, name, shorthand, usage)
+}
+
+// AddFlag will add the flag to the FlagSet
+func (f *FlagSet) AddFlag(flag *Flag) {
+ normalizedFlagName := f.normalizeFlagName(flag.Name)
+
+ _, alreadyThere := f.formal[normalizedFlagName]
+ if alreadyThere {
+ msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name)
+ fmt.Fprintln(f.out(), msg)
+ panic(msg) // Happens only if flags are declared with identical names
+ }
+ if f.formal == nil {
+ f.formal = make(map[NormalizedName]*Flag)
+ }
+
+ flag.Name = string(normalizedFlagName)
+ f.formal[normalizedFlagName] = flag
+ f.orderedFormal = append(f.orderedFormal, flag)
+
+ if flag.Shorthand == "" {
+ return
+ }
+ if len(flag.Shorthand) > 1 {
+ msg := fmt.Sprintf("%q shorthand is more than one ASCII character", flag.Shorthand)
+ fmt.Fprintln(f.out(), msg)
+ panic(msg)
+ }
+ if f.shorthands == nil {
+ f.shorthands = make(map[byte]*Flag)
+ }
+ c := flag.Shorthand[0]
+ used, alreadyThere := f.shorthands[c]
+ if alreadyThere {
+ msg := fmt.Sprintf("unable to redefine %q shorthand in %q flagset: it's already used for %q flag", c, f.name, used.Name)
+ fmt.Fprintln(f.out(), msg)
+ panic(msg)
+ }
+ f.shorthands[c] = flag
+}
+
+// AddFlagSet adds one FlagSet to another. If a flag is already present in f
+// the flag from newSet will be ignored.
+func (f *FlagSet) AddFlagSet(newSet *FlagSet) {
+ if newSet == nil {
+ return
+ }
+ newSet.VisitAll(func(flag *Flag) {
+ if f.Lookup(flag.Name) == nil {
+ f.AddFlag(flag)
+ }
+ })
+}
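+
+// As an illustrative sketch (set names are hypothetical): flags defined on a
+// shared set can be merged into a command-specific one, with names already
+// present in the destination taking precedence:
+//
+//	shared := NewFlagSet("shared", ContinueOnError)
+//	shared.String("config", "", "path to config file")
+//	cmd := NewFlagSet("cmd", ContinueOnError)
+//	cmd.AddFlagSet(shared) // cmd now also accepts --config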
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func Var(value Value, name string, usage string) {
+ CommandLine.VarP(value, name, "", usage)
+}
+
+// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
+func VarP(value Value, name, shorthand, usage string) {
+ CommandLine.VarP(value, name, shorthand, usage)
+}
+
+// failf prints to standard error a formatted error and usage message and
+// returns the error.
+func (f *FlagSet) failf(format string, a ...interface{}) error {
+ err := fmt.Errorf(format, a...)
+ if f.errorHandling != ContinueOnError {
+ fmt.Fprintln(f.out(), err)
+ f.usage()
+ }
+ return err
+}
+
+// usage calls the Usage method for the flag set, or the usage function if
+// the flag set is CommandLine.
+func (f *FlagSet) usage() {
+ if f == CommandLine {
+ Usage()
+ } else if f.Usage == nil {
+ defaultUsage(f)
+ } else {
+ f.Usage()
+ }
+}
+
+// stripUnknownFlagValue drops the value, if any, that follows an unknown
+// flag which has already been consumed:
+//
+//	--unknown             (args will be empty)
+//	--unknown --next-flag (args will be --next-flag ...)
+//	--unknown arg         (args will be arg ...)
+func stripUnknownFlagValue(args []string) []string {
+ if len(args) == 0 {
+ //--unknown
+ return args
+ }
+
+ first := args[0]
+ if len(first) > 0 && first[0] == '-' {
+ //--unknown --next-flag ...
+ return args
+ }
+
+ //--unknown arg ... (args will be arg ...)
+ if len(args) > 1 {
+ return args[1:]
+ }
+ return nil
+}
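+
+// Illustratively, with the unknown flag itself already consumed:
+//
+//	stripUnknownFlagValue([]string{"value", "rest"}) // -> ["rest"]
+//	stripUnknownFlagValue([]string{"--next", "x"})   // -> ["--next", "x"] (kept)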
+
+func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) {
+ a = args
+ name := s[2:]
+ if len(name) == 0 || name[0] == '-' || name[0] == '=' {
+ err = f.failf("bad flag syntax: %s", s)
+ return
+ }
+
+ split := strings.SplitN(name, "=", 2)
+ name = split[0]
+ flag, exists := f.formal[f.normalizeFlagName(name)]
+
+ if !exists {
+ switch {
+ case name == "help":
+ f.usage()
+ return a, ErrHelp
+ case f.ParseErrorsWhitelist.UnknownFlags:
+ // --unknown=unknownval arg ...
+ // we do not want to lose arg in this case
+ if len(split) >= 2 {
+ return a, nil
+ }
+
+ return stripUnknownFlagValue(a), nil
+ default:
+ err = f.failf("unknown flag: --%s", name)
+ return
+ }
+ }
+
+ var value string
+ if len(split) == 2 {
+ // '--flag=arg'
+ value = split[1]
+ } else if flag.NoOptDefVal != "" {
+ // '--flag' (arg was optional)
+ value = flag.NoOptDefVal
+ } else if len(a) > 0 {
+ // '--flag arg'
+ value = a[0]
+ a = a[1:]
+ } else {
+ // '--flag' (arg was required)
+ err = f.failf("flag needs an argument: %s", s)
+ return
+ }
+
+ err = fn(flag, value)
+ if err != nil {
+ f.failf("%s", err.Error())
+ }
+ return
+}
+
+func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) {
+ outArgs = args
+
+ // Skip flags injected by the Go test binary (e.g. "-test.v"); they are
+ // left for the standard library's flag machinery to handle.
+ if strings.HasPrefix(shorthands, "test.") {
+ return
+ }
+
+ outShorts = shorthands[1:]
+ c := shorthands[0]
+
+ flag, exists := f.shorthands[c]
+ if !exists {
+ switch {
+ case c == 'h':
+ f.usage()
+ err = ErrHelp
+ return
+ case f.ParseErrorsWhitelist.UnknownFlags:
+ // '-f=arg arg ...'
+ // we do not want to lose arg in this case
+ if len(shorthands) > 2 && shorthands[1] == '=' {
+ outShorts = ""
+ return
+ }
+
+ outArgs = stripUnknownFlagValue(outArgs)
+ return
+ default:
+ err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands)
+ return
+ }
+ }
+
+ var value string
+ if len(shorthands) > 2 && shorthands[1] == '=' {
+ // '-f=arg'
+ value = shorthands[2:]
+ outShorts = ""
+ } else if flag.NoOptDefVal != "" {
+ // '-f' (arg was optional)
+ value = flag.NoOptDefVal
+ } else if len(shorthands) > 1 {
+ // '-farg'
+ value = shorthands[1:]
+ outShorts = ""
+ } else if len(args) > 0 {
+ // '-f arg'
+ value = args[0]
+ outArgs = args[1:]
+ } else {
+ // '-f' (arg was required)
+ err = f.failf("flag needs an argument: %q in -%s", c, shorthands)
+ return
+ }
+
+ if flag.ShorthandDeprecated != "" {
+ fmt.Fprintf(f.out(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated)
+ }
+
+ err = fn(flag, value)
+ if err != nil {
+ f.failf("%s", err.Error())
+ }
+ return
+}
+
+func (f *FlagSet) parseShortArg(s string, args []string, fn parseFunc) (a []string, err error) {
+ a = args
+ shorthands := s[1:]
+
+ // "shorthands" can be a series of shorthand letters of flags (e.g. "-vvv").
+ for len(shorthands) > 0 {
+ shorthands, a, err = f.parseSingleShortArg(shorthands, args, fn)
+ if err != nil {
+ return
+ }
+ }
+
+ return
+}
+
+func (f *FlagSet) parseArgs(args []string, fn parseFunc) (err error) {
+ for len(args) > 0 {
+ s := args[0]
+ args = args[1:]
+ if len(s) == 0 || s[0] != '-' || len(s) == 1 {
+ if !f.interspersed {
+ f.args = append(f.args, s)
+ f.args = append(f.args, args...)
+ return nil
+ }
+ f.args = append(f.args, s)
+ continue
+ }
+
+ if s[1] == '-' {
+ if len(s) == 2 { // "--" terminates the flags
+ f.argsLenAtDash = len(f.args)
+ f.args = append(f.args, args...)
+ break
+ }
+ args, err = f.parseLongArg(s, args, fn)
+ } else {
+ args, err = f.parseShortArg(s, args, fn)
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// Parse parses flag definitions from the argument list, which should not
+// include the command name. Must be called after all flags in the FlagSet
+// are defined and before flags are accessed by the program.
+// The return value will be ErrHelp if -help was set but not defined.
+func (f *FlagSet) Parse(arguments []string) error {
+ if f.addedGoFlagSets != nil {
+ for _, goFlagSet := range f.addedGoFlagSets {
+ goFlagSet.Parse(nil)
+ }
+ }
+ f.parsed = true
+
+ f.args = make([]string, 0, len(arguments))
+
+ set := func(flag *Flag, value string) error {
+ return f.Set(flag.Name, value)
+ }
+
+ err := f.parseArgs(arguments, set)
+ if err != nil {
+ switch f.errorHandling {
+ case ContinueOnError:
+ return err
+ case ExitOnError:
+ fmt.Println(err)
+ os.Exit(2)
+ case PanicOnError:
+ panic(err)
+ }
+ }
+ return nil
+}
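+
+// As an illustrative sketch (names are hypothetical): with ContinueOnError
+// the caller handles the parse error itself instead of the process exiting:
+//
+//	fs := NewFlagSet("tool", ContinueOnError)
+//	dryRun := fs.Bool("dry-run", false, "do not make changes")
+//	if err := fs.Parse(os.Args[1:]); err != nil {
+//		// handle err; with ExitOnError, Parse would already have exited
+//	}
+//	_ = *dryRun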
+
+type parseFunc func(flag *Flag, value string) error
+
+// ParseAll parses flag definitions from the argument list, which should not
+// include the command name. The arguments for fn are flag and value. Must be
+// called after all flags in the FlagSet are defined and before flags are
+// accessed by the program. The return value will be ErrHelp if -help was set
+// but not defined.
+func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) error) error {
+ f.parsed = true
+ f.args = make([]string, 0, len(arguments))
+
+ err := f.parseArgs(arguments, fn)
+ if err != nil {
+ switch f.errorHandling {
+ case ContinueOnError:
+ return err
+ case ExitOnError:
+ os.Exit(2)
+ case PanicOnError:
+ panic(err)
+ }
+ }
+ return nil
+}
+
+// Parsed reports whether f.Parse has been called.
+func (f *FlagSet) Parsed() bool {
+ return f.parsed
+}
+
+// Parse parses the command-line flags from os.Args[1:]. Must be called
+// after all flags are defined and before flags are accessed by the program.
+func Parse() {
+ // Ignore errors; CommandLine is set for ExitOnError.
+ CommandLine.Parse(os.Args[1:])
+}
+
+// ParseAll parses the command-line flags from os.Args[1:] and calls fn for each.
+// The arguments for fn are flag and value. Must be called after all flags are
+// defined and before flags are accessed by the program.
+func ParseAll(fn func(flag *Flag, value string) error) {
+ // Ignore errors; CommandLine is set for ExitOnError.
+ CommandLine.ParseAll(os.Args[1:], fn)
+}
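+
+// As an illustrative sketch: fn replaces the default assignment step, so a
+// caller that still wants flag values applied must call Set itself:
+//
+//	ParseAll(func(flag *Flag, value string) error {
+//		// observe each flag as it is parsed, then apply it as usual
+//		return CommandLine.Set(flag.Name, value)
+//	})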
+
+// SetInterspersed sets whether to support interspersed option/non-option arguments.
+func SetInterspersed(interspersed bool) {
+ CommandLine.SetInterspersed(interspersed)
+}
+
+// Parsed returns true if the command-line flags have been parsed.
+func Parsed() bool {
+ return CommandLine.Parsed()
+}
+
+// CommandLine is the default set of command-line flags, parsed from os.Args.
+var CommandLine = NewFlagSet(os.Args[0], ExitOnError)
+
+// NewFlagSet returns a new, empty flag set with the specified name,
+// error handling property and SortFlags set to true.
+func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet {
+ f := &FlagSet{
+ name: name,
+ errorHandling: errorHandling,
+ argsLenAtDash: -1,
+ interspersed: true,
+ SortFlags: true,
+ }
+ return f
+}
+
+// SetInterspersed sets whether to support interspersed option/non-option arguments.
+func (f *FlagSet) SetInterspersed(interspersed bool) {
+ f.interspersed = interspersed
+}
+
+// Init sets the name and error handling property for a flag set.
+// By default, the zero FlagSet uses an empty name and the
+// ContinueOnError error handling policy.
+func (f *FlagSet) Init(name string, errorHandling ErrorHandling) {
+ f.name = name
+ f.errorHandling = errorHandling
+ f.argsLenAtDash = -1
+}
diff --git a/vendor/github.com/spf13/pflag/float32.go b/vendor/github.com/spf13/pflag/float32.go
new file mode 100644
index 0000000..a243f81
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/float32.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- float32 Value
+type float32Value float32
+
+func newFloat32Value(val float32, p *float32) *float32Value {
+ *p = val
+ return (*float32Value)(p)
+}
+
+func (f *float32Value) Set(s string) error {
+ v, err := strconv.ParseFloat(s, 32)
+ *f = float32Value(v)
+ return err
+}
+
+func (f *float32Value) Type() string {
+ return "float32"
+}
+
+func (f *float32Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 32) }
+
+func float32Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseFloat(sval, 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(v), nil
+}
+
+// GetFloat32 returns the float32 value of a flag with the given name
+func (f *FlagSet) GetFloat32(name string) (float32, error) {
+ val, err := f.getFlagType(name, "float32", float32Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(float32), nil
+}
+
+// Float32Var defines a float32 flag with specified name, default value, and usage string.
+// The argument p points to a float32 variable in which to store the value of the flag.
+func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) {
+ f.VarP(newFloat32Value(value, p), name, "", usage)
+}
+
+// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) {
+ f.VarP(newFloat32Value(value, p), name, shorthand, usage)
+}
+
+// Float32Var defines a float32 flag with specified name, default value, and usage string.
+// The argument p points to a float32 variable in which to store the value of the flag.
+func Float32Var(p *float32, name string, value float32, usage string) {
+ CommandLine.VarP(newFloat32Value(value, p), name, "", usage)
+}
+
+// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash.
+func Float32VarP(p *float32, name, shorthand string, value float32, usage string) {
+ CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage)
+}
+
+// Float32 defines a float32 flag with specified name, default value, and usage string.
+// The return value is the address of a float32 variable that stores the value of the flag.
+func (f *FlagSet) Float32(name string, value float32, usage string) *float32 {
+ p := new(float32)
+ f.Float32VarP(p, name, "", value, usage)
+ return p
+}
+
+// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 {
+ p := new(float32)
+ f.Float32VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Float32 defines a float32 flag with specified name, default value, and usage string.
+// The return value is the address of a float32 variable that stores the value of the flag.
+func Float32(name string, value float32, usage string) *float32 {
+ return CommandLine.Float32P(name, "", value, usage)
+}
+
+// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash.
+func Float32P(name, shorthand string, value float32, usage string) *float32 {
+ return CommandLine.Float32P(name, shorthand, value, usage)
+}
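+
+// As an illustrative sketch (the flag name is hypothetical), the Var, pointer,
+// and Get forms all address the same stored value:
+//
+//	var ratio float32
+//	fs.Float32Var(&ratio, "ratio", 0.5, "sampling ratio")
+//	_ = fs.Parse([]string{"--ratio=0.9"})
+//	r, _ := fs.GetFloat32("ratio") // r == 0.9 == ratio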
diff --git a/vendor/github.com/spf13/pflag/float32_slice.go b/vendor/github.com/spf13/pflag/float32_slice.go
new file mode 100644
index 0000000..caa3527
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/float32_slice.go
@@ -0,0 +1,174 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- float32Slice Value
+type float32SliceValue struct {
+ value *[]float32
+ changed bool
+}
+
+func newFloat32SliceValue(val []float32, p *[]float32) *float32SliceValue {
+ isv := new(float32SliceValue)
+ isv.value = p
+ *isv.value = val
+ return isv
+}
+
+func (s *float32SliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]float32, len(ss))
+ for i, d := range ss {
+ var err error
+ var temp64 float64
+ temp64, err = strconv.ParseFloat(d, 32)
+ if err != nil {
+ return err
+ }
+ out[i] = float32(temp64)
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
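+
+// Illustratively (flag name hypothetical): with a default of []float32{1},
+// a first --floats=2,3 replaces the default with [2 3], and a second
+// --floats=4 appends, yielding [2 3 4].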
+
+func (s *float32SliceValue) Type() string {
+ return "float32Slice"
+}
+
+func (s *float32SliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%f", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *float32SliceValue) fromString(val string) (float32, error) {
+ t64, err := strconv.ParseFloat(val, 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(t64), nil
+}
+
+func (s *float32SliceValue) toString(val float32) string {
+ return fmt.Sprintf("%f", val)
+}
+
+func (s *float32SliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *float32SliceValue) Replace(val []string) error {
+ out := make([]float32, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *float32SliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func float32SliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []float32{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]float32, len(ss))
+ for i, d := range ss {
+ var err error
+ var temp64 float64
+ temp64, err = strconv.ParseFloat(d, 32)
+ if err != nil {
+ return nil, err
+ }
+ out[i] = float32(temp64)
+ }
+ return out, nil
+}
+
+// GetFloat32Slice returns the []float32 value of a flag with the given name
+func (f *FlagSet) GetFloat32Slice(name string) ([]float32, error) {
+ val, err := f.getFlagType(name, "float32Slice", float32SliceConv)
+ if err != nil {
+ return []float32{}, err
+ }
+ return val.([]float32), nil
+}
+
+// Float32SliceVar defines a float32Slice flag with specified name, default value, and usage string.
+// The argument p points to a []float32 variable in which to store the value of the flag.
+func (f *FlagSet) Float32SliceVar(p *[]float32, name string, value []float32, usage string) {
+ f.VarP(newFloat32SliceValue(value, p), name, "", usage)
+}
+
+// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) {
+ f.VarP(newFloat32SliceValue(value, p), name, shorthand, usage)
+}
+
+// Float32SliceVar defines a []float32 flag with specified name, default value, and usage string.
+// The argument p points to a []float32 variable in which to store the value of the flag.
+func Float32SliceVar(p *[]float32, name string, value []float32, usage string) {
+ CommandLine.VarP(newFloat32SliceValue(value, p), name, "", usage)
+}
+
+// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) {
+ CommandLine.VarP(newFloat32SliceValue(value, p), name, shorthand, usage)
+}
+
+// Float32Slice defines a []float32 flag with specified name, default value, and usage string.
+// The return value is the address of a []float32 variable that stores the value of the flag.
+func (f *FlagSet) Float32Slice(name string, value []float32, usage string) *[]float32 {
+ p := []float32{}
+ f.Float32SliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 {
+ p := []float32{}
+ f.Float32SliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// Float32Slice defines a []float32 flag with specified name, default value, and usage string.
+// The return value is the address of a []float32 variable that stores the value of the flag.
+func Float32Slice(name string, value []float32, usage string) *[]float32 {
+ return CommandLine.Float32SliceP(name, "", value, usage)
+}
+
+// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash.
+func Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 {
+ return CommandLine.Float32SliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/float64.go b/vendor/github.com/spf13/pflag/float64.go
new file mode 100644
index 0000000..04b5492
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/float64.go
@@ -0,0 +1,84 @@
+package pflag
+
+import "strconv"
+
+// -- float64 Value
+type float64Value float64
+
+func newFloat64Value(val float64, p *float64) *float64Value {
+ *p = val
+ return (*float64Value)(p)
+}
+
+func (f *float64Value) Set(s string) error {
+ v, err := strconv.ParseFloat(s, 64)
+ *f = float64Value(v)
+ return err
+}
+
+func (f *float64Value) Type() string {
+ return "float64"
+}
+
+func (f *float64Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 64) }
+
+func float64Conv(sval string) (interface{}, error) {
+ return strconv.ParseFloat(sval, 64)
+}
+
+// GetFloat64 returns the float64 value of a flag with the given name
+func (f *FlagSet) GetFloat64(name string) (float64, error) {
+ val, err := f.getFlagType(name, "float64", float64Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(float64), nil
+}
+
+// Float64Var defines a float64 flag with specified name, default value, and usage string.
+// The argument p points to a float64 variable in which to store the value of the flag.
+func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) {
+ f.VarP(newFloat64Value(value, p), name, "", usage)
+}
+
+// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) {
+ f.VarP(newFloat64Value(value, p), name, shorthand, usage)
+}
+
+// Float64Var defines a float64 flag with specified name, default value, and usage string.
+// The argument p points to a float64 variable in which to store the value of the flag.
+func Float64Var(p *float64, name string, value float64, usage string) {
+ CommandLine.VarP(newFloat64Value(value, p), name, "", usage)
+}
+
+// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash.
+func Float64VarP(p *float64, name, shorthand string, value float64, usage string) {
+ CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage)
+}
+
+// Float64 defines a float64 flag with specified name, default value, and usage string.
+// The return value is the address of a float64 variable that stores the value of the flag.
+func (f *FlagSet) Float64(name string, value float64, usage string) *float64 {
+ p := new(float64)
+ f.Float64VarP(p, name, "", value, usage)
+ return p
+}
+
+// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 {
+ p := new(float64)
+ f.Float64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Float64 defines a float64 flag with specified name, default value, and usage string.
+// The return value is the address of a float64 variable that stores the value of the flag.
+func Float64(name string, value float64, usage string) *float64 {
+ return CommandLine.Float64P(name, "", value, usage)
+}
+
+// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash.
+func Float64P(name, shorthand string, value float64, usage string) *float64 {
+ return CommandLine.Float64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/float64_slice.go b/vendor/github.com/spf13/pflag/float64_slice.go
new file mode 100644
index 0000000..85bf307
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/float64_slice.go
@@ -0,0 +1,166 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- float64Slice Value
+type float64SliceValue struct {
+ value *[]float64
+ changed bool
+}
+
+func newFloat64SliceValue(val []float64, p *[]float64) *float64SliceValue {
+ isv := new(float64SliceValue)
+ isv.value = p
+ *isv.value = val
+ return isv
+}
+
+func (s *float64SliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]float64, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.ParseFloat(d, 64)
+ if err != nil {
+ return err
+ }
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *float64SliceValue) Type() string {
+ return "float64Slice"
+}
+
+func (s *float64SliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%f", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *float64SliceValue) fromString(val string) (float64, error) {
+ return strconv.ParseFloat(val, 64)
+}
+
+func (s *float64SliceValue) toString(val float64) string {
+ return fmt.Sprintf("%f", val)
+}
+
+func (s *float64SliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *float64SliceValue) Replace(val []string) error {
+ out := make([]float64, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *float64SliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func float64SliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []float64{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]float64, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.ParseFloat(d, 64)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return out, nil
+}
+
+// GetFloat64Slice returns the []float64 value of a flag with the given name
+func (f *FlagSet) GetFloat64Slice(name string) ([]float64, error) {
+ val, err := f.getFlagType(name, "float64Slice", float64SliceConv)
+ if err != nil {
+ return []float64{}, err
+ }
+ return val.([]float64), nil
+}
+
+// Float64SliceVar defines a float64Slice flag with specified name, default value, and usage string.
+// The argument p points to a []float64 variable in which to store the value of the flag.
+func (f *FlagSet) Float64SliceVar(p *[]float64, name string, value []float64, usage string) {
+ f.VarP(newFloat64SliceValue(value, p), name, "", usage)
+}
+
+// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) {
+ f.VarP(newFloat64SliceValue(value, p), name, shorthand, usage)
+}
+
+// Float64SliceVar defines a []float64 flag with specified name, default value, and usage string.
+// The argument p points to a []float64 variable in which to store the value of the flag.
+func Float64SliceVar(p *[]float64, name string, value []float64, usage string) {
+ CommandLine.VarP(newFloat64SliceValue(value, p), name, "", usage)
+}
+
+// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) {
+ CommandLine.VarP(newFloat64SliceValue(value, p), name, shorthand, usage)
+}
+
+// Float64Slice defines a []float64 flag with specified name, default value, and usage string.
+// The return value is the address of a []float64 variable that stores the value of the flag.
+func (f *FlagSet) Float64Slice(name string, value []float64, usage string) *[]float64 {
+ p := []float64{}
+ f.Float64SliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 {
+ p := []float64{}
+ f.Float64SliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// Float64Slice defines a []float64 flag with specified name, default value, and usage string.
+// The return value is the address of a []float64 variable that stores the value of the flag.
+func Float64Slice(name string, value []float64, usage string) *[]float64 {
+ return CommandLine.Float64SliceP(name, "", value, usage)
+}
+
+// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash.
+func Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 {
+ return CommandLine.Float64SliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/golangflag.go b/vendor/github.com/spf13/pflag/golangflag.go
new file mode 100644
index 0000000..d3dd72b
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/golangflag.go
@@ -0,0 +1,105 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pflag
+
+import (
+ goflag "flag"
+ "reflect"
+ "strings"
+)
+
+// flagValueWrapper implements pflag.Value around a flag.Value. The main
+// difference here is the addition of the Type method that returns a string
+// name of the type. As this is generally unknown, we approximate that with
+// reflection.
+type flagValueWrapper struct {
+ inner goflag.Value
+ flagType string
+}
+
+// We are just copying the boolFlag interface out of goflag as that is what
+// they use to decide if a flag should get "true" when no arg is given.
+type goBoolFlag interface {
+ goflag.Value
+ IsBoolFlag() bool
+}
+
+func wrapFlagValue(v goflag.Value) Value {
+ // If the flag.Value happens to also be a pflag.Value, just use it directly.
+ if pv, ok := v.(Value); ok {
+ return pv
+ }
+
+ pv := &flagValueWrapper{
+ inner: v,
+ }
+
+ t := reflect.TypeOf(v)
+ if t.Kind() == reflect.Interface || t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+
+ pv.flagType = strings.TrimSuffix(t.Name(), "Value")
+ return pv
+}
+
+func (v *flagValueWrapper) String() string {
+ return v.inner.String()
+}
+
+func (v *flagValueWrapper) Set(s string) error {
+ return v.inner.Set(s)
+}
+
+func (v *flagValueWrapper) Type() string {
+ return v.flagType
+}
+
+// PFlagFromGoFlag will return a *pflag.Flag given a *flag.Flag.
+// If the *flag.Flag.Name was a single character (ex: `v`) it will be accessible
+// with both `-v` and `--v` in flags. If the golang flag was more than a single
+// character (ex: `verbose`) it will only be accessible via `--verbose`.
+func PFlagFromGoFlag(goflag *goflag.Flag) *Flag {
+ // Remember the default value as a string; it won't change.
+ flag := &Flag{
+ Name: goflag.Name,
+ Usage: goflag.Usage,
+ Value: wrapFlagValue(goflag.Value),
+ // Looks like golang flags don't set DefValue correctly :-(
+ //DefValue: goflag.DefValue,
+ DefValue: goflag.Value.String(),
+ }
+ // Ex: if the golang flag was -v, allow both -v and --v to work
+ if len(flag.Name) == 1 {
+ flag.Shorthand = flag.Name
+ }
+ if fv, ok := goflag.Value.(goBoolFlag); ok && fv.IsBoolFlag() {
+ flag.NoOptDefVal = "true"
+ }
+ return flag
+}
+
+// AddGoFlag will add the given *flag.Flag to the pflag.FlagSet
+func (f *FlagSet) AddGoFlag(goflag *goflag.Flag) {
+ if f.Lookup(goflag.Name) != nil {
+ return
+ }
+ newflag := PFlagFromGoFlag(goflag)
+ f.AddFlag(newflag)
+}
+
+// AddGoFlagSet will add the given *flag.FlagSet to the pflag.FlagSet
+func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) {
+ if newSet == nil {
+ return
+ }
+ newSet.VisitAll(func(goflag *goflag.Flag) {
+ f.AddGoFlag(goflag)
+ })
+ if f.addedGoFlagSets == nil {
+ f.addedGoFlagSets = make([]*goflag.FlagSet, 0)
+ }
+ f.addedGoFlagSets = append(f.addedGoFlagSets, newSet)
+}
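+
+// As an illustrative sketch: bridging flags registered with the standard
+// library's flag package so that pflag parses them too:
+//
+//	goflag.CommandLine.Bool("verbose", false, "verbose output")
+//	CommandLine.AddGoFlagSet(goflag.CommandLine)
+//	Parse() // --verbose is now handled by pflag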
diff --git a/vendor/github.com/spf13/pflag/int.go b/vendor/github.com/spf13/pflag/int.go
new file mode 100644
index 0000000..1474b89
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int.go
@@ -0,0 +1,84 @@
+package pflag
+
+import "strconv"
+
+// -- int Value
+type intValue int
+
+func newIntValue(val int, p *int) *intValue {
+ *p = val
+ return (*intValue)(p)
+}
+
+func (i *intValue) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 64)
+ *i = intValue(v)
+ return err
+}
+
+func (i *intValue) Type() string {
+ return "int"
+}
+
+func (i *intValue) String() string { return strconv.Itoa(int(*i)) }
+
+func intConv(sval string) (interface{}, error) {
+ return strconv.Atoi(sval)
+}
+
+// GetInt returns the int value of a flag with the given name
+func (f *FlagSet) GetInt(name string) (int, error) {
+ val, err := f.getFlagType(name, "int", intConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int), nil
+}
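+
+// As an illustrative sketch (flag name hypothetical):
+//
+//	fs.Int("retries", 3, "number of retries")
+//	_ = fs.Parse([]string{"--retries=5"})
+//	n, _ := fs.GetInt("retries") // n == 5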
+
+// IntVar defines an int flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+func (f *FlagSet) IntVar(p *int, name string, value int, usage string) {
+ f.VarP(newIntValue(value, p), name, "", usage)
+}
+
+// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) {
+ f.VarP(newIntValue(value, p), name, shorthand, usage)
+}
+
+// IntVar defines an int flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+func IntVar(p *int, name string, value int, usage string) {
+ CommandLine.VarP(newIntValue(value, p), name, "", usage)
+}
+
+// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash.
+func IntVarP(p *int, name, shorthand string, value int, usage string) {
+ CommandLine.VarP(newIntValue(value, p), name, shorthand, usage)
+}
+
+// Int defines an int flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+func (f *FlagSet) Int(name string, value int, usage string) *int {
+ p := new(int)
+ f.IntVarP(p, name, "", value, usage)
+ return p
+}
+
+// IntP is like Int, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int {
+ p := new(int)
+ f.IntVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int defines an int flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+func Int(name string, value int, usage string) *int {
+ return CommandLine.IntP(name, "", value, usage)
+}
+
+// IntP is like Int, but accepts a shorthand letter that can be used after a single dash.
+func IntP(name, shorthand string, value int, usage string) *int {
+ return CommandLine.IntP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int16.go b/vendor/github.com/spf13/pflag/int16.go
new file mode 100644
index 0000000..f1a01d0
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int16.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- int16 Value
+type int16Value int16
+
+func newInt16Value(val int16, p *int16) *int16Value {
+ *p = val
+ return (*int16Value)(p)
+}
+
+func (i *int16Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 16)
+ *i = int16Value(v)
+ return err
+}
+
+func (i *int16Value) Type() string {
+ return "int16"
+}
+
+func (i *int16Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int16Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseInt(sval, 0, 16)
+ if err != nil {
+ return 0, err
+ }
+ return int16(v), nil
+}
+
+// GetInt16 returns the int16 value of a flag with the given name
+func (f *FlagSet) GetInt16(name string) (int16, error) {
+ val, err := f.getFlagType(name, "int16", int16Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int16), nil
+}
+
+// Int16Var defines an int16 flag with specified name, default value, and usage string.
+// The argument p points to an int16 variable in which to store the value of the flag.
+func (f *FlagSet) Int16Var(p *int16, name string, value int16, usage string) {
+ f.VarP(newInt16Value(value, p), name, "", usage)
+}
+
+// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int16VarP(p *int16, name, shorthand string, value int16, usage string) {
+ f.VarP(newInt16Value(value, p), name, shorthand, usage)
+}
+
+// Int16Var defines an int16 flag with specified name, default value, and usage string.
+// The argument p points to an int16 variable in which to store the value of the flag.
+func Int16Var(p *int16, name string, value int16, usage string) {
+ CommandLine.VarP(newInt16Value(value, p), name, "", usage)
+}
+
+// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash.
+func Int16VarP(p *int16, name, shorthand string, value int16, usage string) {
+ CommandLine.VarP(newInt16Value(value, p), name, shorthand, usage)
+}
+
+// Int16 defines an int16 flag with specified name, default value, and usage string.
+// The return value is the address of an int16 variable that stores the value of the flag.
+func (f *FlagSet) Int16(name string, value int16, usage string) *int16 {
+ p := new(int16)
+ f.Int16VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int16P(name, shorthand string, value int16, usage string) *int16 {
+ p := new(int16)
+ f.Int16VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int16 defines an int16 flag with specified name, default value, and usage string.
+// The return value is the address of an int16 variable that stores the value of the flag.
+func Int16(name string, value int16, usage string) *int16 {
+ return CommandLine.Int16P(name, "", value, usage)
+}
+
+// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash.
+func Int16P(name, shorthand string, value int16, usage string) *int16 {
+ return CommandLine.Int16P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int32.go b/vendor/github.com/spf13/pflag/int32.go
new file mode 100644
index 0000000..9b95944
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int32.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- int32 Value
+type int32Value int32
+
+func newInt32Value(val int32, p *int32) *int32Value {
+ *p = val
+ return (*int32Value)(p)
+}
+
+func (i *int32Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 32)
+ *i = int32Value(v)
+ return err
+}
+
+func (i *int32Value) Type() string {
+ return "int32"
+}
+
+func (i *int32Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int32Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseInt(sval, 0, 32)
+ if err != nil {
+ return 0, err
+ }
+ return int32(v), nil
+}
+
+// GetInt32 returns the int32 value of a flag with the given name
+func (f *FlagSet) GetInt32(name string) (int32, error) {
+ val, err := f.getFlagType(name, "int32", int32Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int32), nil
+}
+
+// Int32Var defines an int32 flag with specified name, default value, and usage string.
+// The argument p points to an int32 variable in which to store the value of the flag.
+func (f *FlagSet) Int32Var(p *int32, name string, value int32, usage string) {
+ f.VarP(newInt32Value(value, p), name, "", usage)
+}
+
+// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32VarP(p *int32, name, shorthand string, value int32, usage string) {
+ f.VarP(newInt32Value(value, p), name, shorthand, usage)
+}
+
+// Int32Var defines an int32 flag with specified name, default value, and usage string.
+// The argument p points to an int32 variable in which to store the value of the flag.
+func Int32Var(p *int32, name string, value int32, usage string) {
+ CommandLine.VarP(newInt32Value(value, p), name, "", usage)
+}
+
+// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash.
+func Int32VarP(p *int32, name, shorthand string, value int32, usage string) {
+ CommandLine.VarP(newInt32Value(value, p), name, shorthand, usage)
+}
+
+// Int32 defines an int32 flag with specified name, default value, and usage string.
+// The return value is the address of an int32 variable that stores the value of the flag.
+func (f *FlagSet) Int32(name string, value int32, usage string) *int32 {
+ p := new(int32)
+ f.Int32VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32P(name, shorthand string, value int32, usage string) *int32 {
+ p := new(int32)
+ f.Int32VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int32 defines an int32 flag with specified name, default value, and usage string.
+// The return value is the address of an int32 variable that stores the value of the flag.
+func Int32(name string, value int32, usage string) *int32 {
+ return CommandLine.Int32P(name, "", value, usage)
+}
+
+// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash.
+func Int32P(name, shorthand string, value int32, usage string) *int32 {
+ return CommandLine.Int32P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int32_slice.go b/vendor/github.com/spf13/pflag/int32_slice.go
new file mode 100644
index 0000000..ff128ff
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int32_slice.go
@@ -0,0 +1,174 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- int32Slice Value
+type int32SliceValue struct {
+ value *[]int32
+ changed bool
+}
+
+func newInt32SliceValue(val []int32, p *[]int32) *int32SliceValue {
+ isv := new(int32SliceValue)
+ isv.value = p
+ *isv.value = val
+ return isv
+}
+
+func (s *int32SliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]int32, len(ss))
+ for i, d := range ss {
+ var err error
+ var temp64 int64
+ temp64, err = strconv.ParseInt(d, 0, 32)
+ if err != nil {
+ return err
+ }
+ out[i] = int32(temp64)
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *int32SliceValue) Type() string {
+ return "int32Slice"
+}
+
+func (s *int32SliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%d", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *int32SliceValue) fromString(val string) (int32, error) {
+ t64, err := strconv.ParseInt(val, 0, 32)
+ if err != nil {
+ return 0, err
+ }
+ return int32(t64), nil
+}
+
+func (s *int32SliceValue) toString(val int32) string {
+ return fmt.Sprintf("%d", val)
+}
+
+func (s *int32SliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *int32SliceValue) Replace(val []string) error {
+ out := make([]int32, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *int32SliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func int32SliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []int32{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]int32, len(ss))
+ for i, d := range ss {
+ var err error
+ var temp64 int64
+ temp64, err = strconv.ParseInt(d, 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ out[i] = int32(temp64)
+ }
+ return out, nil
+}
+
+// GetInt32Slice returns the []int32 value of a flag with the given name
+func (f *FlagSet) GetInt32Slice(name string) ([]int32, error) {
+ val, err := f.getFlagType(name, "int32Slice", int32SliceConv)
+ if err != nil {
+ return []int32{}, err
+ }
+ return val.([]int32), nil
+}
+
+// Int32SliceVar defines an int32Slice flag with specified name, default value, and usage string.
+// The argument p points to a []int32 variable in which to store the value of the flag.
+func (f *FlagSet) Int32SliceVar(p *[]int32, name string, value []int32, usage string) {
+ f.VarP(newInt32SliceValue(value, p), name, "", usage)
+}
+
+// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) {
+ f.VarP(newInt32SliceValue(value, p), name, shorthand, usage)
+}
+
+// Int32SliceVar defines a []int32 flag with specified name, default value, and usage string.
+// The argument p points to a []int32 variable in which to store the value of the flag.
+func Int32SliceVar(p *[]int32, name string, value []int32, usage string) {
+ CommandLine.VarP(newInt32SliceValue(value, p), name, "", usage)
+}
+
+// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) {
+ CommandLine.VarP(newInt32SliceValue(value, p), name, shorthand, usage)
+}
+
+// Int32Slice defines a []int32 flag with specified name, default value, and usage string.
+// The return value is the address of a []int32 variable that stores the value of the flag.
+func (f *FlagSet) Int32Slice(name string, value []int32, usage string) *[]int32 {
+ p := []int32{}
+ f.Int32SliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 {
+ p := []int32{}
+ f.Int32SliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// Int32Slice defines a []int32 flag with specified name, default value, and usage string.
+// The return value is the address of a []int32 variable that stores the value of the flag.
+func Int32Slice(name string, value []int32, usage string) *[]int32 {
+ return CommandLine.Int32SliceP(name, "", value, usage)
+}
+
+// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash.
+func Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 {
+ return CommandLine.Int32SliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int64.go b/vendor/github.com/spf13/pflag/int64.go
new file mode 100644
index 0000000..0026d78
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int64.go
@@ -0,0 +1,84 @@
+package pflag
+
+import "strconv"
+
+// -- int64 Value
+type int64Value int64
+
+func newInt64Value(val int64, p *int64) *int64Value {
+ *p = val
+ return (*int64Value)(p)
+}
+
+func (i *int64Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 64)
+ *i = int64Value(v)
+ return err
+}
+
+func (i *int64Value) Type() string {
+ return "int64"
+}
+
+func (i *int64Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int64Conv(sval string) (interface{}, error) {
+ return strconv.ParseInt(sval, 0, 64)
+}
+
+// GetInt64 returns the int64 value of a flag with the given name
+func (f *FlagSet) GetInt64(name string) (int64, error) {
+ val, err := f.getFlagType(name, "int64", int64Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int64), nil
+}
+
+// Int64Var defines an int64 flag with specified name, default value, and usage string.
+// The argument p points to an int64 variable in which to store the value of the flag.
+func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) {
+ f.VarP(newInt64Value(value, p), name, "", usage)
+}
+
+// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) {
+ f.VarP(newInt64Value(value, p), name, shorthand, usage)
+}
+
+// Int64Var defines an int64 flag with specified name, default value, and usage string.
+// The argument p points to an int64 variable in which to store the value of the flag.
+func Int64Var(p *int64, name string, value int64, usage string) {
+ CommandLine.VarP(newInt64Value(value, p), name, "", usage)
+}
+
+// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash.
+func Int64VarP(p *int64, name, shorthand string, value int64, usage string) {
+ CommandLine.VarP(newInt64Value(value, p), name, shorthand, usage)
+}
+
+// Int64 defines an int64 flag with specified name, default value, and usage string.
+// The return value is the address of an int64 variable that stores the value of the flag.
+func (f *FlagSet) Int64(name string, value int64, usage string) *int64 {
+ p := new(int64)
+ f.Int64VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 {
+ p := new(int64)
+ f.Int64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int64 defines an int64 flag with specified name, default value, and usage string.
+// The return value is the address of an int64 variable that stores the value of the flag.
+func Int64(name string, value int64, usage string) *int64 {
+ return CommandLine.Int64P(name, "", value, usage)
+}
+
+// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash.
+func Int64P(name, shorthand string, value int64, usage string) *int64 {
+ return CommandLine.Int64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int64_slice.go b/vendor/github.com/spf13/pflag/int64_slice.go
new file mode 100644
index 0000000..2546463
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int64_slice.go
@@ -0,0 +1,166 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- int64Slice Value
+type int64SliceValue struct {
+ value *[]int64
+ changed bool
+}
+
+func newInt64SliceValue(val []int64, p *[]int64) *int64SliceValue {
+ isv := new(int64SliceValue)
+ isv.value = p
+ *isv.value = val
+ return isv
+}
+
+func (s *int64SliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]int64, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.ParseInt(d, 0, 64)
+ if err != nil {
+ return err
+ }
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *int64SliceValue) Type() string {
+ return "int64Slice"
+}
+
+func (s *int64SliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%d", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *int64SliceValue) fromString(val string) (int64, error) {
+ return strconv.ParseInt(val, 0, 64)
+}
+
+func (s *int64SliceValue) toString(val int64) string {
+ return fmt.Sprintf("%d", val)
+}
+
+func (s *int64SliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *int64SliceValue) Replace(val []string) error {
+ out := make([]int64, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *int64SliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func int64SliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []int64{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]int64, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.ParseInt(d, 0, 64)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return out, nil
+}
+
+// GetInt64Slice returns the []int64 value of a flag with the given name
+func (f *FlagSet) GetInt64Slice(name string) ([]int64, error) {
+ val, err := f.getFlagType(name, "int64Slice", int64SliceConv)
+ if err != nil {
+ return []int64{}, err
+ }
+ return val.([]int64), nil
+}
+
+// Int64SliceVar defines an int64Slice flag with specified name, default value, and usage string.
+// The argument p points to a []int64 variable in which to store the value of the flag.
+func (f *FlagSet) Int64SliceVar(p *[]int64, name string, value []int64, usage string) {
+ f.VarP(newInt64SliceValue(value, p), name, "", usage)
+}
+
+// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) {
+ f.VarP(newInt64SliceValue(value, p), name, shorthand, usage)
+}
+
+// Int64SliceVar defines a []int64 flag with specified name, default value, and usage string.
+// The argument p points to a []int64 variable in which to store the value of the flag.
+func Int64SliceVar(p *[]int64, name string, value []int64, usage string) {
+ CommandLine.VarP(newInt64SliceValue(value, p), name, "", usage)
+}
+
+// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) {
+ CommandLine.VarP(newInt64SliceValue(value, p), name, shorthand, usage)
+}
+
+// Int64Slice defines a []int64 flag with specified name, default value, and usage string.
+// The return value is the address of a []int64 variable that stores the value of the flag.
+func (f *FlagSet) Int64Slice(name string, value []int64, usage string) *[]int64 {
+ p := []int64{}
+ f.Int64SliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 {
+ p := []int64{}
+ f.Int64SliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// Int64Slice defines a []int64 flag with specified name, default value, and usage string.
+// The return value is the address of a []int64 variable that stores the value of the flag.
+func Int64Slice(name string, value []int64, usage string) *[]int64 {
+ return CommandLine.Int64SliceP(name, "", value, usage)
+}
+
+// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash.
+func Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 {
+ return CommandLine.Int64SliceP(name, shorthand, value, usage)
+}
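A minimal usage sketch for the int64-slice API above; the "demo" flag set and the --nums flag are illustrative, not part of the vendored code:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	nums := fs.Int64Slice("nums", nil, "int64 values")
	_ = fs.Parse([]string{"--nums=1,2", "--nums=3"})
	fmt.Println(*nums) // [1 2 3]: the first Set replaces the default, later ones append
}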
diff --git a/vendor/github.com/spf13/pflag/int8.go b/vendor/github.com/spf13/pflag/int8.go
new file mode 100644
index 0000000..4da9222
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int8.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- int8 Value
+type int8Value int8
+
+func newInt8Value(val int8, p *int8) *int8Value {
+ *p = val
+ return (*int8Value)(p)
+}
+
+func (i *int8Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 8)
+ *i = int8Value(v)
+ return err
+}
+
+func (i *int8Value) Type() string {
+ return "int8"
+}
+
+func (i *int8Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int8Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseInt(sval, 0, 8)
+ if err != nil {
+ return 0, err
+ }
+ return int8(v), nil
+}
+
+// GetInt8 returns the int8 value of a flag with the given name
+func (f *FlagSet) GetInt8(name string) (int8, error) {
+ val, err := f.getFlagType(name, "int8", int8Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int8), nil
+}
+
+// Int8Var defines an int8 flag with specified name, default value, and usage string.
+// The argument p points to an int8 variable in which to store the value of the flag.
+func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) {
+ f.VarP(newInt8Value(value, p), name, "", usage)
+}
+
+// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) {
+ f.VarP(newInt8Value(value, p), name, shorthand, usage)
+}
+
+// Int8Var defines an int8 flag with specified name, default value, and usage string.
+// The argument p points to an int8 variable in which to store the value of the flag.
+func Int8Var(p *int8, name string, value int8, usage string) {
+ CommandLine.VarP(newInt8Value(value, p), name, "", usage)
+}
+
+// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash.
+func Int8VarP(p *int8, name, shorthand string, value int8, usage string) {
+ CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage)
+}
+
+// Int8 defines an int8 flag with specified name, default value, and usage string.
+// The return value is the address of an int8 variable that stores the value of the flag.
+func (f *FlagSet) Int8(name string, value int8, usage string) *int8 {
+ p := new(int8)
+ f.Int8VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 {
+ p := new(int8)
+ f.Int8VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int8 defines an int8 flag with specified name, default value, and usage string.
+// The return value is the address of an int8 variable that stores the value of the flag.
+func Int8(name string, value int8, usage string) *int8 {
+ return CommandLine.Int8P(name, "", value, usage)
+}
+
+// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash.
+func Int8P(name, shorthand string, value int8, usage string) *int8 {
+ return CommandLine.Int8P(name, shorthand, value, usage)
+}
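Because Set parses with strconv.ParseInt and base 0, int8 flags also accept 0x/0o/0b prefixes and reject out-of-range input. A small sketch with illustrative names:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	level := fs.Int8("level", 0, "verbosity level")
	_ = fs.Parse([]string{"--level=0x7f"}) // base-0 parsing accepts the 0x prefix
	fmt.Println(*level)                    // 127; --level=128 would fail with a range error
}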
diff --git a/vendor/github.com/spf13/pflag/int_slice.go b/vendor/github.com/spf13/pflag/int_slice.go
new file mode 100644
index 0000000..e71c39d
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int_slice.go
@@ -0,0 +1,158 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- intSlice Value
+type intSliceValue struct {
+ value *[]int
+ changed bool
+}
+
+func newIntSliceValue(val []int, p *[]int) *intSliceValue {
+ isv := new(intSliceValue)
+ isv.value = p
+ *isv.value = val
+ return isv
+}
+
+func (s *intSliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]int, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.Atoi(d)
+ if err != nil {
+ return err
+ }
+
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *intSliceValue) Type() string {
+ return "intSlice"
+}
+
+func (s *intSliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%d", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *intSliceValue) Append(val string) error {
+ i, err := strconv.Atoi(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *intSliceValue) Replace(val []string) error {
+ out := make([]int, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = strconv.Atoi(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *intSliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = strconv.Itoa(d)
+ }
+ return out
+}
+
+func intSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []int{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]int, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.Atoi(d)
+ if err != nil {
+ return nil, err
+ }
+
+ }
+ return out, nil
+}
+
+// GetIntSlice returns the []int value of a flag with the given name
+func (f *FlagSet) GetIntSlice(name string) ([]int, error) {
+ val, err := f.getFlagType(name, "intSlice", intSliceConv)
+ if err != nil {
+ return []int{}, err
+ }
+ return val.([]int), nil
+}
+
+// IntSliceVar defines an intSlice flag with specified name, default value, and usage string.
+// The argument p points to a []int variable in which to store the value of the flag.
+func (f *FlagSet) IntSliceVar(p *[]int, name string, value []int, usage string) {
+ f.VarP(newIntSliceValue(value, p), name, "", usage)
+}
+
+// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) {
+ f.VarP(newIntSliceValue(value, p), name, shorthand, usage)
+}
+
+// IntSliceVar defines a []int flag with specified name, default value, and usage string.
+// The argument p points to a []int variable in which to store the value of the flag.
+func IntSliceVar(p *[]int, name string, value []int, usage string) {
+ CommandLine.VarP(newIntSliceValue(value, p), name, "", usage)
+}
+
+// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) {
+ CommandLine.VarP(newIntSliceValue(value, p), name, shorthand, usage)
+}
+
+// IntSlice defines a []int flag with specified name, default value, and usage string.
+// The return value is the address of a []int variable that stores the value of the flag.
+func (f *FlagSet) IntSlice(name string, value []int, usage string) *[]int {
+ p := []int{}
+ f.IntSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntSliceP(name, shorthand string, value []int, usage string) *[]int {
+ p := []int{}
+ f.IntSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// IntSlice defines a []int flag with specified name, default value, and usage string.
+// The return value is the address of a []int variable that stores the value of the flag.
+func IntSlice(name string, value []int, usage string) *[]int {
+ return CommandLine.IntSliceP(name, "", value, usage)
+}
+
+// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash.
+func IntSliceP(name, shorthand string, value []int, usage string) *[]int {
+ return CommandLine.IntSliceP(name, shorthand, value, usage)
+}
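The Get accessor above pairs with flag definition like this; a sketch assuming an illustrative --ports flag:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.IntSlice("ports", []int{80}, "listen ports")
	_ = fs.Parse([]string{"--ports=8080,8443"})
	ports, err := fs.GetIntSlice("ports") // looked up by name and type via intSliceConv
	fmt.Println(ports, err)               // [8080 8443] <nil>
}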
diff --git a/vendor/github.com/spf13/pflag/ip.go b/vendor/github.com/spf13/pflag/ip.go
new file mode 100644
index 0000000..3d414ba
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ip.go
@@ -0,0 +1,94 @@
+package pflag
+
+import (
+ "fmt"
+ "net"
+ "strings"
+)
+
+// -- net.IP value
+type ipValue net.IP
+
+func newIPValue(val net.IP, p *net.IP) *ipValue {
+ *p = val
+ return (*ipValue)(p)
+}
+
+func (i *ipValue) String() string { return net.IP(*i).String() }
+func (i *ipValue) Set(s string) error {
+ ip := net.ParseIP(strings.TrimSpace(s))
+ if ip == nil {
+ return fmt.Errorf("failed to parse IP: %q", s)
+ }
+ *i = ipValue(ip)
+ return nil
+}
+
+func (i *ipValue) Type() string {
+ return "ip"
+}
+
+func ipConv(sval string) (interface{}, error) {
+ ip := net.ParseIP(sval)
+ if ip != nil {
+ return ip, nil
+ }
+ return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval)
+}
+
+// GetIP returns the net.IP value of a flag with the given name
+func (f *FlagSet) GetIP(name string) (net.IP, error) {
+ val, err := f.getFlagType(name, "ip", ipConv)
+ if err != nil {
+ return nil, err
+ }
+ return val.(net.IP), nil
+}
+
+// IPVar defines a net.IP flag with specified name, default value, and usage string.
+// The argument p points to a net.IP variable in which to store the value of the flag.
+func (f *FlagSet) IPVar(p *net.IP, name string, value net.IP, usage string) {
+ f.VarP(newIPValue(value, p), name, "", usage)
+}
+
+// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) {
+ f.VarP(newIPValue(value, p), name, shorthand, usage)
+}
+
+// IPVar defines a net.IP flag with specified name, default value, and usage string.
+// The argument p points to a net.IP variable in which to store the value of the flag.
+func IPVar(p *net.IP, name string, value net.IP, usage string) {
+ CommandLine.VarP(newIPValue(value, p), name, "", usage)
+}
+
+// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash.
+func IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) {
+ CommandLine.VarP(newIPValue(value, p), name, shorthand, usage)
+}
+
+// IP defines a net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a net.IP variable that stores the value of the flag.
+func (f *FlagSet) IP(name string, value net.IP, usage string) *net.IP {
+ p := new(net.IP)
+ f.IPVarP(p, name, "", value, usage)
+ return p
+}
+
+// IPP is like IP, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPP(name, shorthand string, value net.IP, usage string) *net.IP {
+ p := new(net.IP)
+ f.IPVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// IP defines a net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a net.IP variable that stores the value of the flag.
+func IP(name string, value net.IP, usage string) *net.IP {
+ return CommandLine.IPP(name, "", value, usage)
+}
+
+// IPP is like IP, but accepts a shorthand letter that can be used after a single dash.
+func IPP(name, shorthand string, value net.IP, usage string) *net.IP {
+ return CommandLine.IPP(name, shorthand, value, usage)
+}
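A minimal sketch of the net.IP flag API above (flag names illustrative):

package main

import (
	"fmt"
	"net"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	bind := fs.IP("bind", net.ParseIP("127.0.0.1"), "bind address")
	_ = fs.Parse([]string{"--bind=10.0.0.1"})
	fmt.Println(*bind) // 10.0.0.1; a malformed value would fail Set with "failed to parse IP"
}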
diff --git a/vendor/github.com/spf13/pflag/ip_slice.go b/vendor/github.com/spf13/pflag/ip_slice.go
new file mode 100644
index 0000000..775faae
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ip_slice.go
@@ -0,0 +1,186 @@
+package pflag
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "strings"
+)
+
+// -- ipSlice Value
+type ipSliceValue struct {
+ value *[]net.IP
+ changed bool
+}
+
+func newIPSliceValue(val []net.IP, p *[]net.IP) *ipSliceValue {
+ ipsv := new(ipSliceValue)
+ ipsv.value = p
+ *ipsv.value = val
+ return ipsv
+}
+
+// Set converts, and assigns, the comma-separated IP argument string representation as the []net.IP value of this flag.
+// If Set is called on a flag that already has a []net.IP assigned, the newly converted values will be appended.
+func (s *ipSliceValue) Set(val string) error {
+
+ // remove all quote characters
+ rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "")
+
+ // read flag arguments with CSV parser
+ ipStrSlice, err := readAsCSV(rmQuote.Replace(val))
+ if err != nil && err != io.EOF {
+ return err
+ }
+
+ // parse ip values into slice
+ out := make([]net.IP, 0, len(ipStrSlice))
+ for _, ipStr := range ipStrSlice {
+ ip := net.ParseIP(strings.TrimSpace(ipStr))
+ if ip == nil {
+ return fmt.Errorf("invalid string being converted to IP address: %s", ipStr)
+ }
+ out = append(out, ip)
+ }
+
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+
+ s.changed = true
+
+ return nil
+}
+
+// Type returns a string that uniquely represents this flag's type.
+func (s *ipSliceValue) Type() string {
+ return "ipSlice"
+}
+
+// String defines a "native" format for this net.IP slice flag value.
+func (s *ipSliceValue) String() string {
+
+ ipStrSlice := make([]string, len(*s.value))
+ for i, ip := range *s.value {
+ ipStrSlice[i] = ip.String()
+ }
+
+ out, _ := writeAsCSV(ipStrSlice)
+
+ return "[" + out + "]"
+}
+
+func (s *ipSliceValue) fromString(val string) (net.IP, error) {
+ ip := net.ParseIP(strings.TrimSpace(val))
+ if ip == nil {
+ return nil, fmt.Errorf("invalid string being converted to IP address: %s", val)
+ }
+ return ip, nil
+}
+
+func (s *ipSliceValue) toString(val net.IP) string {
+ return val.String()
+}
+
+func (s *ipSliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *ipSliceValue) Replace(val []string) error {
+ out := make([]net.IP, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *ipSliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func ipSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []net.IP{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]net.IP, len(ss))
+ for i, sval := range ss {
+ ip := net.ParseIP(strings.TrimSpace(sval))
+ if ip == nil {
+ return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval)
+ }
+ out[i] = ip
+ }
+ return out, nil
+}
+
+// GetIPSlice returns the []net.IP value of a flag with the given name
+func (f *FlagSet) GetIPSlice(name string) ([]net.IP, error) {
+ val, err := f.getFlagType(name, "ipSlice", ipSliceConv)
+ if err != nil {
+ return []net.IP{}, err
+ }
+ return val.([]net.IP), nil
+}
+
+// IPSliceVar defines an ipSlice flag with specified name, default value, and usage string.
+// The argument p points to a []net.IP variable in which to store the value of the flag.
+func (f *FlagSet) IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) {
+ f.VarP(newIPSliceValue(value, p), name, "", usage)
+}
+
+// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) {
+ f.VarP(newIPSliceValue(value, p), name, shorthand, usage)
+}
+
+// IPSliceVar defines a []net.IP flag with specified name, default value, and usage string.
+// The argument p points to a []net.IP variable in which to store the value of the flag.
+func IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) {
+ CommandLine.VarP(newIPSliceValue(value, p), name, "", usage)
+}
+
+// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) {
+ CommandLine.VarP(newIPSliceValue(value, p), name, shorthand, usage)
+}
+
+// IPSlice defines a []net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a []net.IP variable that stores the value of that flag.
+func (f *FlagSet) IPSlice(name string, value []net.IP, usage string) *[]net.IP {
+ p := []net.IP{}
+ f.IPSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP {
+ p := []net.IP{}
+ f.IPSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// IPSlice defines a []net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a []net.IP variable that stores the value of the flag.
+func IPSlice(name string, value []net.IP, usage string) *[]net.IP {
+ return CommandLine.IPSliceP(name, "", value, usage)
+}
+
+// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash.
+func IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP {
+ return CommandLine.IPSliceP(name, shorthand, value, usage)
+}
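A sketch of Set's CSV-splitting and append-on-repeat behavior, with illustrative names:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	dns := fs.IPSlice("dns", nil, "DNS servers")
	_ = fs.Parse([]string{"--dns=8.8.8.8,1.1.1.1", "--dns=9.9.9.9"})
	fmt.Println(*dns) // [8.8.8.8 1.1.1.1 9.9.9.9]: CSV-split, repeats append
}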
diff --git a/vendor/github.com/spf13/pflag/ipmask.go b/vendor/github.com/spf13/pflag/ipmask.go
new file mode 100644
index 0000000..5bd44bd
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ipmask.go
@@ -0,0 +1,122 @@
+package pflag
+
+import (
+ "fmt"
+ "net"
+ "strconv"
+)
+
+// -- net.IPMask value
+type ipMaskValue net.IPMask
+
+func newIPMaskValue(val net.IPMask, p *net.IPMask) *ipMaskValue {
+ *p = val
+ return (*ipMaskValue)(p)
+}
+
+func (i *ipMaskValue) String() string { return net.IPMask(*i).String() }
+func (i *ipMaskValue) Set(s string) error {
+ ip := ParseIPv4Mask(s)
+ if ip == nil {
+ return fmt.Errorf("failed to parse IP mask: %q", s)
+ }
+ *i = ipMaskValue(ip)
+ return nil
+}
+
+func (i *ipMaskValue) Type() string {
+ return "ipMask"
+}
+
+// ParseIPv4Mask parses an IPv4 netmask written in IP form (e.g. 255.255.255.0)
+// or in hex form (e.g. ffffff00). This function should really belong to the net package.
+func ParseIPv4Mask(s string) net.IPMask {
+ mask := net.ParseIP(s)
+ if mask == nil {
+ if len(s) != 8 {
+ return nil
+ }
+ // net.IPMask.String() actually outputs things like ffffff00
+ // so write a horrible parser for that as well :-(
+ m := []int{}
+ for i := 0; i < 4; i++ {
+ b := "0x" + s[2*i:2*i+2]
+ d, err := strconv.ParseInt(b, 0, 0)
+ if err != nil {
+ return nil
+ }
+ m = append(m, int(d))
+ }
+ s := fmt.Sprintf("%d.%d.%d.%d", m[0], m[1], m[2], m[3])
+ mask = net.ParseIP(s)
+ if mask == nil {
+ return nil
+ }
+ }
+ return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15])
+}
+
+func parseIPv4Mask(sval string) (interface{}, error) {
+ mask := ParseIPv4Mask(sval)
+ if mask == nil {
+ return nil, fmt.Errorf("unable to parse %s as net.IPMask", sval)
+ }
+ return mask, nil
+}
+
+// GetIPv4Mask returns the net.IPMask value of a flag with the given name
+func (f *FlagSet) GetIPv4Mask(name string) (net.IPMask, error) {
+ val, err := f.getFlagType(name, "ipMask", parseIPv4Mask)
+ if err != nil {
+ return nil, err
+ }
+ return val.(net.IPMask), nil
+}
+
+// IPMaskVar defines a net.IPMask flag with specified name, default value, and usage string.
+// The argument p points to a net.IPMask variable in which to store the value of the flag.
+func (f *FlagSet) IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) {
+ f.VarP(newIPMaskValue(value, p), name, "", usage)
+}
+
+// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) {
+ f.VarP(newIPMaskValue(value, p), name, shorthand, usage)
+}
+
+// IPMaskVar defines a net.IPMask flag with specified name, default value, and usage string.
+// The argument p points to a net.IPMask variable in which to store the value of the flag.
+func IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) {
+ CommandLine.VarP(newIPMaskValue(value, p), name, "", usage)
+}
+
+// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash.
+func IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) {
+ CommandLine.VarP(newIPMaskValue(value, p), name, shorthand, usage)
+}
+
+// IPMask defines a net.IPMask flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPMask variable that stores the value of the flag.
+func (f *FlagSet) IPMask(name string, value net.IPMask, usage string) *net.IPMask {
+ p := new(net.IPMask)
+ f.IPMaskVarP(p, name, "", value, usage)
+ return p
+}
+
+// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask {
+ p := new(net.IPMask)
+ f.IPMaskVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// IPMask defines a net.IPMask flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPMask variable that stores the value of the flag.
+func IPMask(name string, value net.IPMask, usage string) *net.IPMask {
+ return CommandLine.IPMaskP(name, "", value, usage)
+}
+
+// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash.
+func IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask {
+ return CommandLine.IPMaskP(name, shorthand, value, usage)
+}
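ParseIPv4Mask is exported and usable directly; a small sketch of both accepted forms:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fmt.Println(pflag.ParseIPv4Mask("255.255.255.0")) // ffffff00
	fmt.Println(pflag.ParseIPv4Mask("ffffff00"))      // ffffff00 (hex form handled too)
	fmt.Println(pflag.ParseIPv4Mask("bogus"))         // <nil> for unparseable input
}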
diff --git a/vendor/github.com/spf13/pflag/ipnet.go b/vendor/github.com/spf13/pflag/ipnet.go
new file mode 100644
index 0000000..e2c1b8b
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ipnet.go
@@ -0,0 +1,98 @@
+package pflag
+
+import (
+ "fmt"
+ "net"
+ "strings"
+)
+
+// IPNet adapts net.IPNet for use as a flag.
+type ipNetValue net.IPNet
+
+func (ipnet ipNetValue) String() string {
+ n := net.IPNet(ipnet)
+ return n.String()
+}
+
+func (ipnet *ipNetValue) Set(value string) error {
+ _, n, err := net.ParseCIDR(strings.TrimSpace(value))
+ if err != nil {
+ return err
+ }
+ *ipnet = ipNetValue(*n)
+ return nil
+}
+
+func (*ipNetValue) Type() string {
+ return "ipNet"
+}
+
+func newIPNetValue(val net.IPNet, p *net.IPNet) *ipNetValue {
+ *p = val
+ return (*ipNetValue)(p)
+}
+
+func ipNetConv(sval string) (interface{}, error) {
+ _, n, err := net.ParseCIDR(strings.TrimSpace(sval))
+ if err == nil {
+ return *n, nil
+ }
+ return nil, fmt.Errorf("invalid string being converted to IPNet: %s", sval)
+}
+
+// GetIPNet returns the net.IPNet value of a flag with the given name
+func (f *FlagSet) GetIPNet(name string) (net.IPNet, error) {
+ val, err := f.getFlagType(name, "ipNet", ipNetConv)
+ if err != nil {
+ return net.IPNet{}, err
+ }
+ return val.(net.IPNet), nil
+}
+
+// IPNetVar defines a net.IPNet flag with specified name, default value, and usage string.
+// The argument p points to a net.IPNet variable in which to store the value of the flag.
+func (f *FlagSet) IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) {
+ f.VarP(newIPNetValue(value, p), name, "", usage)
+}
+
+// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) {
+ f.VarP(newIPNetValue(value, p), name, shorthand, usage)
+}
+
+// IPNetVar defines a net.IPNet flag with specified name, default value, and usage string.
+// The argument p points to a net.IPNet variable in which to store the value of the flag.
+func IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) {
+ CommandLine.VarP(newIPNetValue(value, p), name, "", usage)
+}
+
+// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash.
+func IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) {
+ CommandLine.VarP(newIPNetValue(value, p), name, shorthand, usage)
+}
+
+// IPNet defines a net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPNet variable that stores the value of the flag.
+func (f *FlagSet) IPNet(name string, value net.IPNet, usage string) *net.IPNet {
+ p := new(net.IPNet)
+ f.IPNetVarP(p, name, "", value, usage)
+ return p
+}
+
+// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet {
+ p := new(net.IPNet)
+ f.IPNetVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// IPNet defines a net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPNet variable that stores the value of the flag.
+func IPNet(name string, value net.IPNet, usage string) *net.IPNet {
+ return CommandLine.IPNetP(name, "", value, usage)
+}
+
+// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash.
+func IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet {
+ return CommandLine.IPNetP(name, shorthand, value, usage)
+}
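A minimal sketch of the net.IPNet flag API above (names illustrative):

package main

import (
	"fmt"
	"net"

	"github.com/spf13/pflag"
)

func main() {
	_, def, _ := net.ParseCIDR("10.0.0.0/8")
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	allow := fs.IPNet("allow", *def, "allowed network")
	_ = fs.Parse([]string{"--allow=192.168.1.0/24"})
	fmt.Println(allow) // 192.168.1.0/24
}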
diff --git a/vendor/github.com/spf13/pflag/string.go b/vendor/github.com/spf13/pflag/string.go
new file mode 100644
index 0000000..04e0a26
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string.go
@@ -0,0 +1,80 @@
+package pflag
+
+// -- string Value
+type stringValue string
+
+func newStringValue(val string, p *string) *stringValue {
+ *p = val
+ return (*stringValue)(p)
+}
+
+func (s *stringValue) Set(val string) error {
+ *s = stringValue(val)
+ return nil
+}
+func (s *stringValue) Type() string {
+ return "string"
+}
+
+func (s *stringValue) String() string { return string(*s) }
+
+func stringConv(sval string) (interface{}, error) {
+ return sval, nil
+}
+
+// GetString returns the string value of a flag with the given name
+func (f *FlagSet) GetString(name string) (string, error) {
+ val, err := f.getFlagType(name, "string", stringConv)
+ if err != nil {
+ return "", err
+ }
+ return val.(string), nil
+}
+
+// StringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a string variable in which to store the value of the flag.
+func (f *FlagSet) StringVar(p *string, name string, value string, usage string) {
+ f.VarP(newStringValue(value, p), name, "", usage)
+}
+
+// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) {
+ f.VarP(newStringValue(value, p), name, shorthand, usage)
+}
+
+// StringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a string variable in which to store the value of the flag.
+func StringVar(p *string, name string, value string, usage string) {
+ CommandLine.VarP(newStringValue(value, p), name, "", usage)
+}
+
+// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash.
+func StringVarP(p *string, name, shorthand string, value string, usage string) {
+ CommandLine.VarP(newStringValue(value, p), name, shorthand, usage)
+}
+
+// String defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a string variable that stores the value of the flag.
+func (f *FlagSet) String(name string, value string, usage string) *string {
+ p := new(string)
+ f.StringVarP(p, name, "", value, usage)
+ return p
+}
+
+// StringP is like String, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string {
+ p := new(string)
+ f.StringVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// String defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a string variable that stores the value of the flag.
+func String(name string, value string, usage string) *string {
+ return CommandLine.StringP(name, "", value, usage)
+}
+
+// StringP is like String, but accepts a shorthand letter that can be used after a single dash.
+func StringP(name, shorthand string, value string, usage string) *string {
+ return CommandLine.StringP(name, shorthand, value, usage)
+}
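A small sketch of the shorthand variant, with illustrative names:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	name := fs.StringP("name", "n", "anonymous", "display name")
	_ = fs.Parse([]string{"-n", "gopher"})
	fmt.Println(*name) // gopher: -n works because StringP registers the shorthand
}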
diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go
new file mode 100644
index 0000000..4894af8
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_array.go
@@ -0,0 +1,129 @@
+package pflag
+
+// -- stringArray Value
+type stringArrayValue struct {
+ value *[]string
+ changed bool
+}
+
+func newStringArrayValue(val []string, p *[]string) *stringArrayValue {
+ ssv := new(stringArrayValue)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+func (s *stringArrayValue) Set(val string) error {
+ if !s.changed {
+ *s.value = []string{val}
+ s.changed = true
+ } else {
+ *s.value = append(*s.value, val)
+ }
+ return nil
+}
+
+func (s *stringArrayValue) Append(val string) error {
+ *s.value = append(*s.value, val)
+ return nil
+}
+
+func (s *stringArrayValue) Replace(val []string) error {
+ out := make([]string, len(val))
+ copy(out, val)
+ *s.value = out
+ return nil
+}
+
+func (s *stringArrayValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = d
+ }
+ return out
+}
+
+func (s *stringArrayValue) Type() string {
+ return "stringArray"
+}
+
+func (s *stringArrayValue) String() string {
+ str, _ := writeAsCSV(*s.value)
+ return "[" + str + "]"
+}
+
+func stringArrayConv(sval string) (interface{}, error) {
+ sval = sval[1 : len(sval)-1]
+ // An empty string would cause an array with one (empty) string
+ if len(sval) == 0 {
+ return []string{}, nil
+ }
+ return readAsCSV(sval)
+}
+
+// GetStringArray returns the []string value of a flag with the given name
+func (f *FlagSet) GetStringArray(name string) ([]string, error) {
+ val, err := f.getFlagType(name, "stringArray", stringArrayConv)
+ if err != nil {
+ return []string{}, err
+ }
+ return val.([]string), nil
+}
+
+// StringArrayVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the values of the multiple flags.
+// Each argument value is stored verbatim and is never split on commas; use a StringSlice for that.
+func (f *FlagSet) StringArrayVar(p *[]string, name string, value []string, usage string) {
+ f.VarP(newStringArrayValue(value, p), name, "", usage)
+}
+
+// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) {
+ f.VarP(newStringArrayValue(value, p), name, shorthand, usage)
+}
+
+// StringArrayVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+// Each argument value is stored verbatim and is never split on commas; use a StringSlice for that.
+func StringArrayVar(p *[]string, name string, value []string, usage string) {
+ CommandLine.VarP(newStringArrayValue(value, p), name, "", usage)
+}
+
+// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash.
+func StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) {
+ CommandLine.VarP(newStringArrayValue(value, p), name, shorthand, usage)
+}
+
+// StringArray defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// Each argument value is stored verbatim and is never split on commas; use a StringSlice for that.
+func (f *FlagSet) StringArray(name string, value []string, usage string) *[]string {
+ p := []string{}
+ f.StringArrayVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringArrayP(name, shorthand string, value []string, usage string) *[]string {
+ p := []string{}
+ f.StringArrayVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringArray defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// Each argument value is stored verbatim and is never split on commas; use a StringSlice for that.
+func StringArray(name string, value []string, usage string) *[]string {
+ return CommandLine.StringArrayP(name, "", value, usage)
+}
+
+// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash.
+func StringArrayP(name, shorthand string, value []string, usage string) *[]string {
+ return CommandLine.StringArrayP(name, shorthand, value, usage)
+}
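To make the StringArray/StringSlice distinction concrete, a sketch with illustrative names:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	args := fs.StringArray("exec", nil, "verbatim arguments")
	_ = fs.Parse([]string{"--exec=a,b", "--exec=c"})
	fmt.Println(*args) // [a,b c]: each value is kept verbatim, never comma-split
}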
diff --git a/vendor/github.com/spf13/pflag/string_slice.go b/vendor/github.com/spf13/pflag/string_slice.go
new file mode 100644
index 0000000..3cb2e69
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_slice.go
@@ -0,0 +1,163 @@
+package pflag
+
+import (
+ "bytes"
+ "encoding/csv"
+ "strings"
+)
+
+// -- stringSlice Value
+type stringSliceValue struct {
+ value *[]string
+ changed bool
+}
+
+func newStringSliceValue(val []string, p *[]string) *stringSliceValue {
+ ssv := new(stringSliceValue)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+func readAsCSV(val string) ([]string, error) {
+ if val == "" {
+ return []string{}, nil
+ }
+ stringReader := strings.NewReader(val)
+ csvReader := csv.NewReader(stringReader)
+ return csvReader.Read()
+}
+
+func writeAsCSV(vals []string) (string, error) {
+ b := &bytes.Buffer{}
+ w := csv.NewWriter(b)
+ err := w.Write(vals)
+ if err != nil {
+ return "", err
+ }
+ w.Flush()
+ return strings.TrimSuffix(b.String(), "\n"), nil
+}
+
+func (s *stringSliceValue) Set(val string) error {
+ v, err := readAsCSV(val)
+ if err != nil {
+ return err
+ }
+ if !s.changed {
+ *s.value = v
+ } else {
+ *s.value = append(*s.value, v...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *stringSliceValue) Type() string {
+ return "stringSlice"
+}
+
+func (s *stringSliceValue) String() string {
+ str, _ := writeAsCSV(*s.value)
+ return "[" + str + "]"
+}
+
+func (s *stringSliceValue) Append(val string) error {
+ *s.value = append(*s.value, val)
+ return nil
+}
+
+func (s *stringSliceValue) Replace(val []string) error {
+ *s.value = val
+ return nil
+}
+
+func (s *stringSliceValue) GetSlice() []string {
+ return *s.value
+}
+
+func stringSliceConv(sval string) (interface{}, error) {
+ sval = sval[1 : len(sval)-1]
+ // An empty string would cause a slice with one (empty) string
+ if len(sval) == 0 {
+ return []string{}, nil
+ }
+ return readAsCSV(sval)
+}
+
+// GetStringSlice returns the []string value of a flag with the given name
+func (f *FlagSet) GetStringSlice(name string) ([]string, error) {
+ val, err := f.getFlagType(name, "stringSlice", stringSliceConv)
+ if err != nil {
+ return []string{}, err
+ }
+ return val.([]string), nil
+}
+
+// StringSliceVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated values as arguments and split them accordingly.
+// For example:
+// --ss="v1,v2" --ss="v3"
+// will result in
+// []string{"v1", "v2", "v3"}
+func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) {
+ f.VarP(newStringSliceValue(value, p), name, "", usage)
+}
+
+// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) {
+ f.VarP(newStringSliceValue(value, p), name, shorthand, usage)
+}
+
+// StringSliceVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated values as arguments and split them accordingly.
+// For example:
+// --ss="v1,v2" --ss="v3"
+// will result in
+// []string{"v1", "v2", "v3"}
+func StringSliceVar(p *[]string, name string, value []string, usage string) {
+ CommandLine.VarP(newStringSliceValue(value, p), name, "", usage)
+}
+
+// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) {
+ CommandLine.VarP(newStringSliceValue(value, p), name, shorthand, usage)
+}
+
+// StringSlice defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated values as arguments and split them accordingly.
+// For example:
+// --ss="v1,v2" --ss="v3"
+// will result in
+// []string{"v1", "v2", "v3"}
+func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string {
+ p := []string{}
+ f.StringSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage string) *[]string {
+ p := []string{}
+ f.StringSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringSlice defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated values as arguments and split them accordingly.
+// For example:
+// --ss="v1,v2" --ss="v3"
+// will result in
+// []string{"v1", "v2", "v3"}
+func StringSlice(name string, value []string, usage string) *[]string {
+ return CommandLine.StringSliceP(name, "", value, usage)
+}
+
+// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash.
+func StringSliceP(name, shorthand string, value []string, usage string) *[]string {
+ return CommandLine.StringSliceP(name, shorthand, value, usage)
+}
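A sketch of the documented CSV behavior (--ss="v1,v2" --ss="v3"), with illustrative setup:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	ss := fs.StringSlice("ss", nil, "CSV values")
	_ = fs.Parse([]string{"--ss=v1,v2", "--ss=v3"})
	fmt.Println(*ss) // [v1 v2 v3]: each occurrence is CSV-split, repeats append
}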
diff --git a/vendor/github.com/spf13/pflag/string_to_int.go b/vendor/github.com/spf13/pflag/string_to_int.go
new file mode 100644
index 0000000..5ceda39
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_to_int.go
@@ -0,0 +1,149 @@
+package pflag
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- stringToInt Value
+type stringToIntValue struct {
+ value *map[string]int
+ changed bool
+}
+
+func newStringToIntValue(val map[string]int, p *map[string]int) *stringToIntValue {
+ ssv := new(stringToIntValue)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+// Format: a=1,b=2
+func (s *stringToIntValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make(map[string]int, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ var err error
+ out[kv[0]], err = strconv.Atoi(kv[1])
+ if err != nil {
+ return err
+ }
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ for k, v := range out {
+ (*s.value)[k] = v
+ }
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *stringToIntValue) Type() string {
+ return "stringToInt"
+}
+
+func (s *stringToIntValue) String() string {
+ var buf bytes.Buffer
+ i := 0
+ for k, v := range *s.value {
+ if i > 0 {
+ buf.WriteRune(',')
+ }
+ buf.WriteString(k)
+ buf.WriteRune('=')
+ buf.WriteString(strconv.Itoa(v))
+ i++
+ }
+ return "[" + buf.String() + "]"
+}
+
+func stringToIntConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // An empty string would cause an empty map
+ if len(val) == 0 {
+ return map[string]int{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make(map[string]int, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return nil, fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ var err error
+ out[kv[0]], err = strconv.Atoi(kv[1])
+ if err != nil {
+ return nil, err
+ }
+ }
+ return out, nil
+}
+
+// GetStringToInt returns the map[string]int value of a flag with the given name
+func (f *FlagSet) GetStringToInt(name string) (map[string]int, error) {
+ val, err := f.getFlagType(name, "stringToInt", stringToIntConv)
+ if err != nil {
+ return map[string]int{}, err
+ }
+ return val.(map[string]int), nil
+}
+
+// StringToIntVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int variable in which to store the values of the multiple flags.
+// Multiple key=value pairs may be given, separated by commas (e.g. a=1,b=2).
+func (f *FlagSet) StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) {
+ f.VarP(newStringToIntValue(value, p), name, "", usage)
+}
+
+// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) {
+ f.VarP(newStringToIntValue(value, p), name, shorthand, usage)
+}
+
+// StringToIntVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int variable in which to store the value of the flag.
+// Multiple key=value pairs may be given, separated by commas (e.g. a=1,b=2).
+func StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) {
+ CommandLine.VarP(newStringToIntValue(value, p), name, "", usage)
+}
+
+// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash.
+func StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) {
+ CommandLine.VarP(newStringToIntValue(value, p), name, shorthand, usage)
+}
+
+// StringToInt defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int variable that stores the value of the flag.
+// Multiple key=value pairs may be given, separated by commas (e.g. a=1,b=2).
+func (f *FlagSet) StringToInt(name string, value map[string]int, usage string) *map[string]int {
+ p := map[string]int{}
+ f.StringToIntVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int {
+ p := map[string]int{}
+ f.StringToIntVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringToInt defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int variable that stores the value of the flag.
+// Multiple key=value pairs may be given, separated by commas (e.g. a=1,b=2).
+func StringToInt(name string, value map[string]int, usage string) *map[string]int {
+ return CommandLine.StringToIntP(name, "", value, usage)
+}
+
+// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash.
+func StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int {
+ return CommandLine.StringToIntP(name, shorthand, value, usage)
+}
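A minimal sketch of the map-flag merge semantics above (names illustrative):

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	w := fs.StringToInt("weight", nil, "key=value weights")
	_ = fs.Parse([]string{"--weight=a=1,b=2", "--weight=b=3"})
	fmt.Println((*w)["a"], (*w)["b"]) // 1 3: repeats merge, later keys overwrite
}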
diff --git a/vendor/github.com/spf13/pflag/string_to_int64.go b/vendor/github.com/spf13/pflag/string_to_int64.go
new file mode 100644
index 0000000..a807a04
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_to_int64.go
@@ -0,0 +1,149 @@
+package pflag
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- stringToInt64 Value
+type stringToInt64Value struct {
+ value *map[string]int64
+ changed bool
+}
+
+func newStringToInt64Value(val map[string]int64, p *map[string]int64) *stringToInt64Value {
+ ssv := new(stringToInt64Value)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+// Format: a=1,b=2
+func (s *stringToInt64Value) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make(map[string]int64, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ var err error
+ out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64)
+ if err != nil {
+ return err
+ }
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ for k, v := range out {
+ (*s.value)[k] = v
+ }
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *stringToInt64Value) Type() string {
+ return "stringToInt64"
+}
+
+func (s *stringToInt64Value) String() string {
+ var buf bytes.Buffer
+ i := 0
+ for k, v := range *s.value {
+ if i > 0 {
+ buf.WriteRune(',')
+ }
+ buf.WriteString(k)
+ buf.WriteRune('=')
+ buf.WriteString(strconv.FormatInt(v, 10))
+ i++
+ }
+ return "[" + buf.String() + "]"
+}
+
+func stringToInt64Conv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // An empty string would cause an empty map
+ if len(val) == 0 {
+ return map[string]int64{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make(map[string]int64, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return nil, fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ var err error
+ out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return out, nil
+}
+
+// GetStringToInt64 returns the map[string]int64 value of a flag with the given name
+func (f *FlagSet) GetStringToInt64(name string) (map[string]int64, error) {
+ val, err := f.getFlagType(name, "stringToInt64", stringToInt64Conv)
+ if err != nil {
+ return map[string]int64{}, err
+ }
+ return val.(map[string]int64), nil
+}
+
+// StringToInt64Var defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int64 variable in which to store the values of the multiple flags.
+// Multiple key=value pairs may be given, separated by commas (e.g. a=1,b=2).
+func (f *FlagSet) StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) {
+ f.VarP(newStringToInt64Value(value, p), name, "", usage)
+}
+
+// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) {
+ f.VarP(newStringToInt64Value(value, p), name, shorthand, usage)
+}
+
+// StringToInt64Var defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int64 variable in which to store the value of the flag.
+// Multiple key=value pairs may be given, separated by commas (e.g. a=1,b=2).
+func StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) {
+ CommandLine.VarP(newStringToInt64Value(value, p), name, "", usage)
+}
+
+// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash.
+func StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) {
+ CommandLine.VarP(newStringToInt64Value(value, p), name, shorthand, usage)
+}
+
+// StringToInt64 defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int64 variable that stores the value of the flag.
+// Multiple key=value pairs may be given, separated by commas (e.g. a=1,b=2).
+func (f *FlagSet) StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 {
+ p := map[string]int64{}
+ f.StringToInt64VarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 {
+ p := map[string]int64{}
+ f.StringToInt64VarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringToInt64 defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int64 variable that stores the value of the flag.
+// Multiple key=value pairs may be given, separated by commas (e.g. a=1,b=2).
+func StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 {
+ return CommandLine.StringToInt64P(name, "", value, usage)
+}
+
+// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash.
+func StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 {
+ return CommandLine.StringToInt64P(name, shorthand, value, usage)
+}
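The int64 variant behaves like the previous sketch; shown because values beyond int32 range are the point of this type (names illustrative):

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.StringToInt64("limit", nil, "key=value limits")
	_ = fs.Parse([]string{"--limit=bytes=9000000000"}) // too large for int32, fine for int64
	m, _ := fs.GetStringToInt64("limit")
	fmt.Println(m["bytes"]) // 9000000000
}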
diff --git a/vendor/github.com/spf13/pflag/string_to_string.go b/vendor/github.com/spf13/pflag/string_to_string.go
new file mode 100644
index 0000000..890a01a
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_to_string.go
@@ -0,0 +1,160 @@
+package pflag
+
+import (
+ "bytes"
+ "encoding/csv"
+ "fmt"
+ "strings"
+)
+
+// -- stringToString Value
+type stringToStringValue struct {
+ value *map[string]string
+ changed bool
+}
+
+func newStringToStringValue(val map[string]string, p *map[string]string) *stringToStringValue {
+ ssv := new(stringToStringValue)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+// Format: a=1,b=2
+func (s *stringToStringValue) Set(val string) error {
+ var ss []string
+ n := strings.Count(val, "=")
+ switch n {
+ case 0:
+ return fmt.Errorf("%s must be formatted as key=value", val)
+ case 1:
+ ss = append(ss, strings.Trim(val, `"`))
+ default:
+ r := csv.NewReader(strings.NewReader(val))
+ var err error
+ ss, err = r.Read()
+ if err != nil {
+ return err
+ }
+ }
+
+ out := make(map[string]string, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ out[kv[0]] = kv[1]
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ for k, v := range out {
+ (*s.value)[k] = v
+ }
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *stringToStringValue) Type() string {
+ return "stringToString"
+}
+
+func (s *stringToStringValue) String() string {
+ records := make([]string, 0, len(*s.value)>>1)
+ for k, v := range *s.value {
+ records = append(records, k+"="+v)
+ }
+
+ var buf bytes.Buffer
+ w := csv.NewWriter(&buf)
+ if err := w.Write(records); err != nil {
+ panic(err)
+ }
+ w.Flush()
+ return "[" + strings.TrimSpace(buf.String()) + "]"
+}
+
+func stringToStringConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // An empty string would cause an empty map
+ if len(val) == 0 {
+ return map[string]string{}, nil
+ }
+ r := csv.NewReader(strings.NewReader(val))
+ ss, err := r.Read()
+ if err != nil {
+ return nil, err
+ }
+ out := make(map[string]string, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return nil, fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ out[kv[0]] = kv[1]
+ }
+ return out, nil
+}
+
+// GetStringToString returns the map[string]string value of a flag with the given name
+func (f *FlagSet) GetStringToString(name string) (map[string]string, error) {
+ val, err := f.getFlagType(name, "stringToString", stringToStringConv)
+ if err != nil {
+ return map[string]string{}, err
+ }
+ return val.(map[string]string), nil
+}
+
+// StringToStringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]string variable in which to store the values of the multiple flags.
+// Multiple key=value pairs may be given, separated by commas (e.g. a=1,b=2).
+func (f *FlagSet) StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) {
+ f.VarP(newStringToStringValue(value, p), name, "", usage)
+}
+
+// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) {
+ f.VarP(newStringToStringValue(value, p), name, shorthand, usage)
+}
+
+// StringToStringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]string variable in which to store the value of the flag.
+// Multiple key=value pairs may be given, separated by commas (e.g. a=1,b=2).
+func StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) {
+ CommandLine.VarP(newStringToStringValue(value, p), name, "", usage)
+}
+
+// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash.
+func StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) {
+ CommandLine.VarP(newStringToStringValue(value, p), name, shorthand, usage)
+}
+
+// StringToString defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]string variable that stores the value of the flag.
+// Multiple key=value pairs may be given, separated by commas (e.g. a=1,b=2).
+func (f *FlagSet) StringToString(name string, value map[string]string, usage string) *map[string]string {
+ p := map[string]string{}
+ f.StringToStringVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string {
+ p := map[string]string{}
+ f.StringToStringVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringToString defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]string variable that stores the value of the flag.
+// Multiple key=value pairs may be given, separated by commas (e.g. a=1,b=2).
+func StringToString(name string, value map[string]string, usage string) *map[string]string {
+ return CommandLine.StringToStringP(name, "", value, usage)
+}
+
+// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash.
+func StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string {
+ return CommandLine.StringToStringP(name, shorthand, value, usage)
+}
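A sketch of the single-pair special case in Set, which lets a value contain commas (names illustrative):

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	labels := fs.StringToString("label", nil, "key=value labels")
	_ = fs.Parse([]string{"--label=env=prod,team=core", "--label=note=a,b"})
	fmt.Println((*labels)["note"]) // a,b: a lone key=value pair may contain commas
}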
diff --git a/vendor/github.com/spf13/pflag/uint.go b/vendor/github.com/spf13/pflag/uint.go
new file mode 100644
index 0000000..dcbc2b7
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint Value
+type uintValue uint
+
+func newUintValue(val uint, p *uint) *uintValue {
+ *p = val
+ return (*uintValue)(p)
+}
+
+func (i *uintValue) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 64)
+ *i = uintValue(v)
+ return err
+}
+
+func (i *uintValue) Type() string {
+ return "uint"
+}
+
+func (i *uintValue) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uintConv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 0)
+ if err != nil {
+ return 0, err
+ }
+ return uint(v), nil
+}
+
+// GetUint returns the uint value of a flag with the given name
+func (f *FlagSet) GetUint(name string) (uint, error) {
+ val, err := f.getFlagType(name, "uint", uintConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint), nil
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) {
+ f.VarP(newUintValue(value, p), name, "", usage)
+}
+
+// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) {
+ f.VarP(newUintValue(value, p), name, shorthand, usage)
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func UintVar(p *uint, name string, value uint, usage string) {
+ CommandLine.VarP(newUintValue(value, p), name, "", usage)
+}
+
+// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
+func UintVarP(p *uint, name, shorthand string, value uint, usage string) {
+ CommandLine.VarP(newUintValue(value, p), name, shorthand, usage)
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func (f *FlagSet) Uint(name string, value uint, usage string) *uint {
+ p := new(uint)
+ f.UintVarP(p, name, "", value, usage)
+ return p
+}
+
+// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint {
+ p := new(uint)
+ f.UintVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func Uint(name string, value uint, usage string) *uint {
+ return CommandLine.UintP(name, "", value, usage)
+}
+
+// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
+func UintP(name, shorthand string, value uint, usage string) *uint {
+ return CommandLine.UintP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint16.go b/vendor/github.com/spf13/pflag/uint16.go
new file mode 100644
index 0000000..7e9914e
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint16.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint16 value
+type uint16Value uint16
+
+func newUint16Value(val uint16, p *uint16) *uint16Value {
+ *p = val
+ return (*uint16Value)(p)
+}
+
+func (i *uint16Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 16)
+ *i = uint16Value(v)
+ return err
+}
+
+func (i *uint16Value) Type() string {
+ return "uint16"
+}
+
+func (i *uint16Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint16Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 16)
+ if err != nil {
+ return 0, err
+ }
+ return uint16(v), nil
+}
+
+// GetUint16 returns the uint16 value of a flag with the given name.
+func (f *FlagSet) GetUint16(name string) (uint16, error) {
+ val, err := f.getFlagType(name, "uint16", uint16Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint16), nil
+}
+
+// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
+// The argument p points to a uint16 variable in which to store the value of the flag.
+func (f *FlagSet) Uint16Var(p *uint16, name string, value uint16, usage string) {
+ f.VarP(newUint16Value(value, p), name, "", usage)
+}
+
+// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
+ f.VarP(newUint16Value(value, p), name, shorthand, usage)
+}
+
+// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
+// The argument p points to a uint16 variable in which to store the value of the flag.
+func Uint16Var(p *uint16, name string, value uint16, usage string) {
+ CommandLine.VarP(newUint16Value(value, p), name, "", usage)
+}
+
+// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
+ CommandLine.VarP(newUint16Value(value, p), name, shorthand, usage)
+}
+
+// Uint16 defines a uint16 flag with specified name, default value, and usage string.
+// The return value is the address of a uint16 variable that stores the value of the flag.
+func (f *FlagSet) Uint16(name string, value uint16, usage string) *uint16 {
+ p := new(uint16)
+ f.Uint16VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
+ p := new(uint16)
+ f.Uint16VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint16 defines a uint16 flag with specified name, default value, and usage string.
+// The return value is the address of a uint16 variable that stores the value of the flag.
+func Uint16(name string, value uint16, usage string) *uint16 {
+ return CommandLine.Uint16P(name, "", value, usage)
+}
+
+// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash.
+func Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
+ return CommandLine.Uint16P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint32.go b/vendor/github.com/spf13/pflag/uint32.go
new file mode 100644
index 0000000..d802453
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint32.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint32 value
+type uint32Value uint32
+
+func newUint32Value(val uint32, p *uint32) *uint32Value {
+ *p = val
+ return (*uint32Value)(p)
+}
+
+func (i *uint32Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 32)
+ *i = uint32Value(v)
+ return err
+}
+
+func (i *uint32Value) Type() string {
+ return "uint32"
+}
+
+func (i *uint32Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint32Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 32)
+ if err != nil {
+ return 0, err
+ }
+ return uint32(v), nil
+}
+
+// GetUint32 returns the uint32 value of a flag with the given name.
+func (f *FlagSet) GetUint32(name string) (uint32, error) {
+ val, err := f.getFlagType(name, "uint32", uint32Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint32), nil
+}
+
+// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
+// The argument p points to a uint32 variable in which to store the value of the flag.
+func (f *FlagSet) Uint32Var(p *uint32, name string, value uint32, usage string) {
+ f.VarP(newUint32Value(value, p), name, "", usage)
+}
+
+// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
+ f.VarP(newUint32Value(value, p), name, shorthand, usage)
+}
+
+// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
+// The argument p points to a uint32 variable in which to store the value of the flag.
+func Uint32Var(p *uint32, name string, value uint32, usage string) {
+ CommandLine.VarP(newUint32Value(value, p), name, "", usage)
+}
+
+// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
+ CommandLine.VarP(newUint32Value(value, p), name, shorthand, usage)
+}
+
+// Uint32 defines a uint32 flag with specified name, default value, and usage string.
+// The return value is the address of a uint32 variable that stores the value of the flag.
+func (f *FlagSet) Uint32(name string, value uint32, usage string) *uint32 {
+ p := new(uint32)
+ f.Uint32VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
+ p := new(uint32)
+ f.Uint32VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint32 defines a uint32 flag with specified name, default value, and usage string.
+// The return value is the address of a uint32 variable that stores the value of the flag.
+func Uint32(name string, value uint32, usage string) *uint32 {
+ return CommandLine.Uint32P(name, "", value, usage)
+}
+
+// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash.
+func Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
+ return CommandLine.Uint32P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint64.go b/vendor/github.com/spf13/pflag/uint64.go
new file mode 100644
index 0000000..f62240f
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint64.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint64 Value
+type uint64Value uint64
+
+func newUint64Value(val uint64, p *uint64) *uint64Value {
+ *p = val
+ return (*uint64Value)(p)
+}
+
+func (i *uint64Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 64)
+ *i = uint64Value(v)
+ return err
+}
+
+func (i *uint64Value) Type() string {
+ return "uint64"
+}
+
+func (i *uint64Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint64Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 64)
+ if err != nil {
+ return 0, err
+ }
+ return uint64(v), nil
+}
+
+// GetUint64 returns the uint64 value of a flag with the given name.
+func (f *FlagSet) GetUint64(name string) (uint64, error) {
+ val, err := f.getFlagType(name, "uint64", uint64Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint64), nil
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) {
+ f.VarP(newUint64Value(value, p), name, "", usage)
+}
+
+// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
+ f.VarP(newUint64Value(value, p), name, shorthand, usage)
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func Uint64Var(p *uint64, name string, value uint64, usage string) {
+ CommandLine.VarP(newUint64Value(value, p), name, "", usage)
+}
+
+// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
+ CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage)
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 {
+ p := new(uint64)
+ f.Uint64VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
+ p := new(uint64)
+ f.Uint64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func Uint64(name string, value uint64, usage string) *uint64 {
+ return CommandLine.Uint64P(name, "", value, usage)
+}
+
+// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
+func Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
+ return CommandLine.Uint64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint8.go b/vendor/github.com/spf13/pflag/uint8.go
new file mode 100644
index 0000000..bb0e83c
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint8.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint8 Value
+type uint8Value uint8
+
+func newUint8Value(val uint8, p *uint8) *uint8Value {
+ *p = val
+ return (*uint8Value)(p)
+}
+
+func (i *uint8Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 8)
+ *i = uint8Value(v)
+ return err
+}
+
+func (i *uint8Value) Type() string {
+ return "uint8"
+}
+
+func (i *uint8Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint8Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 8)
+ if err != nil {
+ return 0, err
+ }
+ return uint8(v), nil
+}
+
+// GetUint8 returns the uint8 value of a flag with the given name.
+func (f *FlagSet) GetUint8(name string) (uint8, error) {
+ val, err := f.getFlagType(name, "uint8", uint8Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint8), nil
+}
+
+// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
+// The argument p points to a uint8 variable in which to store the value of the flag.
+func (f *FlagSet) Uint8Var(p *uint8, name string, value uint8, usage string) {
+ f.VarP(newUint8Value(value, p), name, "", usage)
+}
+
+// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
+ f.VarP(newUint8Value(value, p), name, shorthand, usage)
+}
+
+// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
+// The argument p points to a uint8 variable in which to store the value of the flag.
+func Uint8Var(p *uint8, name string, value uint8, usage string) {
+ CommandLine.VarP(newUint8Value(value, p), name, "", usage)
+}
+
+// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
+ CommandLine.VarP(newUint8Value(value, p), name, shorthand, usage)
+}
+
+// Uint8 defines a uint8 flag with specified name, default value, and usage string.
+// The return value is the address of a uint8 variable that stores the value of the flag.
+func (f *FlagSet) Uint8(name string, value uint8, usage string) *uint8 {
+ p := new(uint8)
+ f.Uint8VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
+ p := new(uint8)
+ f.Uint8VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint8 defines a uint8 flag with specified name, default value, and usage string.
+// The return value is the address of a uint8 variable that stores the value of the flag.
+func Uint8(name string, value uint8, usage string) *uint8 {
+ return CommandLine.Uint8P(name, "", value, usage)
+}
+
+// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash.
+func Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
+ return CommandLine.Uint8P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint_slice.go b/vendor/github.com/spf13/pflag/uint_slice.go
new file mode 100644
index 0000000..5fa9248
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint_slice.go
@@ -0,0 +1,168 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- uintSlice Value
+type uintSliceValue struct {
+ value *[]uint
+ changed bool
+}
+
+func newUintSliceValue(val []uint, p *[]uint) *uintSliceValue {
+ uisv := new(uintSliceValue)
+ uisv.value = p
+ *uisv.value = val
+ return uisv
+}
+
+func (s *uintSliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]uint, len(ss))
+ for i, d := range ss {
+ u, err := strconv.ParseUint(d, 10, 0)
+ if err != nil {
+ return err
+ }
+ out[i] = uint(u)
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *uintSliceValue) Type() string {
+ return "uintSlice"
+}
+
+func (s *uintSliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%d", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *uintSliceValue) fromString(val string) (uint, error) {
+ t, err := strconv.ParseUint(val, 10, 0)
+ if err != nil {
+ return 0, err
+ }
+ return uint(t), nil
+}
+
+func (s *uintSliceValue) toString(val uint) string {
+ return fmt.Sprintf("%d", val)
+}
+
+func (s *uintSliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *uintSliceValue) Replace(val []string) error {
+ out := make([]uint, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *uintSliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func uintSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []uint{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]uint, len(ss))
+ for i, d := range ss {
+ u, err := strconv.ParseUint(d, 10, 0)
+ if err != nil {
+ return nil, err
+ }
+ out[i] = uint(u)
+ }
+ return out, nil
+}
+
+// GetUintSlice returns the []uint value of a flag with the given name.
+func (f *FlagSet) GetUintSlice(name string) ([]uint, error) {
+ val, err := f.getFlagType(name, "uintSlice", uintSliceConv)
+ if err != nil {
+ return []uint{}, err
+ }
+ return val.([]uint), nil
+}
+
+// UintSliceVar defines a uintSlice flag with specified name, default value, and usage string.
+// The argument p points to a []uint variable in which to store the value of the flag.
+func (f *FlagSet) UintSliceVar(p *[]uint, name string, value []uint, usage string) {
+ f.VarP(newUintSliceValue(value, p), name, "", usage)
+}
+
+// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
+ f.VarP(newUintSliceValue(value, p), name, shorthand, usage)
+}
+
+// UintSliceVar defines a []uint flag with specified name, default value, and usage string.
+// The argument p points to a []uint variable in which to store the value of the flag.
+func UintSliceVar(p *[]uint, name string, value []uint, usage string) {
+ CommandLine.VarP(newUintSliceValue(value, p), name, "", usage)
+}
+
+// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
+ CommandLine.VarP(newUintSliceValue(value, p), name, shorthand, usage)
+}
+
+// UintSlice defines a []uint flag with specified name, default value, and usage string.
+// The return value is the address of a []uint variable that stores the value of the flag.
+func (f *FlagSet) UintSlice(name string, value []uint, usage string) *[]uint {
+ p := []uint{}
+ f.UintSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
+ p := []uint{}
+ f.UintSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// UintSlice defines a []uint flag with specified name, default value, and usage string.
+// The return value is the address of a []uint variable that stores the value of the flag.
+func UintSlice(name string, value []uint, usage string) *[]uint {
+ return CommandLine.UintSliceP(name, "", value, usage)
+}
+
+// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
+func UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
+ return CommandLine.UintSliceP(name, shorthand, value, usage)
+}
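+
+// A minimal usage sketch (assumes NewFlagSet and ContinueOnError from the
+// rest of this package); a single value may be comma-separated, and repeated
+// flags append to the slice:
+//
+//	fs := NewFlagSet("demo", ContinueOnError)
+//	ids := fs.UintSlice("id", []uint{}, "numeric ids")
+//	_ = fs.Parse([]string{"--id", "1,2", "--id", "3"})
+//	// *ids == []uint{1, 2, 3}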
diff --git a/vendor/github.com/stoewer/go-strcase/.gitignore b/vendor/github.com/stoewer/go-strcase/.gitignore
new file mode 100644
index 0000000..db5247b
--- /dev/null
+++ b/vendor/github.com/stoewer/go-strcase/.gitignore
@@ -0,0 +1,17 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+vendor
+doc
+
+# Temporary files
+*~
+*.swp
+
+# Editor and IDE config
+.idea
+*.iml
+.vscode
diff --git a/vendor/github.com/stoewer/go-strcase/.golangci.yml b/vendor/github.com/stoewer/go-strcase/.golangci.yml
new file mode 100644
index 0000000..7f98d55
--- /dev/null
+++ b/vendor/github.com/stoewer/go-strcase/.golangci.yml
@@ -0,0 +1,26 @@
+run:
+ deadline: 10m
+
+linters:
+ enable:
+ - dupl
+ - goconst
+ - gocyclo
+ - godox
+ - gosec
+ - interfacer
+ - lll
+ - maligned
+ - misspell
+ - prealloc
+ - stylecheck
+ - unconvert
+ - unparam
+ - errcheck
+ - golint
+ - gofmt
+ disable: []
+ fast: false
+
+issues:
+ exclude-use-default: false
diff --git a/vendor/github.com/stoewer/go-strcase/LICENSE b/vendor/github.com/stoewer/go-strcase/LICENSE
new file mode 100644
index 0000000..a105a38
--- /dev/null
+++ b/vendor/github.com/stoewer/go-strcase/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2017, Adrian Stoewer
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/stoewer/go-strcase/README.md b/vendor/github.com/stoewer/go-strcase/README.md
new file mode 100644
index 0000000..0e8635d
--- /dev/null
+++ b/vendor/github.com/stoewer/go-strcase/README.md
@@ -0,0 +1,50 @@
+[](https://circleci.com/gh/stoewer/go-strcase/tree/master)
+[](https://codecov.io/gh/stoewer/go-strcase)
+[](https://pkg.go.dev/github.com/stoewer/go-strcase)
+---
+
+Go strcase
+==========
+
+The package `strcase` converts between different kinds of naming formats such as camel case
+(`CamelCase`), snake case (`snake_case`) or kebab case (`kebab-case`).
+The package is designed to work only with strings consisting of standard ASCII letters.
+Unicode is currently not supported.
+
+Versioning and stability
+------------------------
+
+Although the master branch is intended to always remain backward compatible, the
+repository contains version tags in order to support vendoring tools.
+The tag names follow semantic versioning conventions and have the format `v1.0.0`.
+This package supports Go modules, which were introduced with Go 1.11.
+
+Example
+-------
+
+```go
+import "github.com/stoewer/go-strcase"
+
+var snake = strcase.SnakeCase("CamelCase")
+```
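+
+A slightly longer, self-contained sketch using the conversion functions this
+package exports:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/stoewer/go-strcase"
+)
+
+func main() {
+	fmt.Println(strcase.SnakeCase("HTTPServer"))      // http_server
+	fmt.Println(strcase.KebabCase("Hello World"))     // hello-world
+	fmt.Println(strcase.UpperCamelCase("snake_case")) // SnakeCase
+}
+```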
+
+Dependencies
+------------
+
+### Build dependencies
+
+* none
+
+### Test dependencies
+
+* `github.com/stretchr/testify`
+
+Run linters and unit tests
+--------------------------
+
+To run the static code analysis, linters, and tests, use the following commands:
+
+```
+golangci-lint run --config .golangci.yml ./...
+go test ./...
+```
diff --git a/vendor/github.com/stoewer/go-strcase/camel.go b/vendor/github.com/stoewer/go-strcase/camel.go
new file mode 100644
index 0000000..5c233cc
--- /dev/null
+++ b/vendor/github.com/stoewer/go-strcase/camel.go
@@ -0,0 +1,37 @@
+// Copyright (c) 2017, A. Stoewer
+// All rights reserved.
+
+package strcase
+
+import (
+ "strings"
+)
+
+// UpperCamelCase converts a string into camel case starting with an upper case letter.
+func UpperCamelCase(s string) string {
+ return camelCase(s, true)
+}
+
+// LowerCamelCase converts a string into camel case starting with a lower case letter.
+func LowerCamelCase(s string) string {
+ return camelCase(s, false)
+}
+
+func camelCase(s string, upper bool) string {
+ s = strings.TrimSpace(s)
+ buffer := make([]rune, 0, len(s))
+
+ stringIter(s, func(prev, curr, next rune) {
+ if !isDelimiter(curr) {
+ if isDelimiter(prev) || (upper && prev == 0) {
+ buffer = append(buffer, toUpper(curr))
+ } else if isLower(prev) {
+ buffer = append(buffer, curr)
+ } else {
+ buffer = append(buffer, toLower(curr))
+ }
+ }
+ })
+
+ return string(buffer)
+}
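+
+// Illustrative conversions (a sketch of the expected output, derived from the
+// rules above):
+//
+//	UpperCamelCase("snake_case") // "SnakeCase"
+//	LowerCamelCase("kebab-case") // "kebabCase"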
diff --git a/vendor/github.com/stoewer/go-strcase/doc.go b/vendor/github.com/stoewer/go-strcase/doc.go
new file mode 100644
index 0000000..3e441ca
--- /dev/null
+++ b/vendor/github.com/stoewer/go-strcase/doc.go
@@ -0,0 +1,8 @@
+// Copyright (c) 2017, A. Stoewer
+// All rights reserved.
+
+// Package strcase converts between different kinds of naming formats such as camel case
+// (CamelCase), snake case (snake_case) or kebab case (kebab-case). The package is designed
+// to work only with strings consisting of standard ASCII letters. Unicode is currently not
+// supported.
+package strcase
diff --git a/vendor/github.com/stoewer/go-strcase/helper.go b/vendor/github.com/stoewer/go-strcase/helper.go
new file mode 100644
index 0000000..ecad589
--- /dev/null
+++ b/vendor/github.com/stoewer/go-strcase/helper.go
@@ -0,0 +1,71 @@
+// Copyright (c) 2017, A. Stoewer
+// All rights reserved.
+
+package strcase
+
+// isLower checks if a character is lower case. More precisely, it evaluates whether it is
+// in the range of ASCII characters 'a' to 'z'.
+func isLower(ch rune) bool {
+ return ch >= 'a' && ch <= 'z'
+}
+
+// toLower converts a character in the range of ASCII characters 'A' to 'Z' to its lower
+// case counterpart. Other characters remain the same.
+func toLower(ch rune) rune {
+ if ch >= 'A' && ch <= 'Z' {
+ return ch + 32
+ }
+ return ch
+}
+
+// isUpper checks if a character is upper case. More precisely, it evaluates whether it is
+// in the range of ASCII characters 'A' to 'Z'.
+func isUpper(ch rune) bool {
+ return ch >= 'A' && ch <= 'Z'
+}
+
+// toUpper converts a character in the range of ASCII characters 'a' to 'z' to its upper
+// case counterpart. Other characters remain the same.
+func toUpper(ch rune) rune {
+ if ch >= 'a' && ch <= 'z' {
+ return ch - 32
+ }
+ return ch
+}
+
+// isSpace checks if a character is some kind of whitespace.
+func isSpace(ch rune) bool {
+ return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// isDelimiter checks if a character is some kind of whitespace or '_' or '-'.
+func isDelimiter(ch rune) bool {
+ return ch == '-' || ch == '_' || isSpace(ch)
+}
+
+// iterFunc is a callback that is called for a specific position in a string. Its arguments are the
+// rune at the respective string position as well as the previous and the next rune. If curr is at the
+// first position of the string, prev is zero. If curr is at the end of the string, next is zero.
+type iterFunc func(prev, curr, next rune)
+
+// stringIter iterates over a string, invoking the callback for every single rune in the string.
+func stringIter(s string, callback iterFunc) {
+ var prev rune
+ var curr rune
+ for _, next := range s {
+ if curr == 0 {
+ prev = curr
+ curr = next
+ continue
+ }
+
+ callback(prev, curr, next)
+
+ prev = curr
+ curr = next
+ }
+
+ if len(s) > 0 {
+ callback(prev, curr, 0)
+ }
+}
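+
+// For example, stringIter("ab", cb) invokes cb twice: first with
+// (0, 'a', 'b') and then with ('a', 'b', 0).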
diff --git a/vendor/github.com/stoewer/go-strcase/kebab.go b/vendor/github.com/stoewer/go-strcase/kebab.go
new file mode 100644
index 0000000..e9a6487
--- /dev/null
+++ b/vendor/github.com/stoewer/go-strcase/kebab.go
@@ -0,0 +1,14 @@
+// Copyright (c) 2017, A. Stoewer
+// All rights reserved.
+
+package strcase
+
+// KebabCase converts a string into kebab case.
+func KebabCase(s string) string {
+ return delimiterCase(s, '-', false)
+}
+
+// UpperKebabCase converts a string into kebab case with capital letters.
+func UpperKebabCase(s string) string {
+ return delimiterCase(s, '-', true)
+}
diff --git a/vendor/github.com/stoewer/go-strcase/snake.go b/vendor/github.com/stoewer/go-strcase/snake.go
new file mode 100644
index 0000000..1b216e2
--- /dev/null
+++ b/vendor/github.com/stoewer/go-strcase/snake.go
@@ -0,0 +1,58 @@
+// Copyright (c) 2017, A. Stoewer
+// All rights reserved.
+
+package strcase
+
+import (
+ "strings"
+)
+
+// SnakeCase converts a string into snake case.
+func SnakeCase(s string) string {
+ return delimiterCase(s, '_', false)
+}
+
+// UpperSnakeCase converts a string into snake case with capital letters.
+func UpperSnakeCase(s string) string {
+ return delimiterCase(s, '_', true)
+}
+
+// delimiterCase converts a string into snake_case or kebab-case depending on the delimiter passed
+// as second argument. When upperCase is true the result will be UPPER_SNAKE_CASE or UPPER-KEBAB-CASE.
+func delimiterCase(s string, delimiter rune, upperCase bool) string {
+ s = strings.TrimSpace(s)
+ buffer := make([]rune, 0, len(s)+3)
+
+ adjustCase := toLower
+ if upperCase {
+ adjustCase = toUpper
+ }
+
+ var prev rune
+ var curr rune
+ for _, next := range s {
+ if isDelimiter(curr) {
+ if !isDelimiter(prev) {
+ buffer = append(buffer, delimiter)
+ }
+ } else if isUpper(curr) {
+ if isLower(prev) || (isUpper(prev) && isLower(next)) {
+ buffer = append(buffer, delimiter)
+ }
+ buffer = append(buffer, adjustCase(curr))
+ } else if curr != 0 {
+ buffer = append(buffer, adjustCase(curr))
+ }
+ prev = curr
+ curr = next
+ }
+
+ if len(s) > 0 {
+ if isUpper(curr) && isLower(prev) && prev != 0 {
+ buffer = append(buffer, delimiter)
+ }
+ buffer = append(buffer, adjustCase(curr))
+ }
+
+ return string(buffer)
+}
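+
+// Illustrative conversions (a sketch of the expected output, derived from the
+// rules above):
+//
+//	SnakeCase("HTTPServer")  // "http_server"
+//	UpperSnakeCase("fooBar") // "FOO_BAR"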
diff --git a/vendor/github.com/stretchr/testify/LICENSE b/vendor/github.com/stretchr/testify/LICENSE
new file mode 100644
index 0000000..4b0421c
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
new file mode 100644
index 0000000..4d4b4aa
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
@@ -0,0 +1,480 @@
+package assert
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "time"
+)
+
+type CompareType int
+
+const (
+ compareLess CompareType = iota - 1
+ compareEqual
+ compareGreater
+)
+
+var (
+ intType = reflect.TypeOf(int(1))
+ int8Type = reflect.TypeOf(int8(1))
+ int16Type = reflect.TypeOf(int16(1))
+ int32Type = reflect.TypeOf(int32(1))
+ int64Type = reflect.TypeOf(int64(1))
+
+ uintType = reflect.TypeOf(uint(1))
+ uint8Type = reflect.TypeOf(uint8(1))
+ uint16Type = reflect.TypeOf(uint16(1))
+ uint32Type = reflect.TypeOf(uint32(1))
+ uint64Type = reflect.TypeOf(uint64(1))
+
+ uintptrType = reflect.TypeOf(uintptr(1))
+
+ float32Type = reflect.TypeOf(float32(1))
+ float64Type = reflect.TypeOf(float64(1))
+
+ stringType = reflect.TypeOf("")
+
+ timeType = reflect.TypeOf(time.Time{})
+ bytesType = reflect.TypeOf([]byte{})
+)
+
+func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
+ obj1Value := reflect.ValueOf(obj1)
+ obj2Value := reflect.ValueOf(obj2)
+
+ // throughout this switch we try to avoid calling .Convert() if possible,
+ // as it has a pretty big performance impact
+ switch kind {
+ case reflect.Int:
+ {
+ intobj1, ok := obj1.(int)
+ if !ok {
+ intobj1 = obj1Value.Convert(intType).Interface().(int)
+ }
+ intobj2, ok := obj2.(int)
+ if !ok {
+ intobj2 = obj2Value.Convert(intType).Interface().(int)
+ }
+ if intobj1 > intobj2 {
+ return compareGreater, true
+ }
+ if intobj1 == intobj2 {
+ return compareEqual, true
+ }
+ if intobj1 < intobj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Int8:
+ {
+ int8obj1, ok := obj1.(int8)
+ if !ok {
+ int8obj1 = obj1Value.Convert(int8Type).Interface().(int8)
+ }
+ int8obj2, ok := obj2.(int8)
+ if !ok {
+ int8obj2 = obj2Value.Convert(int8Type).Interface().(int8)
+ }
+ if int8obj1 > int8obj2 {
+ return compareGreater, true
+ }
+ if int8obj1 == int8obj2 {
+ return compareEqual, true
+ }
+ if int8obj1 < int8obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Int16:
+ {
+ int16obj1, ok := obj1.(int16)
+ if !ok {
+ int16obj1 = obj1Value.Convert(int16Type).Interface().(int16)
+ }
+ int16obj2, ok := obj2.(int16)
+ if !ok {
+ int16obj2 = obj2Value.Convert(int16Type).Interface().(int16)
+ }
+ if int16obj1 > int16obj2 {
+ return compareGreater, true
+ }
+ if int16obj1 == int16obj2 {
+ return compareEqual, true
+ }
+ if int16obj1 < int16obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Int32:
+ {
+ int32obj1, ok := obj1.(int32)
+ if !ok {
+ int32obj1 = obj1Value.Convert(int32Type).Interface().(int32)
+ }
+ int32obj2, ok := obj2.(int32)
+ if !ok {
+ int32obj2 = obj2Value.Convert(int32Type).Interface().(int32)
+ }
+ if int32obj1 > int32obj2 {
+ return compareGreater, true
+ }
+ if int32obj1 == int32obj2 {
+ return compareEqual, true
+ }
+ if int32obj1 < int32obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Int64:
+ {
+ int64obj1, ok := obj1.(int64)
+ if !ok {
+ int64obj1 = obj1Value.Convert(int64Type).Interface().(int64)
+ }
+ int64obj2, ok := obj2.(int64)
+ if !ok {
+ int64obj2 = obj2Value.Convert(int64Type).Interface().(int64)
+ }
+ if int64obj1 > int64obj2 {
+ return compareGreater, true
+ }
+ if int64obj1 == int64obj2 {
+ return compareEqual, true
+ }
+ if int64obj1 < int64obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Uint:
+ {
+ uintobj1, ok := obj1.(uint)
+ if !ok {
+ uintobj1 = obj1Value.Convert(uintType).Interface().(uint)
+ }
+ uintobj2, ok := obj2.(uint)
+ if !ok {
+ uintobj2 = obj2Value.Convert(uintType).Interface().(uint)
+ }
+ if uintobj1 > uintobj2 {
+ return compareGreater, true
+ }
+ if uintobj1 == uintobj2 {
+ return compareEqual, true
+ }
+ if uintobj1 < uintobj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Uint8:
+ {
+ uint8obj1, ok := obj1.(uint8)
+ if !ok {
+ uint8obj1 = obj1Value.Convert(uint8Type).Interface().(uint8)
+ }
+ uint8obj2, ok := obj2.(uint8)
+ if !ok {
+ uint8obj2 = obj2Value.Convert(uint8Type).Interface().(uint8)
+ }
+ if uint8obj1 > uint8obj2 {
+ return compareGreater, true
+ }
+ if uint8obj1 == uint8obj2 {
+ return compareEqual, true
+ }
+ if uint8obj1 < uint8obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Uint16:
+ {
+ uint16obj1, ok := obj1.(uint16)
+ if !ok {
+ uint16obj1 = obj1Value.Convert(uint16Type).Interface().(uint16)
+ }
+ uint16obj2, ok := obj2.(uint16)
+ if !ok {
+ uint16obj2 = obj2Value.Convert(uint16Type).Interface().(uint16)
+ }
+ if uint16obj1 > uint16obj2 {
+ return compareGreater, true
+ }
+ if uint16obj1 == uint16obj2 {
+ return compareEqual, true
+ }
+ if uint16obj1 < uint16obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Uint32:
+ {
+ uint32obj1, ok := obj1.(uint32)
+ if !ok {
+ uint32obj1 = obj1Value.Convert(uint32Type).Interface().(uint32)
+ }
+ uint32obj2, ok := obj2.(uint32)
+ if !ok {
+ uint32obj2 = obj2Value.Convert(uint32Type).Interface().(uint32)
+ }
+ if uint32obj1 > uint32obj2 {
+ return compareGreater, true
+ }
+ if uint32obj1 == uint32obj2 {
+ return compareEqual, true
+ }
+ if uint32obj1 < uint32obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Uint64:
+ {
+ uint64obj1, ok := obj1.(uint64)
+ if !ok {
+ uint64obj1 = obj1Value.Convert(uint64Type).Interface().(uint64)
+ }
+ uint64obj2, ok := obj2.(uint64)
+ if !ok {
+ uint64obj2 = obj2Value.Convert(uint64Type).Interface().(uint64)
+ }
+ if uint64obj1 > uint64obj2 {
+ return compareGreater, true
+ }
+ if uint64obj1 == uint64obj2 {
+ return compareEqual, true
+ }
+ if uint64obj1 < uint64obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Float32:
+ {
+ float32obj1, ok := obj1.(float32)
+ if !ok {
+ float32obj1 = obj1Value.Convert(float32Type).Interface().(float32)
+ }
+ float32obj2, ok := obj2.(float32)
+ if !ok {
+ float32obj2 = obj2Value.Convert(float32Type).Interface().(float32)
+ }
+ if float32obj1 > float32obj2 {
+ return compareGreater, true
+ }
+ if float32obj1 == float32obj2 {
+ return compareEqual, true
+ }
+ if float32obj1 < float32obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Float64:
+ {
+ float64obj1, ok := obj1.(float64)
+ if !ok {
+ float64obj1 = obj1Value.Convert(float64Type).Interface().(float64)
+ }
+ float64obj2, ok := obj2.(float64)
+ if !ok {
+ float64obj2 = obj2Value.Convert(float64Type).Interface().(float64)
+ }
+ if float64obj1 > float64obj2 {
+ return compareGreater, true
+ }
+ if float64obj1 == float64obj2 {
+ return compareEqual, true
+ }
+ if float64obj1 < float64obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.String:
+ {
+ stringobj1, ok := obj1.(string)
+ if !ok {
+ stringobj1 = obj1Value.Convert(stringType).Interface().(string)
+ }
+ stringobj2, ok := obj2.(string)
+ if !ok {
+ stringobj2 = obj2Value.Convert(stringType).Interface().(string)
+ }
+ if stringobj1 > stringobj2 {
+ return compareGreater, true
+ }
+ if stringobj1 == stringobj2 {
+ return compareEqual, true
+ }
+ if stringobj1 < stringobj2 {
+ return compareLess, true
+ }
+ }
+ // Check for known struct types we can check for compare results.
+ case reflect.Struct:
+ {
+ // All structs enter here. We're not interested in most types.
+ if !obj1Value.CanConvert(timeType) {
+ break
+ }
+
+ // time.Time can be compared!
+ timeObj1, ok := obj1.(time.Time)
+ if !ok {
+ timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time)
+ }
+
+ timeObj2, ok := obj2.(time.Time)
+ if !ok {
+ timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time)
+ }
+
+ return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64)
+ }
+ case reflect.Slice:
+ {
+ // We only care about the []byte type.
+ if !obj1Value.CanConvert(bytesType) {
+ break
+ }
+
+ // []byte can be compared!
+ bytesObj1, ok := obj1.([]byte)
+ if !ok {
+ bytesObj1 = obj1Value.Convert(bytesType).Interface().([]byte)
+
+ }
+ bytesObj2, ok := obj2.([]byte)
+ if !ok {
+ bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte)
+ }
+
+ return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true
+ }
+ case reflect.Uintptr:
+ {
+ uintptrObj1, ok := obj1.(uintptr)
+ if !ok {
+ uintptrObj1 = obj1Value.Convert(uintptrType).Interface().(uintptr)
+ }
+ uintptrObj2, ok := obj2.(uintptr)
+ if !ok {
+ uintptrObj2 = obj2Value.Convert(uintptrType).Interface().(uintptr)
+ }
+ if uintptrObj1 > uintptrObj2 {
+ return compareGreater, true
+ }
+ if uintptrObj1 == uintptrObj2 {
+ return compareEqual, true
+ }
+ if uintptrObj1 < uintptrObj2 {
+ return compareLess, true
+ }
+ }
+ }
+
+ return compareEqual, false
+}
+
+// Greater asserts that the first element is greater than the second
+//
+// assert.Greater(t, 2, 1)
+// assert.Greater(t, float64(2), float64(1))
+// assert.Greater(t, "b", "a")
+func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
+}
+
+// GreaterOrEqual asserts that the first element is greater than or equal to the second
+//
+// assert.GreaterOrEqual(t, 2, 1)
+// assert.GreaterOrEqual(t, 2, 2)
+// assert.GreaterOrEqual(t, "b", "a")
+// assert.GreaterOrEqual(t, "b", "b")
+func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
+}
+
+// Less asserts that the first element is less than the second
+//
+// assert.Less(t, 1, 2)
+// assert.Less(t, float64(1), float64(2))
+// assert.Less(t, "a", "b")
+func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
+}
+
+// LessOrEqual asserts that the first element is less than or equal to the second
+//
+// assert.LessOrEqual(t, 1, 2)
+// assert.LessOrEqual(t, 2, 2)
+// assert.LessOrEqual(t, "a", "b")
+// assert.LessOrEqual(t, "b", "b")
+func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
+}
+
+// Positive asserts that the specified element is positive
+//
+// assert.Positive(t, 1)
+// assert.Positive(t, 1.23)
+func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ zero := reflect.Zero(reflect.TypeOf(e))
+ return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...)
+}
+
+// Negative asserts that the specified element is negative
+//
+// assert.Negative(t, -1)
+// assert.Negative(t, -1.23)
+func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ zero := reflect.Zero(reflect.TypeOf(e))
+ return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...)
+}
+
+func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ e1Kind := reflect.ValueOf(e1).Kind()
+ e2Kind := reflect.ValueOf(e2).Kind()
+ if e1Kind != e2Kind {
+ return Fail(t, "Elements should be the same type", msgAndArgs...)
+ }
+
+ compareResult, isComparable := compare(e1, e2, e1Kind)
+ if !isComparable {
+ return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
+ }
+
+ if !containsValue(allowedComparesResults, compareResult) {
+ return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...)
+ }
+
+ return true
+}
+
+func containsValue(values []CompareType, value CompareType) bool {
+ for _, v := range values {
+ if v == value {
+ return true
+ }
+ }
+
+ return false
+}
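+
+// Beyond the numeric and string kinds, the compare switch above also accepts
+// time.Time and []byte, so ordering assertions like the following sketch are
+// valid:
+//
+//	assert.Greater(t, time.Now(), time.Now().Add(-time.Hour))
+//	assert.Less(t, []byte{1}, []byte{2})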
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go
new file mode 100644
index 0000000..3ddab10
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go
@@ -0,0 +1,815 @@
+// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT.
+
+package assert
+
+import (
+ http "net/http"
+ url "net/url"
+ time "time"
+)
+
+// Conditionf uses a Comparison to assert a complex condition.
+func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Condition(t, comp, append([]interface{}{msg}, args...)...)
+}
+
+// Containsf asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted")
+// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted")
+// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted")
+func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Contains(t, s, contains, append([]interface{}{msg}, args...)...)
+}
+
+// DirExistsf checks whether a directory exists in the given path. It also fails
+// if the path is a file rather than a directory, or if there is an error checking whether it exists.
+func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return DirExists(t, path, append([]interface{}{msg}, args...)...)
+}
+
+// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
+func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...)
+}
+
+// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0, or
+// a slice or a channel with len == 0.
+//
+// assert.Emptyf(t, obj, "error message %s", "formatted")
+func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Empty(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// Equalf asserts that two objects are equal.
+//
+// assert.Equalf(t, 123, 123, "error message %s", "formatted")
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Equal(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted")
+func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...)
+}
+
+// EqualExportedValuesf asserts that the types of two objects are equal and their public
+// fields are also equal. This is useful for comparing structs that have private fields
+// that could potentially differ.
+//
+// type S struct {
+// Exported int
+// notExported int
+// }
+// assert.EqualExportedValuesf(t, S{1, 2}, S{1, 3}, "error message %s", "formatted") => true
+// assert.EqualExportedValuesf(t, S{1, 2}, S{2, 3}, "error message %s", "formatted") => false
+func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// EqualValuesf asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
+func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return EqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Errorf asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.Errorf(t, err, "error message %s", "formatted") {
+// assert.Equal(t, expectedErrorf, err)
+// }
+func Errorf(t TestingT, err error, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Error(t, err, append([]interface{}{msg}, args...)...)
+}
+
+// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
+// This is a wrapper for errors.As.
+func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...)
+}
+
+// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+// actualObj, err := SomeFunction()
+// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted")
+func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorContains(t, theError, contains, append([]interface{}{msg}, args...)...)
+}
+
+// ErrorIsf asserts that at least one of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorIs(t, err, target, append([]interface{}{msg}, args...)...)
+}
+
+// Eventuallyf asserts that the given condition will be met in waitFor time,
+// periodically checking the target function each tick.
+//
+// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Eventually(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...)
+}
+
+// EventuallyWithTf asserts that the given condition will be met in waitFor time,
+// periodically checking the target function each tick. In contrast to Eventually,
+// it supplies a CollectT to the condition function, so that the condition
+// function can use the CollectT to call other assertions.
+// The condition is considered "met" if no errors are raised in a tick.
+// The supplied CollectT collects all errors from one tick (if there are any).
+// If the condition is not met before waitFor, the collected errors of
+// the last tick are copied to t.
+//
+// externalValue := false
+// go func() {
+// time.Sleep(8*time.Second)
+// externalValue = true
+// }()
+// assert.EventuallyWithTf(t, func(c *assert.CollectT) {
+// // add assertions as needed; any assertion failure will fail the current tick
+// assert.True(c, externalValue, "expected 'externalValue' to be true")
+// }, 10*time.Second, 100*time.Millisecond, "error message %s", "formatted")
+func EventuallyWithTf(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return EventuallyWithT(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...)
+}
+
+// Exactlyf asserts that two objects are equal in value and type.
+//
+// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
+func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Exactly(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Failf reports a failure through
+func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Fail(t, failureMessage, append([]interface{}{msg}, args...)...)
+}
+
+// FailNowf fails test
+func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return FailNow(t, failureMessage, append([]interface{}{msg}, args...)...)
+}
+
+// Falsef asserts that the specified value is false.
+//
+// assert.Falsef(t, myBool, "error message %s", "formatted")
+func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return False(t, value, append([]interface{}{msg}, args...)...)
+}
+
+// FileExistsf checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
+func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return FileExists(t, path, append([]interface{}{msg}, args...)...)
+}
+
+// Greaterf asserts that the first element is greater than the second
+//
+// assert.Greaterf(t, 2, 1, "error message %s", "formatted")
+// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted")
+// assert.Greaterf(t, "b", "a", "error message %s", "formatted")
+func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Greater(t, e1, e2, append([]interface{}{msg}, args...)...)
+}
+
+// GreaterOrEqualf asserts that the first element is greater than or equal to the second
+//
+// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted")
+// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted")
+// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted")
+// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted")
+func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return GreaterOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPBodyContainsf asserts that a specified handler returns a
+// body that contains a string.
+//
+// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPBodyContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPBodyNotContainsf asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPBodyNotContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPErrorf asserts that a specified handler returns an error status code.
+//
+// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPError(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPRedirectf asserts that a specified handler returns a redirect status code.
+//
+// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPStatusCodef asserts that a specified handler returns a specified status code.
+//
+// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPStatusCode(t, handler, method, url, values, statuscode, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPSuccessf asserts that a specified handler returns a success status code.
+//
+// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPSuccess(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
+}
+
+// Implementsf asserts that an object implements the specified interface.
+//
+// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Implements(t, interfaceObject, object, append([]interface{}{msg}, args...)...)
+}
+
+// InDeltaf asserts that the two numerals are within delta of each other.
+//
+// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
+func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDelta(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDeltaMapValues(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
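+
+// A minimal usage sketch for InDeltaMapValuesf; the two maps must share
+// exactly the same keys, and each pair of values may differ by at most delta
+// (map contents here are hypothetical):
+//
+// expected := map[string]float64{"cpu": 0.50, "mem": 0.25}
+// actual := map[string]float64{"cpu": 0.51, "mem": 0.24}
+// assert.InDeltaMapValuesf(t, expected, actual, 0.02, "usage drifted on %s", "node1")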
+
+// InDeltaSlicef is the same as InDelta, except it compares two slices.
+func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDeltaSlice(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// InEpsilonf asserts that expected and actual have a relative error less than epsilon
+func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return InEpsilon(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
+}
+
+// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
+func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
+}
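+
+// A usage sketch for the epsilon helpers above; unlike the delta variants,
+// epsilon bounds the relative error |expected-actual|/|expected|:
+//
+// assert.InEpsilonf(t, 100.0, 101.0, 0.02, "latency regressed for %s", "p99")
+// assert.InEpsilonSlicef(t, []float64{1, 10}, []float64{1.01, 10.1}, 0.02, "batch %s", "A")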
+
+// IsDecreasingf asserts that the collection is decreasing
+//
+// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted")
+// assert.IsDecreasingf(t, []float64{2, 1}, "error message %s", "formatted")
+// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
+func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsDecreasing(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// IsIncreasingf asserts that the collection is increasing
+//
+// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted")
+// assert.IsIncreasingf(t, []float64{1, 2}, "error message %s", "formatted")
+// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
+func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsIncreasing(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// IsNonDecreasingf asserts that the collection is not decreasing
+//
+// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted")
+// assert.IsNonDecreasingf(t, []float64{1, 2}, "error message %s", "formatted")
+// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
+func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsNonDecreasing(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// IsNonIncreasingf asserts that the collection is not increasing
+//
+// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted")
+// assert.IsNonIncreasingf(t, []float64{2, 1}, "error message %s", "formatted")
+// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
+func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// IsTypef asserts that the specified objects are of the same type.
+func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsType(t, expectedType, object, append([]interface{}{msg}, args...)...)
+}
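+
+// A minimal usage sketch for IsTypef; the expected type is supplied as a
+// value of that type (w is a hypothetical variable under test):
+//
+// assert.IsTypef(t, (*bytes.Buffer)(nil), w, "unexpected writer for sink %s", "audit")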
+
+// JSONEqf asserts that two JSON strings are equivalent.
+//
+// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Lenf asserts that the specified object has specific length.
+// Lenf also fails if the object has a type that len() does not accept.
+//
+// assert.Lenf(t, mySlice, 3, "error message %s", "formatted")
+func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Len(t, object, length, append([]interface{}{msg}, args...)...)
+}
+
+// Lessf asserts that the first element is less than the second
+//
+// assert.Lessf(t, 1, 2, "error message %s", "formatted")
+// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted")
+// assert.Lessf(t, "a", "b", "error message %s", "formatted")
+func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Less(t, e1, e2, append([]interface{}{msg}, args...)...)
+}
+
+// LessOrEqualf asserts that the first element is less than or equal to the second
+//
+// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted")
+// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted")
+// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted")
+// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted")
+func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return LessOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...)
+}
+
+// Negativef asserts that the specified element is negative
+//
+// assert.Negativef(t, -1, "error message %s", "formatted")
+// assert.Negativef(t, -1.23, "error message %s", "formatted")
+func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Negative(t, e, append([]interface{}{msg}, args...)...)
+}
+
+// Neverf asserts that the given condition is never satisfied within the
+// waitFor time, periodically checking the target function each tick.
+//
+// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Never(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...)
+}
+
+// Nilf asserts that the specified object is nil.
+//
+// assert.Nilf(t, err, "error message %s", "formatted")
+func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Nil(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// NoDirExistsf checks whether a directory does not exist in the given path.
+// It fails only if the path points to an existing _directory_.
+func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoDirExists(t, path, append([]interface{}{msg}, args...)...)
+}
+
+// NoErrorf asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.NoErrorf(t, err, "error message %s", "formatted") {
+// assert.Equal(t, expectedObj, actualObj)
+// }
+func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoError(t, err, append([]interface{}{msg}, args...)...)
+}
+
+// NoFileExistsf checks whether a file does not exist in a given path. It fails
+// only if the path points to an existing _file_.
+func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoFileExists(t, path, append([]interface{}{msg}, args...)...)
+}
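+
+// A usage sketch contrasting the two negative existence checks above
+// (both paths are hypothetical):
+//
+// assert.NoDirExistsf(t, "/tmp/scratch", "scratch dir should be gone after %s", "cleanup")
+// assert.NoFileExistsf(t, "/tmp/app.lock", "stale lock left behind by run %s", "42")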
+
+// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted")
+// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted")
+// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted")
+func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotContains(t, s, contains, append([]interface{}{msg}, args...)...)
+}
+
+// NotEmptyf asserts that the specified object is NOT empty, i.e. not nil, "",
+// false, 0, or a slice or a channel with len == 0.
+//
+// if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
+// assert.Equal(t, "two", obj[1])
+// }
+func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEmpty(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// NotEqualf asserts that the specified values are NOT equal.
+//
+// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
+//
+// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted")
+func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// NotErrorIsf asserts that none of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...)
+}
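+
+// A minimal usage sketch for NotErrorIsf; it passes because io.EOF is not
+// in the wrapped error's chain:
+//
+// err := fmt.Errorf("read config: %w", os.ErrNotExist)
+// assert.NotErrorIsf(t, err, io.EOF, "unexpected EOF while loading %s", "config")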
+
+// NotImplementsf asserts that an object does not implement the specified interface.
+//
+// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotImplements(t, interfaceObject, object, append([]interface{}{msg}, args...)...)
+}
+
+// NotNilf asserts that the specified object is not nil.
+//
+// assert.NotNilf(t, err, "error message %s", "formatted")
+func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotNil(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted")
+func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotPanics(t, f, append([]interface{}{msg}, args...)...)
+}
+
+// NotRegexpf asserts that a specified regexp does not match a string.
+//
+// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
+// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
+func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...)
+}
+
+// NotSamef asserts that two pointers do not reference the same object.
+//
+// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted")
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// NotSubsetf asserts that the specified list(array, slice...) or map does NOT
+// contain all elements given in the specified subset list(array, slice...) or
+// map.
+//
+// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted")
+// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
+func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotSubset(t, list, subset, append([]interface{}{msg}, args...)...)
+}
+
+// NotZerof asserts that i is not the zero value for its type.
+func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotZero(t, i, append([]interface{}{msg}, args...)...)
+}
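+
+// A minimal usage sketch for NotZerof; any value other than the zero value
+// of its type passes (items is a hypothetical slice):
+//
+// assert.NotZerof(t, len(items), "expected results for query %s", "active")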
+
+// Panicsf asserts that the code inside the specified PanicTestFunc panics.
+//
+// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted")
+func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Panics(t, f, append([]interface{}{msg}, args...)...)
+}
+
+// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+func PanicsWithErrorf(t TestingT, errString string, f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return PanicsWithError(t, errString, f, append([]interface{}{msg}, args...)...)
+}
+
+// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...)
+}
+
+// Positivef asserts that the specified element is positive
+//
+// assert.Positivef(t, 1, "error message %s", "formatted")
+// assert.Positivef(t, 1.23, "error message %s", "formatted")
+func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Positive(t, e, append([]interface{}{msg}, args...)...)
+}
+
+// Regexpf asserts that a specified regexp matches a string.
+//
+// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
+// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
+func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Regexp(t, rx, str, append([]interface{}{msg}, args...)...)
+}
+
+// Samef asserts that two pointers reference the same object.
+//
+// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted")
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func Samef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Same(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Subsetf asserts that the specified list(array, slice...) or map contains all
+// elements given in the specified subset list(array, slice...) or map.
+//
+// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted")
+// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
+func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Subset(t, list, subset, append([]interface{}{msg}, args...)...)
+}
+
+// Truef asserts that the specified value is true.
+//
+// assert.Truef(t, myBool, "error message %s", "formatted")
+func Truef(t TestingT, value bool, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return True(t, value, append([]interface{}{msg}, args...)...)
+}
+
+// WithinDurationf asserts that the two times are within duration delta of each other.
+//
+// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
+func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// WithinRangef asserts that a time is within a time range (inclusive).
+//
+// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
+func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return WithinRange(t, actual, start, end, append([]interface{}{msg}, args...)...)
+}
+
+// YAMLEqf asserts that two YAML strings are equivalent.
+func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return YAMLEq(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
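+
+// A minimal usage sketch for YAMLEqf; equivalence is structural, so key
+// order and quoting style do not matter:
+//
+// assert.YAMLEqf(t, "a: 1\nb: two", "b: \"two\"\na: 1", "manifest %s", "base")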
+
+// Zerof asserts that i is the zero value for its type.
+func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Zero(t, i, append([]interface{}{msg}, args...)...)
+}
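+
+// A minimal usage sketch for Zerof, e.g. asserting that a counter was never
+// incremented (retries is a hypothetical variable):
+//
+// assert.Zerof(t, retries, "no retries expected for backend %s", "primary")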
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl
new file mode 100644
index 0000000..d2bb0b8
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl
@@ -0,0 +1,5 @@
+{{.CommentFormat}}
+func {{.DocInfo.Name}}f(t TestingT, {{.ParamsFormat}}) bool {
+ if h, ok := t.(tHelper); ok { h.Helper() }
+ return {{.DocInfo.Name}}(t, {{.ForwardedParamsFormat}})
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
new file mode 100644
index 0000000..a84e09b
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
@@ -0,0 +1,1621 @@
+// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT.
+
+package assert
+
+import (
+ http "net/http"
+ url "net/url"
+ time "time"
+)
+
+// Condition uses a Comparison to assert a complex condition.
+func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Condition(a.t, comp, msgAndArgs...)
+}
+
+// Conditionf uses a Comparison to assert a complex condition.
+func (a *Assertions) Conditionf(comp Comparison, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Conditionf(a.t, comp, msg, args...)
+}
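+
+// A minimal usage sketch for Condition/Conditionf; a Comparison is simply a
+// func() bool (queue and limit are hypothetical):
+//
+// a.Conditionf(func() bool { return queue.Len() < limit }, "queue overflow in stage %s", "ingest")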
+
+// Contains asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// a.Contains("Hello World", "World")
+// a.Contains(["Hello", "World"], "World")
+// a.Contains({"Hello": "World"}, "Hello")
+func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Contains(a.t, s, contains, msgAndArgs...)
+}
+
+// Containsf asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// a.Containsf("Hello World", "World", "error message %s", "formatted")
+// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted")
+// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted")
+func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Containsf(a.t, s, contains, msg, args...)
+}
+
+// DirExists checks whether a directory exists in the given path. It also fails
+// if the path is a file rather than a directory or there is an error checking whether it exists.
+func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return DirExists(a.t, path, msgAndArgs...)
+}
+
+// DirExistsf checks whether a directory exists in the given path. It also fails
+// if the path is a file rather than a directory or there is an error checking whether it exists.
+func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return DirExistsf(a.t, path, msg, args...)
+}
+
+// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// a.ElementsMatch([1, 3, 2, 3], [1, 3, 3, 2])
+func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return ElementsMatch(a.t, listA, listB, msgAndArgs...)
+}
+
+// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// a.ElementsMatchf([1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
+func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return ElementsMatchf(a.t, listA, listB, msg, args...)
+}
+
+// Empty asserts that the specified object is empty, i.e. nil, "", false, 0, or
+// a slice or a channel with len == 0.
+//
+// a.Empty(obj)
+func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Empty(a.t, object, msgAndArgs...)
+}
+
+// Emptyf asserts that the specified object is empty, i.e. nil, "", false, 0, or
+// a slice or a channel with len == 0.
+//
+// a.Emptyf(obj, "error message %s", "formatted")
+func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Emptyf(a.t, object, msg, args...)
+}
+
+// Equal asserts that two objects are equal.
+//
+// a.Equal(123, 123)
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Equal(a.t, expected, actual, msgAndArgs...)
+}
+
+// EqualError asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// a.EqualError(err, expectedErrorString)
+func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return EqualError(a.t, theError, errString, msgAndArgs...)
+}
+
+// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted")
+func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return EqualErrorf(a.t, theError, errString, msg, args...)
+}
+
+// EqualExportedValues asserts that the types of two objects are equal and their public
+// fields are also equal. This is useful for comparing structs that have private fields
+// that could potentially differ.
+//
+// type S struct {
+// Exported int
+// notExported int
+// }
+// a.EqualExportedValues(S{1, 2}, S{1, 3}) => true
+// a.EqualExportedValues(S{1, 2}, S{2, 3}) => false
+func (a *Assertions) EqualExportedValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return EqualExportedValues(a.t, expected, actual, msgAndArgs...)
+}
+
+// EqualExportedValuesf asserts that the types of two objects are equal and their public
+// fields are also equal. This is useful for comparing structs that have private fields
+// that could potentially differ.
+//
+// type S struct {
+// Exported int
+// notExported int
+// }
+// a.EqualExportedValuesf(S{1, 2}, S{1, 3}, "error message %s", "formatted") => true
+// a.EqualExportedValuesf(S{1, 2}, S{2, 3}, "error message %s", "formatted") => false
+func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return EqualExportedValuesf(a.t, expected, actual, msg, args...)
+}
+
+// EqualValues asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// a.EqualValues(uint32(123), int32(123))
+func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return EqualValues(a.t, expected, actual, msgAndArgs...)
+}
+
+// EqualValuesf asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
+func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return EqualValuesf(a.t, expected, actual, msg, args...)
+}
+
+// Equalf asserts that two objects are equal.
+//
+// a.Equalf(123, 123, "error message %s", "formatted")
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Equalf(a.t, expected, actual, msg, args...)
+}
+
+// Error asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.Error(err) {
+// assert.Equal(t, expectedError, err)
+// }
+func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Error(a.t, err, msgAndArgs...)
+}
+
+// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
+// This is a wrapper for errors.As.
+func (a *Assertions) ErrorAs(err error, target interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorAs(a.t, err, target, msgAndArgs...)
+}
+
+// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
+// This is a wrapper for errors.As.
+func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorAsf(a.t, err, target, msg, args...)
+}
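+
+// A usage sketch for ErrorAs/ErrorAsf; target must be a non-nil pointer to
+// an error type, and on a match it is set to that error in the chain:
+//
+// var pathErr *os.PathError
+// a.ErrorAsf(err, &pathErr, "expected a path error from %s", "open")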
+
+// ErrorContains asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+// actualObj, err := SomeFunction()
+// a.ErrorContains(err, expectedErrorSubString)
+func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorContains(a.t, theError, contains, msgAndArgs...)
+}
+
+// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+// actualObj, err := SomeFunction()
+// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted")
+func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorContainsf(a.t, theError, contains, msg, args...)
+}
+
+// ErrorIs asserts that at least one of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorIs(a.t, err, target, msgAndArgs...)
+}
+
+// ErrorIsf asserts that at least one of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorIsf(a.t, err, target, msg, args...)
+}
+
+// Errorf asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.Errorf(err, "error message %s", "formatted") {
+// assert.Equal(t, expectedErrorf, err)
+// }
+func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Errorf(a.t, err, msg, args...)
+}
+
+// Eventually asserts that the given condition will be met within the waitFor
+// time, periodically checking the target function each tick.
+//
+// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond)
+func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Eventually(a.t, condition, waitFor, tick, msgAndArgs...)
+}
+
+// EventuallyWithT asserts that the given condition will be met within the
+// waitFor time, periodically checking the target function each tick. In
+// contrast to Eventually,
+// it supplies a CollectT to the condition function, so that the condition
+// function can use the CollectT to call other assertions.
+// The condition is considered "met" if no errors are raised in a tick.
+// The supplied CollectT collects all errors from one tick (if there are any).
+// If the condition is not met before waitFor, the collected errors of
+// the last tick are copied to t.
+//
+// externalValue := false
+// go func() {
+// time.Sleep(8*time.Second)
+// externalValue = true
+// }()
+// a.EventuallyWithT(func(c *assert.CollectT) {
+// // add assertions as needed; any assertion failure will fail the current tick
+// assert.True(c, externalValue, "expected 'externalValue' to be true")
+// }, 10*time.Second, 100*time.Millisecond, "external state has not changed to 'true'; still false")
+func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return EventuallyWithT(a.t, condition, waitFor, tick, msgAndArgs...)
+}
+
+// EventuallyWithTf asserts that the given condition will be met within the
+// waitFor time, periodically checking the target function each tick. In
+// contrast to Eventually,
+// it supplies a CollectT to the condition function, so that the condition
+// function can use the CollectT to call other assertions.
+// The condition is considered "met" if no errors are raised in a tick.
+// The supplied CollectT collects all errors from one tick (if there are any).
+// If the condition is not met before waitFor, the collected errors of
+// the last tick are copied to t.
+//
+// externalValue := false
+// go func() {
+// time.Sleep(8*time.Second)
+// externalValue = true
+// }()
+// a.EventuallyWithTf(func(c *assert.CollectT) {
+// // add assertions as needed; any assertion failure will fail the current tick
+// assert.True(c, externalValue, "expected 'externalValue' to be true")
+// }, 10*time.Second, 100*time.Millisecond, "error message %s", "formatted")
+func (a *Assertions) EventuallyWithTf(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return EventuallyWithTf(a.t, condition, waitFor, tick, msg, args...)
+}
+
+// Eventuallyf asserts that the given condition will be met within the waitFor
+// time, periodically checking the target function each tick.
+//
+// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Eventuallyf(a.t, condition, waitFor, tick, msg, args...)
+}
+
+// Exactly asserts that two objects are equal in value and type.
+//
+// a.Exactly(int32(123), int32(123))
+func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Exactly(a.t, expected, actual, msgAndArgs...)
+}
+
+// Exactlyf asserts that two objects are equal in value and type.
+//
+// a.Exactlyf(int32(123), int32(123), "error message %s", "formatted")
+func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Exactlyf(a.t, expected, actual, msg, args...)
+}
+
+// Fail reports a failure through the underlying TestingT without stopping the test.
+func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Fail(a.t, failureMessage, msgAndArgs...)
+}
+
+// FailNow fails the test and stops its execution.
+func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return FailNow(a.t, failureMessage, msgAndArgs...)
+}
+
+// FailNowf fails the test and stops its execution.
+func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return FailNowf(a.t, failureMessage, msg, args...)
+}
+
+// Failf reports a failure through the underlying TestingT without stopping the test.
+func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Failf(a.t, failureMessage, msg, args...)
+}
+
+// False asserts that the specified value is false.
+//
+// a.False(myBool)
+func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return False(a.t, value, msgAndArgs...)
+}
+
+// Falsef asserts that the specified value is false.
+//
+// a.Falsef(myBool, "error message %s", "formatted")
+func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Falsef(a.t, value, msg, args...)
+}
+
+// FileExists checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
+func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return FileExists(a.t, path, msgAndArgs...)
+}
+
+// FileExistsf checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
+func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return FileExistsf(a.t, path, msg, args...)
+}
+
+// Greater asserts that the first element is greater than the second
+//
+// a.Greater(2, 1)
+// a.Greater(float64(2), float64(1))
+// a.Greater("b", "a")
+func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Greater(a.t, e1, e2, msgAndArgs...)
+}
+
+// GreaterOrEqual asserts that the first element is greater than or equal to the second
+//
+// a.GreaterOrEqual(2, 1)
+// a.GreaterOrEqual(2, 2)
+// a.GreaterOrEqual("b", "a")
+// a.GreaterOrEqual("b", "b")
+func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return GreaterOrEqual(a.t, e1, e2, msgAndArgs...)
+}
+
+// GreaterOrEqualf asserts that the first element is greater than or equal to the second
+//
+// a.GreaterOrEqualf(2, 1, "error message %s", "formatted")
+// a.GreaterOrEqualf(2, 2, "error message %s", "formatted")
+// a.GreaterOrEqualf("b", "a", "error message %s", "formatted")
+// a.GreaterOrEqualf("b", "b", "error message %s", "formatted")
+func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return GreaterOrEqualf(a.t, e1, e2, msg, args...)
+}
+
+// Greaterf asserts that the first element is greater than the second
+//
+// a.Greaterf(2, 1, "error message %s", "formatted")
+// a.Greaterf(float64(2), float64(1), "error message %s", "formatted")
+// a.Greaterf("b", "a", "error message %s", "formatted")
+func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Greaterf(a.t, e1, e2, msg, args...)
+}
+
+// HTTPBodyContains asserts that a specified handler returns a
+// body that contains a string.
+//
+// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...)
+}
+
+// HTTPBodyContainsf asserts that a specified handler returns a
+// body that contains a string.
+//
+// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...)
+}
+
+// HTTPBodyNotContains asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...)
+}
+
+// HTTPBodyNotContainsf asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...)
+}
+
+// HTTPError asserts that a specified handler returns an error status code.
+//
+// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPError(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPErrorf asserts that a specified handler returns an error status code.
+//
+// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPErrorf(a.t, handler, method, url, values, msg, args...)
+}
+
+// HTTPRedirect asserts that a specified handler returns a redirect status code.
+//
+// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPRedirect(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPRedirectf asserts that a specified handler returns a redirect status code.
+//
+// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPRedirectf(a.t, handler, method, url, values, msg, args...)
+}
+
+// HTTPStatusCode asserts that a specified handler returns a specified status code.
+//
+// a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPStatusCode(a.t, handler, method, url, values, statuscode, msgAndArgs...)
+}
+
+// HTTPStatusCodef asserts that a specified handler returns a specified status code.
+//
+// a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPStatusCodef(a.t, handler, method, url, values, statuscode, msg, args...)
+}
+
+// HTTPSuccess asserts that a specified handler returns a success status code.
+//
+// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPSuccess(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPSuccessf asserts that a specified handler returns a success status code.
+//
+// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPSuccessf(a.t, handler, method, url, values, msg, args...)
+}
+
+// Implements asserts that an object implements the specified interface.
+//
+// a.Implements((*MyInterface)(nil), new(MyObject))
+func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Implements(a.t, interfaceObject, object, msgAndArgs...)
+}
+
+// Implementsf asserts that an object implements the specified interface.
+//
+// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Implementsf(a.t, interfaceObject, object, msg, args...)
+}
+
+// InDelta asserts that the two numerals are within delta of each other.
+//
+// a.InDelta(math.Pi, 22/7.0, 0.01)
+func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDelta(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func (a *Assertions) InDeltaMapValues(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDeltaMapValues(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func (a *Assertions) InDeltaMapValuesf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDeltaMapValuesf(a.t, expected, actual, delta, msg, args...)
+}
+
+// InDeltaSlice is the same as InDelta, except it compares two slices.
+func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaSlicef is the same as InDelta, except it compares two slices.
+func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDeltaSlicef(a.t, expected, actual, delta, msg, args...)
+}
+
+// InDeltaf asserts that the two numerals are within delta of each other.
+//
+// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
+func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDeltaf(a.t, expected, actual, delta, msg, args...)
+}
+
+// InEpsilon asserts that expected and actual have a relative error less than epsilon
+func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
+}
+
+// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
+func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...)
+}
+
+// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
+func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...)
+}
+
+// InEpsilonf asserts that expected and actual have a relative error less than epsilon
+func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InEpsilonf(a.t, expected, actual, epsilon, msg, args...)
+}
+
+// IsDecreasing asserts that the collection is decreasing
+//
+// a.IsDecreasing([]int{2, 1, 0})
+// a.IsDecreasing([]float64{2, 1})
+// a.IsDecreasing([]string{"b", "a"})
+func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsDecreasing(a.t, object, msgAndArgs...)
+}
+
+// IsDecreasingf asserts that the collection is decreasing
+//
+// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted")
+// a.IsDecreasingf([]float64{2, 1}, "error message %s", "formatted")
+// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted")
+func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsDecreasingf(a.t, object, msg, args...)
+}
+
+// IsIncreasing asserts that the collection is increasing
+//
+// a.IsIncreasing([]int{1, 2, 3})
+// a.IsIncreasing([]float64{1, 2})
+// a.IsIncreasing([]string{"a", "b"})
+func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsIncreasing(a.t, object, msgAndArgs...)
+}
+
+// IsIncreasingf asserts that the collection is increasing
+//
+// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted")
+// a.IsIncreasingf([]float64{1, 2}, "error message %s", "formatted")
+// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted")
+func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsIncreasingf(a.t, object, msg, args...)
+}
+
+// IsNonDecreasing asserts that the collection is not decreasing
+//
+// a.IsNonDecreasing([]int{1, 1, 2})
+// a.IsNonDecreasing([]float64{1, 2})
+// a.IsNonDecreasing([]string{"a", "b"})
+func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsNonDecreasing(a.t, object, msgAndArgs...)
+}
+
+// IsNonDecreasingf asserts that the collection is not decreasing
+//
+// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted")
+// a.IsNonDecreasingf([]float64{1, 2}, "error message %s", "formatted")
+// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted")
+func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsNonDecreasingf(a.t, object, msg, args...)
+}
+
+// IsNonIncreasing asserts that the collection is not increasing
+//
+// a.IsNonIncreasing([]int{2, 1, 1})
+// a.IsNonIncreasing([]float64{2, 1})
+// a.IsNonIncreasing([]string{"b", "a"})
+func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsNonIncreasing(a.t, object, msgAndArgs...)
+}
+
+// IsNonIncreasingf asserts that the collection is not increasing
+//
+// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted")
+// a.IsNonIncreasingf([]float64{2, 1}, "error message %s", "formatted")
+// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted")
+func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsNonIncreasingf(a.t, object, msg, args...)
+}
+
+// IsType asserts that the specified objects are of the same type.
+func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsType(a.t, expectedType, object, msgAndArgs...)
+}
+
+// IsTypef asserts that the specified objects are of the same type.
+func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsTypef(a.t, expectedType, object, msg, args...)
+}
+
+// JSONEq asserts that two JSON strings are equivalent.
+//
+// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
+func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return JSONEq(a.t, expected, actual, msgAndArgs...)
+}
+
+// JSONEqf asserts that two JSON strings are equivalent.
+//
+// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return JSONEqf(a.t, expected, actual, msg, args...)
+}
+
+// Len asserts that the specified object has specific length.
+// Len also fails if the object has a type that len() does not accept.
+//
+// a.Len(mySlice, 3)
+func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Len(a.t, object, length, msgAndArgs...)
+}
+
+// Lenf asserts that the specified object has specific length.
+// Lenf also fails if the object has a type that len() does not accept.
+//
+// a.Lenf(mySlice, 3, "error message %s", "formatted")
+func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Lenf(a.t, object, length, msg, args...)
+}
+
+// Less asserts that the first element is less than the second
+//
+// a.Less(1, 2)
+// a.Less(float64(1), float64(2))
+// a.Less("a", "b")
+func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Less(a.t, e1, e2, msgAndArgs...)
+}
+
+// LessOrEqual asserts that the first element is less than or equal to the second
+//
+// a.LessOrEqual(1, 2)
+// a.LessOrEqual(2, 2)
+// a.LessOrEqual("a", "b")
+// a.LessOrEqual("b", "b")
+func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return LessOrEqual(a.t, e1, e2, msgAndArgs...)
+}
+
+// LessOrEqualf asserts that the first element is less than or equal to the second
+//
+// a.LessOrEqualf(1, 2, "error message %s", "formatted")
+// a.LessOrEqualf(2, 2, "error message %s", "formatted")
+// a.LessOrEqualf("a", "b", "error message %s", "formatted")
+// a.LessOrEqualf("b", "b", "error message %s", "formatted")
+func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return LessOrEqualf(a.t, e1, e2, msg, args...)
+}
+
+// Lessf asserts that the first element is less than the second
+//
+// a.Lessf(1, 2, "error message %s", "formatted")
+// a.Lessf(float64(1), float64(2), "error message %s", "formatted")
+// a.Lessf("a", "b", "error message %s", "formatted")
+func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Lessf(a.t, e1, e2, msg, args...)
+}
+
+// Negative asserts that the specified element is negative
+//
+// a.Negative(-1)
+// a.Negative(-1.23)
+func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Negative(a.t, e, msgAndArgs...)
+}
+
+// Negativef asserts that the specified element is negative
+//
+// a.Negativef(-1, "error message %s", "formatted")
+// a.Negativef(-1.23, "error message %s", "formatted")
+func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Negativef(a.t, e, msg, args...)
+}
+
+// Never asserts that the given condition is not satisfied at any point within waitFor time,
+// periodically checking the target function each tick.
+//
+// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond)
+func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Never(a.t, condition, waitFor, tick, msgAndArgs...)
+}
+
+// Neverf asserts that the given condition is not satisfied at any point within waitFor time,
+// periodically checking the target function each tick.
+//
+// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Neverf(a.t, condition, waitFor, tick, msg, args...)
+}
+
+// Nil asserts that the specified object is nil.
+//
+// a.Nil(err)
+func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Nil(a.t, object, msgAndArgs...)
+}
+
+// Nilf asserts that the specified object is nil.
+//
+// a.Nilf(err, "error message %s", "formatted")
+func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Nilf(a.t, object, msg, args...)
+}
+
+// NoDirExists checks whether a directory does not exist in the given path.
+// It fails only if the path points to an existing _directory_.
+func (a *Assertions) NoDirExists(path string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoDirExists(a.t, path, msgAndArgs...)
+}
+
+// NoDirExistsf checks whether a directory does not exist in the given path.
+// It fails only if the path points to an existing _directory_.
+func (a *Assertions) NoDirExistsf(path string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoDirExistsf(a.t, path, msg, args...)
+}
+
+// NoError asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.NoError(err) {
+// assert.Equal(t, expectedObj, actualObj)
+// }
+func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoError(a.t, err, msgAndArgs...)
+}
+
+// NoErrorf asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.NoErrorf(err, "error message %s", "formatted") {
+// assert.Equal(t, expectedObj, actualObj)
+// }
+func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoErrorf(a.t, err, msg, args...)
+}
+
+// NoFileExists checks whether a file does not exist in a given path. It fails
+// only if the path points to an existing _file_.
+func (a *Assertions) NoFileExists(path string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoFileExists(a.t, path, msgAndArgs...)
+}
+
+// NoFileExistsf checks whether a file does not exist in a given path. It fails
+// only if the path points to an existing _file_.
+func (a *Assertions) NoFileExistsf(path string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoFileExistsf(a.t, path, msg, args...)
+}
+
+// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// a.NotContains("Hello World", "Earth")
+// a.NotContains(["Hello", "World"], "Earth")
+// a.NotContains({"Hello": "World"}, "Earth")
+func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotContains(a.t, s, contains, msgAndArgs...)
+}
+
+// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted")
+// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted")
+// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted")
+func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotContainsf(a.t, s, contains, msg, args...)
+}
+
+// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// if a.NotEmpty(obj) {
+// assert.Equal(t, "two", obj[1])
+// }
+func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEmpty(a.t, object, msgAndArgs...)
+}
+
+// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// if a.NotEmptyf(obj, "error message %s", "formatted") {
+// assert.Equal(t, "two", obj[1])
+// }
+func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEmptyf(a.t, object, msg, args...)
+}
+
+// NotEqual asserts that the specified values are NOT equal.
+//
+// a.NotEqual(obj1, obj2)
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEqual(a.t, expected, actual, msgAndArgs...)
+}
+
+// NotEqualValues asserts that two objects are not equal even when converted to the same type
+//
+// a.NotEqualValues(obj1, obj2)
+func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEqualValues(a.t, expected, actual, msgAndArgs...)
+}
+
+// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
+//
+// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted")
+func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEqualValuesf(a.t, expected, actual, msg, args...)
+}
+
+// NotEqualf asserts that the specified values are NOT equal.
+//
+// a.NotEqualf(obj1, obj2, "error message %s", "formatted")
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEqualf(a.t, expected, actual, msg, args...)
+}
+
+// NotErrorIs asserts that none of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotErrorIs(a.t, err, target, msgAndArgs...)
+}
+
+// NotErrorIsf asserts that none of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotErrorIsf(a.t, err, target, msg, args...)
+}
+
+// NotImplements asserts that an object does not implement the specified interface.
+//
+// a.NotImplements((*MyInterface)(nil), new(MyObject))
+func (a *Assertions) NotImplements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotImplements(a.t, interfaceObject, object, msgAndArgs...)
+}
+
+// NotImplementsf asserts that an object does not implement the specified interface.
+//
+// a.NotImplementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func (a *Assertions) NotImplementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotImplementsf(a.t, interfaceObject, object, msg, args...)
+}
+
+// NotNil asserts that the specified object is not nil.
+//
+// a.NotNil(err)
+func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotNil(a.t, object, msgAndArgs...)
+}
+
+// NotNilf asserts that the specified object is not nil.
+//
+// a.NotNilf(err, "error message %s", "formatted")
+func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotNilf(a.t, object, msg, args...)
+}
+
+// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// a.NotPanics(func(){ RemainCalm() })
+func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotPanics(a.t, f, msgAndArgs...)
+}
+
+// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted")
+func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotPanicsf(a.t, f, msg, args...)
+}
+
+// NotRegexp asserts that a specified regexp does not match a string.
+//
+// a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
+// a.NotRegexp("^start", "it's not starting")
+func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotRegexp(a.t, rx, str, msgAndArgs...)
+}
+
+// NotRegexpf asserts that a specified regexp does not match a string.
+//
+// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
+// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted")
+func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotRegexpf(a.t, rx, str, msg, args...)
+}
+
+// NotSame asserts that two pointers do not reference the same object.
+//
+// a.NotSame(ptr1, ptr2)
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) NotSame(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotSame(a.t, expected, actual, msgAndArgs...)
+}
+
+// NotSamef asserts that two pointers do not reference the same object.
+//
+// a.NotSamef(ptr1, ptr2, "error message %s", "formatted")
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotSamef(a.t, expected, actual, msg, args...)
+}
+
+// NotSubset asserts that the specified list(array, slice...) or map does NOT
+// contain all elements given in the specified subset list(array, slice...) or
+// map.
+//
+// a.NotSubset([1, 3, 4], [1, 2])
+// a.NotSubset({"x": 1, "y": 2}, {"z": 3})
+func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotSubset(a.t, list, subset, msgAndArgs...)
+}
+
+// NotSubsetf asserts that the specified list(array, slice...) or map does NOT
+// contain all elements given in the specified subset list(array, slice...) or
+// map.
+//
+// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted")
+// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
+func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotSubsetf(a.t, list, subset, msg, args...)
+}
+
+// NotZero asserts that i is not the zero value for its type.
+func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotZero(a.t, i, msgAndArgs...)
+}
+
+// NotZerof asserts that i is not the zero value for its type.
+func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotZerof(a.t, i, msg, args...)
+}
+
+// Panics asserts that the code inside the specified PanicTestFunc panics.
+//
+// a.Panics(func(){ GoCrazy() })
+func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Panics(a.t, f, msgAndArgs...)
+}
+
+// PanicsWithError asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// a.PanicsWithError("crazy error", func(){ GoCrazy() })
+func (a *Assertions) PanicsWithError(errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return PanicsWithError(a.t, errString, f, msgAndArgs...)
+}
+
+// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+func (a *Assertions) PanicsWithErrorf(errString string, f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return PanicsWithErrorf(a.t, errString, f, msg, args...)
+}
+
+// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// a.PanicsWithValue("crazy error", func(){ GoCrazy() })
+func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return PanicsWithValue(a.t, expected, f, msgAndArgs...)
+}
+
+// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return PanicsWithValuef(a.t, expected, f, msg, args...)
+}
+
+// Panicsf asserts that the code inside the specified PanicTestFunc panics.
+//
+// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted")
+func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Panicsf(a.t, f, msg, args...)
+}
+
+// Positive asserts that the specified element is positive
+//
+// a.Positive(1)
+// a.Positive(1.23)
+func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Positive(a.t, e, msgAndArgs...)
+}
+
+// Positivef asserts that the specified element is positive
+//
+// a.Positivef(1, "error message %s", "formatted")
+// a.Positivef(1.23, "error message %s", "formatted")
+func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Positivef(a.t, e, msg, args...)
+}
+
+// Regexp asserts that a specified regexp matches a string.
+//
+// a.Regexp(regexp.MustCompile("start"), "it's starting")
+// a.Regexp("start...$", "it's not starting")
+func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Regexp(a.t, rx, str, msgAndArgs...)
+}
+
+// Regexpf asserts that a specified regexp matches a string.
+//
+// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
+// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted")
+func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Regexpf(a.t, rx, str, msg, args...)
+}
+
+// Same asserts that two pointers reference the same object.
+//
+// a.Same(ptr1, ptr2)
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) Same(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Same(a.t, expected, actual, msgAndArgs...)
+}
+
+// Samef asserts that two pointers reference the same object.
+//
+// a.Samef(ptr1, ptr2, "error message %s", "formatted")
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Samef(a.t, expected, actual, msg, args...)
+}
+
+// Subset asserts that the specified list(array, slice...) or map contains all
+// elements given in the specified subset list(array, slice...) or map.
+//
+// a.Subset([1, 2, 3], [1, 2])
+// a.Subset({"x": 1, "y": 2}, {"x": 1})
+func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Subset(a.t, list, subset, msgAndArgs...)
+}
+
+// Subsetf asserts that the specified list(array, slice...) or map contains all
+// elements given in the specified subset list(array, slice...) or map.
+//
+// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted")
+// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
+func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Subsetf(a.t, list, subset, msg, args...)
+}
+
+// True asserts that the specified value is true.
+//
+// a.True(myBool)
+func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return True(a.t, value, msgAndArgs...)
+}
+
+// Truef asserts that the specified value is true.
+//
+// a.Truef(myBool, "error message %s", "formatted")
+func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Truef(a.t, value, msg, args...)
+}
+
+// WithinDuration asserts that the two times are within duration delta of each other.
+//
+// a.WithinDuration(time.Now(), time.Now(), 10*time.Second)
+func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return WithinDuration(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// WithinDurationf asserts that the two times are within duration delta of each other.
+//
+// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
+func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return WithinDurationf(a.t, expected, actual, delta, msg, args...)
+}
+
+// WithinRange asserts that a time is within a time range (inclusive).
+//
+// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second))
+func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return WithinRange(a.t, actual, start, end, msgAndArgs...)
+}
+
+// WithinRangef asserts that a time is within a time range (inclusive).
+//
+// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
+func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return WithinRangef(a.t, actual, start, end, msg, args...)
+}
+
+// YAMLEq asserts that two YAML strings are equivalent.
+func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return YAMLEq(a.t, expected, actual, msgAndArgs...)
+}
+
+// YAMLEqf asserts that two YAML strings are equivalent.
+func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return YAMLEqf(a.t, expected, actual, msg, args...)
+}
+
+// Zero asserts that i is the zero value for its type.
+func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Zero(a.t, i, msgAndArgs...)
+}
+
+// Zerof asserts that i is the zero value for its type.
+func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Zerof(a.t, i, msg, args...)
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl
new file mode 100644
index 0000000..188bb9e
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl
@@ -0,0 +1,5 @@
+{{.CommentWithoutT "a"}}
+func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool {
+ if h, ok := a.t.(tHelper); ok { h.Helper() }
+ return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}})
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go
new file mode 100644
index 0000000..00df62a
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go
@@ -0,0 +1,81 @@
+package assert
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// isOrdered checks that the elements of the collection are ordered pairwise,
+// with every comparison result contained in allowedComparesResults.
+func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
+ objKind := reflect.TypeOf(object).Kind()
+ if objKind != reflect.Slice && objKind != reflect.Array {
+ return false
+ }
+
+ objValue := reflect.ValueOf(object)
+ objLen := objValue.Len()
+
+ if objLen <= 1 {
+ return true
+ }
+
+ value := objValue.Index(0)
+ valueInterface := value.Interface()
+ firstValueKind := value.Kind()
+
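+ // Walk the collection pairwise: every adjacent comparison must land in
+ // allowedComparesResults (e.g. only compareLess for IsIncreasing). The kind
+ // of the first element drives the comparison for the whole collection.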
+ for i := 1; i < objLen; i++ {
+ prevValue := value
+ prevValueInterface := valueInterface
+
+ value = objValue.Index(i)
+ valueInterface = value.Interface()
+
+ compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind)
+
+ if !isComparable {
+ return Fail(t, fmt.Sprintf("Can not compare type \"%s\" and \"%s\"", reflect.TypeOf(value), reflect.TypeOf(prevValue)), msgAndArgs...)
+ }
+
+ if !containsValue(allowedComparesResults, compareResult) {
+ return Fail(t, fmt.Sprintf(failMessage, prevValue, value), msgAndArgs...)
+ }
+ }
+
+ return true
+}
+
+// IsIncreasing asserts that the collection is increasing
+//
+// assert.IsIncreasing(t, []int{1, 2, 3})
+// assert.IsIncreasing(t, []float64{1, 2})
+// assert.IsIncreasing(t, []string{"a", "b"})
+func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
+}
+
+// IsNonIncreasing asserts that the collection is not increasing
+//
+// assert.IsNonIncreasing(t, []int{2, 1, 1})
+// assert.IsNonIncreasing(t, []float64{2, 1})
+// assert.IsNonIncreasing(t, []string{"b", "a"})
+func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
+}
+
+// IsDecreasing asserts that the collection is decreasing
+//
+// assert.IsDecreasing(t, []int{2, 1, 0})
+// assert.IsDecreasing(t, []float64{2, 1})
+// assert.IsDecreasing(t, []string{"b", "a"})
+func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
+}
+
+// IsNonDecreasing asserts that the collection is not decreasing
+//
+// assert.IsNonDecreasing(t, []int{1, 1, 2})
+// assert.IsNonDecreasing(t, []float64{1, 2})
+// assert.IsNonDecreasing(t, []string{"a", "b"})
+func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go
new file mode 100644
index 0000000..0b7570f
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertions.go
@@ -0,0 +1,2105 @@
+package assert
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math"
+ "os"
+ "reflect"
+ "regexp"
+ "runtime"
+ "runtime/debug"
+ "strings"
+ "time"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/davecgh/go-spew/spew"
+ "github.com/pmezard/go-difflib/difflib"
+ "gopkg.in/yaml.v3"
+)
+
+//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl"
+
+// TestingT is an interface wrapper around *testing.T
+type TestingT interface {
+ Errorf(format string, args ...interface{})
+}
+
+// ComparisonAssertionFunc is a common function prototype when comparing two values. Can be useful
+// for table driven tests.
+type ComparisonAssertionFunc func(TestingT, interface{}, interface{}, ...interface{}) bool
+
+// ValueAssertionFunc is a common function prototype when validating a single value. Can be useful
+// for table driven tests.
+type ValueAssertionFunc func(TestingT, interface{}, ...interface{}) bool
+
+// BoolAssertionFunc is a common function prototype when validating a bool value. Can be useful
+// for table driven tests.
+type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool
+
+// ErrorAssertionFunc is a common function prototype when validating an error value. Can be useful
+// for table driven tests.
+type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool
+
+// Comparison is a custom function that returns true on success and false on failure
+type Comparison func() (success bool)
+
+/*
+ Helper functions
+*/
+
+// ObjectsAreEqual determines if two objects are considered equal.
+//
+// This function does no assertion of any kind.
+func ObjectsAreEqual(expected, actual interface{}) bool {
+ if expected == nil || actual == nil {
+ return expected == actual
+ }
+
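+ // Fast path: compare []byte values with bytes.Equal rather than the more
+ // expensive reflect.DeepEqual.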
+ exp, ok := expected.([]byte)
+ if !ok {
+ return reflect.DeepEqual(expected, actual)
+ }
+
+ act, ok := actual.([]byte)
+ if !ok {
+ return false
+ }
+ if exp == nil || act == nil {
+ return exp == nil && act == nil
+ }
+ return bytes.Equal(exp, act)
+}
+
+// copyExportedFields iterates downward through nested data structures and creates a copy
+// that only contains the exported struct fields.
+func copyExportedFields(expected interface{}) interface{} {
+ if isNil(expected) {
+ return expected
+ }
+
+ expectedType := reflect.TypeOf(expected)
+ expectedKind := expectedType.Kind()
+ expectedValue := reflect.ValueOf(expected)
+
+ switch expectedKind {
+ case reflect.Struct:
+ result := reflect.New(expectedType).Elem()
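+ // Build a zero value of the same struct type, then copy across only the
+ // exported fields, recursing into each field value.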
+ for i := 0; i < expectedType.NumField(); i++ {
+ field := expectedType.Field(i)
+ isExported := field.IsExported()
+ if isExported {
+ fieldValue := expectedValue.Field(i)
+ if isNil(fieldValue) || isNil(fieldValue.Interface()) {
+ continue
+ }
+ newValue := copyExportedFields(fieldValue.Interface())
+ result.Field(i).Set(reflect.ValueOf(newValue))
+ }
+ }
+ return result.Interface()
+
+ case reflect.Ptr:
+ result := reflect.New(expectedType.Elem())
+ unexportedRemoved := copyExportedFields(expectedValue.Elem().Interface())
+ result.Elem().Set(reflect.ValueOf(unexportedRemoved))
+ return result.Interface()
+
+ case reflect.Array, reflect.Slice:
+ var result reflect.Value
+ if expectedKind == reflect.Array {
+ result = reflect.New(reflect.ArrayOf(expectedValue.Len(), expectedType.Elem())).Elem()
+ } else {
+ result = reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len())
+ }
+ for i := 0; i < expectedValue.Len(); i++ {
+ index := expectedValue.Index(i)
+ if isNil(index) {
+ continue
+ }
+ unexportedRemoved := copyExportedFields(index.Interface())
+ result.Index(i).Set(reflect.ValueOf(unexportedRemoved))
+ }
+ return result.Interface()
+
+ case reflect.Map:
+ result := reflect.MakeMap(expectedType)
+ for _, k := range expectedValue.MapKeys() {
+ index := expectedValue.MapIndex(k)
+ unexportedRemoved := copyExportedFields(index.Interface())
+ result.SetMapIndex(k, reflect.ValueOf(unexportedRemoved))
+ }
+ return result.Interface()
+
+ default:
+ return expected
+ }
+}
+
+// ObjectsExportedFieldsAreEqual determines if the exported (public) fields of two objects are
+// considered equal. This comparison of only exported fields is applied recursively to nested data
+// structures.
+//
+// This function does no assertion of any kind.
+//
+// Deprecated: Use [EqualExportedValues] instead.
+func ObjectsExportedFieldsAreEqual(expected, actual interface{}) bool {
+ expectedCleaned := copyExportedFields(expected)
+ actualCleaned := copyExportedFields(actual)
+ return ObjectsAreEqualValues(expectedCleaned, actualCleaned)
+}
+
+// ObjectsAreEqualValues gets whether two objects are equal, or whether their
+// values are equal after conversion to the same type.
+func ObjectsAreEqualValues(expected, actual interface{}) bool {
+ if ObjectsAreEqual(expected, actual) {
+ return true
+ }
+
+ expectedValue := reflect.ValueOf(expected)
+ actualValue := reflect.ValueOf(actual)
+ if !expectedValue.IsValid() || !actualValue.IsValid() {
+ return false
+ }
+
+ expectedType := expectedValue.Type()
+ actualType := actualValue.Type()
+ if !expectedType.ConvertibleTo(actualType) {
+ return false
+ }
+
+ if !isNumericType(expectedType) || !isNumericType(actualType) {
+ // Attempt comparison after type conversion
+ return reflect.DeepEqual(
+ expectedValue.Convert(actualType).Interface(), actual,
+ )
+ }
+
+ // If BOTH values are numeric, there are chances of false positives due
+ // to overflow or underflow. So, we need to make sure to always convert
+ // the smaller type to a larger type before comparing.
+ if expectedType.Size() >= actualType.Size() {
+ return actualValue.Convert(expectedType).Interface() == expected
+ }
+
+ return expectedValue.Convert(actualType).Interface() == actual
+}
+
+// isNumericType returns true if the type is one of:
+// int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64,
+// float32, float64, complex64, complex128
+func isNumericType(t reflect.Type) bool {
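+ // The numeric reflect.Kind constants are contiguous, so a single range
+ // check covers them all (note that this range also includes reflect.Uintptr).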
+ return t.Kind() >= reflect.Int && t.Kind() <= reflect.Complex128
+}
+
+/* CallerInfo is necessary because the assert functions use the testing object
+internally, causing it to print the file:line of the assert method, rather than where
+the problem actually occurred in calling code.*/
+
+// CallerInfo returns an array of strings containing the file and line number
+// of each stack frame leading from the current test to the assert call that
+// failed.
+func CallerInfo() []string {
+
+ var pc uintptr
+ var ok bool
+ var file string
+ var line int
+ var name string
+
+ callers := []string{}
+ for i := 0; ; i++ {
+ pc, file, line, ok = runtime.Caller(i)
+ if !ok {
+ // The breaks below failed to terminate the loop, and we ran off the
+ // end of the call stack.
+ break
+ }
+
+ // This is a huge edge case, but without this check the code below would panic, see #180
+ if file == "" {
+ break
+ }
+
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ break
+ }
+ name = f.Name()
+
+ // testing.tRunner is the standard library function that calls
+ // tests. Subtests are called directly by tRunner, without going through
+ // the Test/Benchmark/Example function that contains the t.Run calls, so
+ // with subtests we should break when we hit tRunner, without adding it
+ // to the list of callers.
+ if name == "testing.tRunner" {
+ break
+ }
+
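+ // Skip frames that live in the assert, mock, and require packages
+ // themselves so the reported trace points at the caller's code.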
+ parts := strings.Split(file, "/")
+ if len(parts) > 1 {
+ filename := parts[len(parts)-1]
+ dir := parts[len(parts)-2]
+ if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" {
+ callers = append(callers, fmt.Sprintf("%s:%d", file, line))
+ }
+ }
+
+ // Drop the package
+ segments := strings.Split(name, ".")
+ name = segments[len(segments)-1]
+ if isTest(name, "Test") ||
+ isTest(name, "Benchmark") ||
+ isTest(name, "Example") {
+ break
+ }
+ }
+
+ return callers
+}
+
+// Stolen from the `go test` tool.
+// isTest tells whether name looks like a test (or benchmark, according to prefix).
+// It is a Test (say) if there is a character after Test that is not a lower-case letter.
+// We don't want TesticularCancer.
+func isTest(name, prefix string) bool {
+ if !strings.HasPrefix(name, prefix) {
+ return false
+ }
+ if len(name) == len(prefix) { // "Test" is ok
+ return true
+ }
+ r, _ := utf8.DecodeRuneInString(name[len(prefix):])
+ return !unicode.IsLower(r)
+}
+
+func messageFromMsgAndArgs(msgAndArgs ...interface{}) string {
+ if len(msgAndArgs) == 0 || msgAndArgs == nil {
+ return ""
+ }
+ if len(msgAndArgs) == 1 {
+ msg := msgAndArgs[0]
+ if msgAsStr, ok := msg.(string); ok {
+ return msgAsStr
+ }
+ return fmt.Sprintf("%+v", msg)
+ }
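+ // With more than one element, treat the first as a Printf-style format
+ // string and the rest as its arguments.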
+ if len(msgAndArgs) > 1 {
+ return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...)
+ }
+ return ""
+}
+
+// Aligns the provided message so that all lines after the first line start at the same location as the first line.
+// Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab).
+// The longestLabelLen parameter specifies the length of the longest label in the output (required because this is the
+// basis on which the alignment occurs).
+func indentMessageLines(message string, longestLabelLen int) string {
+ outBuf := new(bytes.Buffer)
+
+ for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ {
+ // no need to align first line because it starts at the correct location (after the label)
+ if i != 0 {
+ // append alignLen+1 spaces to align with "{{longestLabel}}:" before adding tab
+ outBuf.WriteString("\n\t" + strings.Repeat(" ", longestLabelLen+1) + "\t")
+ }
+ outBuf.WriteString(scanner.Text())
+ }
+
+ return outBuf.String()
+}
+
+type failNower interface {
+ FailNow()
+}
+
+// FailNow fails test
+func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ Fail(t, failureMessage, msgAndArgs...)
+
+ // We cannot extend TestingT with FailNow() and
+ // maintain backwards compatibility, so we fall back
+ // to panicking when FailNow is not available in
+ // TestingT.
+ // See issue #263
+
+ if t, ok := t.(failNower); ok {
+ t.FailNow()
+ } else {
+ panic("test failed and t is missing `FailNow()`")
+ }
+ return false
+}
+
+// Fail reports a failure through
+func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ content := []labeledContent{
+ {"Error Trace", strings.Join(CallerInfo(), "\n\t\t\t")},
+ {"Error", failureMessage},
+ }
+
+ // Add test name if the Go version supports it
+ if n, ok := t.(interface {
+ Name() string
+ }); ok {
+ content = append(content, labeledContent{"Test", n.Name()})
+ }
+
+ message := messageFromMsgAndArgs(msgAndArgs...)
+ if len(message) > 0 {
+ content = append(content, labeledContent{"Messages", message})
+ }
+
+ t.Errorf("\n%s", ""+labeledOutput(content...))
+
+ return false
+}
+
+type labeledContent struct {
+ label string
+ content string
+}
+
+// labeledOutput returns a string consisting of the provided labeledContent. Each labeled output is appended in the following manner:
+//
+// \t{{label}}:{{align_spaces}}\t{{content}}\n
+//
+// The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label.
+// If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this
+// alignment is achieved, "\t{{content}}\n" is added for the output.
+//
+// If the content of the labeledOutput contains line breaks, the subsequent lines are aligned so that they start at the same location as the first line.
+func labeledOutput(content ...labeledContent) string {
+ longestLabel := 0
+ for _, v := range content {
+ if len(v.label) > longestLabel {
+ longestLabel = len(v.label)
+ }
+ }
+ var output string
+ for _, v := range content {
+ output += "\t" + v.label + ":" + strings.Repeat(" ", longestLabel-len(v.label)) + "\t" + indentMessageLines(v.content, longestLabel) + "\n"
+ }
+ return output
+}
+
+// Implements asserts that an object implements the specified interface.
+//
+// assert.Implements(t, (*MyInterface)(nil), new(MyObject))
+func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ interfaceType := reflect.TypeOf(interfaceObject).Elem()
+
+ if object == nil {
+ return Fail(t, fmt.Sprintf("Cannot check if nil implements %v", interfaceType), msgAndArgs...)
+ }
+ if !reflect.TypeOf(object).Implements(interfaceType) {
+ return Fail(t, fmt.Sprintf("%T must implement %v", object, interfaceType), msgAndArgs...)
+ }
+
+ return true
+}
+
+// NotImplements asserts that an object does not implement the specified interface.
+//
+// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject))
+func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ interfaceType := reflect.TypeOf(interfaceObject).Elem()
+
+ if object == nil {
+ return Fail(t, fmt.Sprintf("Cannot check if nil does not implement %v", interfaceType), msgAndArgs...)
+ }
+ if reflect.TypeOf(object).Implements(interfaceType) {
+ return Fail(t, fmt.Sprintf("%T implements %v", object, interfaceType), msgAndArgs...)
+ }
+
+ return true
+}
+
+// IsType asserts that the specified objects are of the same type.
+func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) {
+ return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...)
+ }
+
+ return true
+}
+
+// Equal asserts that two objects are equal.
+//
+// assert.Equal(t, 123, 123)
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if err := validateEqualArgs(expected, actual); err != nil {
+ return Fail(t, fmt.Sprintf("Invalid operation: %#v == %#v (%s)",
+ expected, actual, err), msgAndArgs...)
+ }
+
+ if !ObjectsAreEqual(expected, actual) {
+ diff := diff(expected, actual)
+ expected, actual = formatUnequalValues(expected, actual)
+ return Fail(t, fmt.Sprintf("Not equal: \n"+
+ "expected: %s\n"+
+ "actual : %s%s", expected, actual, diff), msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// validateEqualArgs checks whether provided arguments can be safely used in the
+// Equal/NotEqual functions.
+func validateEqualArgs(expected, actual interface{}) error {
+ if expected == nil && actual == nil {
+ return nil
+ }
+
+ if isFunction(expected) || isFunction(actual) {
+ return errors.New("cannot take func type as argument")
+ }
+ return nil
+}
+
+// Same asserts that two pointers reference the same object.
+//
+// assert.Same(t, ptr1, ptr2)
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if !samePointers(expected, actual) {
+ return Fail(t, fmt.Sprintf("Not same: \n"+
+ "expected: %p %#v\n"+
+ "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...)
+ }
+
+ return true
+}
+
+// NotSame asserts that two pointers do not reference the same object.
+//
+// assert.NotSame(t, ptr1, ptr2)
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if samePointers(expected, actual) {
+ return Fail(t, fmt.Sprintf(
+ "Expected and actual point to the same object: %p %#v",
+ expected, expected), msgAndArgs...)
+ }
+ return true
+}
+
+// samePointers compares two generic interface objects and returns whether
+// they point to the same object
+func samePointers(first, second interface{}) bool {
+ firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second)
+ if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr {
+ return false
+ }
+
+ firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second)
+ if firstType != secondType {
+ return false
+ }
+
+ // compare pointer addresses
+ return first == second
+}
+
+// formatUnequalValues takes two values of arbitrary types and returns string
+// representations appropriate to be presented to the user.
+//
+// If the values are not of like type, the returned strings will be prefixed
+// with the type name, and the value will be enclosed in parentheses similar
+// to a type conversion in the Go grammar.
+func formatUnequalValues(expected, actual interface{}) (e string, a string) {
+ if reflect.TypeOf(expected) != reflect.TypeOf(actual) {
+ return fmt.Sprintf("%T(%s)", expected, truncatingFormat(expected)),
+ fmt.Sprintf("%T(%s)", actual, truncatingFormat(actual))
+ }
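+ // time.Duration reads better via %v (e.g. "2s") than via the %#v form used
+ // by truncatingFormat.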
+ switch expected.(type) {
+ case time.Duration:
+ return fmt.Sprintf("%v", expected), fmt.Sprintf("%v", actual)
+ }
+ return truncatingFormat(expected), truncatingFormat(actual)
+}
+
+// truncatingFormat formats the data and truncates it if it's too long.
+//
+// This helps keep formatted error message lines from exceeding the
+// bufio.MaxScanTokenSize max line length that the go testing framework imposes.
+func truncatingFormat(data interface{}) string {
+ value := fmt.Sprintf("%#v", data)
+ max := bufio.MaxScanTokenSize - 100 // Give us some space for the type info too if needed.
+ if len(value) > max {
+ value = value[0:max] + "<... truncated>"
+ }
+ return value
+}
+
+// EqualValues asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// assert.EqualValues(t, uint32(123), int32(123))
+func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if !ObjectsAreEqualValues(expected, actual) {
+ diff := diff(expected, actual)
+ expected, actual = formatUnequalValues(expected, actual)
+ return Fail(t, fmt.Sprintf("Not equal: \n"+
+ "expected: %s\n"+
+ "actual : %s%s", expected, actual, diff), msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// EqualExportedValues asserts that the types of two objects are equal and their public
+// fields are also equal. This is useful for comparing structs that have private fields
+// that could potentially differ.
+//
+// type S struct {
+// Exported int
+// notExported int
+// }
+// assert.EqualExportedValues(t, S{1, 2}, S{1, 3}) => true
+// assert.EqualExportedValues(t, S{1, 2}, S{2, 3}) => false
+func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ aType := reflect.TypeOf(expected)
+ bType := reflect.TypeOf(actual)
+
+ if aType != bType {
+ return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...)
+ }
+
+ if aType.Kind() == reflect.Ptr {
+ aType = aType.Elem()
+ }
+ if bType.Kind() == reflect.Ptr {
+ bType = bType.Elem()
+ }
+
+ if aType.Kind() != reflect.Struct {
+ return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...)
+ }
+
+ if bType.Kind() != reflect.Struct {
+ return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...)
+ }
+
+ expected = copyExportedFields(expected)
+ actual = copyExportedFields(actual)
+
+ if !ObjectsAreEqualValues(expected, actual) {
+ diff := diff(expected, actual)
+ expected, actual = formatUnequalValues(expected, actual)
+ return Fail(t, fmt.Sprintf("Not equal (comparing only exported fields): \n"+
+ "expected: %s\n"+
+ "actual : %s%s", expected, actual, diff), msgAndArgs...)
+ }
+
+ return true
+}
+
+// Exactly asserts that two objects are equal in value and type.
+//
+// assert.Exactly(t, int32(123), int64(123))
+func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ aType := reflect.TypeOf(expected)
+ bType := reflect.TypeOf(actual)
+
+ if aType != bType {
+ return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...)
+ }
+
+ return Equal(t, expected, actual, msgAndArgs...)
+
+}
+
+// NotNil asserts that the specified object is not nil.
+//
+// assert.NotNil(t, err)
+func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ if !isNil(object) {
+ return true
+ }
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Fail(t, "Expected value not to be nil.", msgAndArgs...)
+}
+
+// isNil checks if a specified object is nil or not, without Failing.
+func isNil(object interface{}) bool {
+ if object == nil {
+ return true
+ }
+
+ value := reflect.ValueOf(object)
+ switch value.Kind() {
+ case
+ reflect.Chan, reflect.Func,
+ reflect.Interface, reflect.Map,
+ reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
+
+ return value.IsNil()
+ }
+
+ return false
+}
+
+// Nil asserts that the specified object is nil.
+//
+// assert.Nil(t, err)
+func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ if isNil(object) {
+ return true
+ }
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...)
+}
+
+// isEmpty gets whether the specified object is considered empty or not.
+func isEmpty(object interface{}) bool {
+
+ // get nil case out of the way
+ if object == nil {
+ return true
+ }
+
+ objValue := reflect.ValueOf(object)
+
+ switch objValue.Kind() {
+ // collection types are empty when they have no element
+ case reflect.Chan, reflect.Map, reflect.Slice:
+ return objValue.Len() == 0
+ // pointers are empty if nil or if the value they point to is empty
+ case reflect.Ptr:
+ if objValue.IsNil() {
+ return true
+ }
+ deref := objValue.Elem().Interface()
+ return isEmpty(deref)
+ // for all other types, compare against the zero value
+ // array types are empty when they match their zero-initialized state
+ default:
+ zero := reflect.Zero(objValue.Type())
+ return reflect.DeepEqual(object, zero.Interface())
+ }
+}
+
+// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// assert.Empty(t, obj)
+func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ pass := isEmpty(object)
+ if !pass {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...)
+ }
+
+ return pass
+
+}
+
+// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// if assert.NotEmpty(t, obj) {
+// assert.Equal(t, "two", obj[1])
+// }
+func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ pass := !isEmpty(object)
+ if !pass {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...)
+ }
+
+ return pass
+
+}
+
+// getLen tries to get the length of an object.
+// It returns (0, false) if impossible.
+func getLen(x interface{}) (length int, ok bool) {
+ v := reflect.ValueOf(x)
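+ // reflect.Value.Len panics for kinds that have no length; recover from that
+ // panic and report ok=false instead of crashing the test binary.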
+ defer func() {
+ ok = recover() == nil
+ }()
+ return v.Len(), true
+}
+
+// Len asserts that the specified object has specific length.
+// Len also fails if the object has a type that len() does not accept.
+//
+// assert.Len(t, mySlice, 3)
+func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ l, ok := getLen(object)
+ if !ok {
+ return Fail(t, fmt.Sprintf("\"%v\" could not be applied builtin len()", object), msgAndArgs...)
+ }
+
+ if l != length {
+ return Fail(t, fmt.Sprintf("\"%v\" should have %d item(s), but has %d", object, length, l), msgAndArgs...)
+ }
+ return true
+}
+
+// True asserts that the specified value is true.
+//
+// assert.True(t, myBool)
+func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
+ if !value {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Fail(t, "Should be true", msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// False asserts that the specified value is false.
+//
+// assert.False(t, myBool)
+func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
+ if value {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Fail(t, "Should be false", msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// NotEqual asserts that the specified values are NOT equal.
+//
+// assert.NotEqual(t, obj1, obj2)
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if err := validateEqualArgs(expected, actual); err != nil {
+ return Fail(t, fmt.Sprintf("Invalid operation: %#v != %#v (%s)",
+ expected, actual, err), msgAndArgs...)
+ }
+
+ if ObjectsAreEqual(expected, actual) {
+ return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// NotEqualValues asserts that two objects are not equal even when converted to the same type
+//
+// assert.NotEqualValues(t, obj1, obj2)
+func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if ObjectsAreEqualValues(expected, actual) {
+ return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...)
+ }
+
+ return true
+}
+
+// containsElement tries to loop over the list, checking if the list includes the element.
+// return (false, false) if impossible.
+// return (true, false) if element was not found.
+// return (true, true) if element was found.
+func containsElement(list interface{}, element interface{}) (ok, found bool) {
+
+ listValue := reflect.ValueOf(list)
+ listType := reflect.TypeOf(list)
+ if listType == nil {
+ return false, false
+ }
+ listKind := listType.Kind()
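+ // Len and Index below can panic for unsupported kinds; recover and report
+ // (ok=false, found=false) instead.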
+ defer func() {
+ if e := recover(); e != nil {
+ ok = false
+ found = false
+ }
+ }()
+
+ if listKind == reflect.String {
+ elementValue := reflect.ValueOf(element)
+ return true, strings.Contains(listValue.String(), elementValue.String())
+ }
+
+ if listKind == reflect.Map {
+ mapKeys := listValue.MapKeys()
+ for i := 0; i < len(mapKeys); i++ {
+ if ObjectsAreEqual(mapKeys[i].Interface(), element) {
+ return true, true
+ }
+ }
+ return true, false
+ }
+
+ for i := 0; i < listValue.Len(); i++ {
+ if ObjectsAreEqual(listValue.Index(i).Interface(), element) {
+ return true, true
+ }
+ }
+ return true, false
+
+}
+
+// Contains asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// assert.Contains(t, "Hello World", "World")
+// assert.Contains(t, ["Hello", "World"], "World")
+// assert.Contains(t, {"Hello": "World"}, "Hello")
+func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ ok, found := containsElement(s, contains)
+ if !ok {
+ return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...)
+ }
+ if !found {
+ return Fail(t, fmt.Sprintf("%#v does not contain %#v", s, contains), msgAndArgs...)
+ }
+
+	return true
+}
+
+// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// assert.NotContains(t, "Hello World", "Earth")
+// assert.NotContains(t, ["Hello", "World"], "Earth")
+// assert.NotContains(t, {"Hello": "World"}, "Earth")
+func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ ok, found := containsElement(s, contains)
+ if !ok {
+ return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...)
+ }
+ if found {
+ return Fail(t, fmt.Sprintf("%#v should not contain %#v", s, contains), msgAndArgs...)
+ }
+
+	return true
+}
+
+// Subset asserts that the specified list(array, slice...) or map contains all
+// elements given in the specified subset list(array, slice...) or map.
+//
+// assert.Subset(t, [1, 2, 3], [1, 2])
+// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1})
+func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if subset == nil {
+ return true // we consider nil to be equal to the nil set
+ }
+
+ listKind := reflect.TypeOf(list).Kind()
+ if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
+ }
+
+ subsetKind := reflect.TypeOf(subset).Kind()
+	if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
+ }
+
+ if subsetKind == reflect.Map && listKind == reflect.Map {
+ subsetMap := reflect.ValueOf(subset)
+ actualMap := reflect.ValueOf(list)
+
+ for _, k := range subsetMap.MapKeys() {
+ ev := subsetMap.MapIndex(k)
+ av := actualMap.MapIndex(k)
+
+ if !av.IsValid() {
+ return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...)
+ }
+ if !ObjectsAreEqual(ev.Interface(), av.Interface()) {
+ return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...)
+ }
+ }
+
+ return true
+ }
+
+ subsetList := reflect.ValueOf(subset)
+ for i := 0; i < subsetList.Len(); i++ {
+ element := subsetList.Index(i).Interface()
+ ok, found := containsElement(list, element)
+ if !ok {
+ return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", list), msgAndArgs...)
+ }
+ if !found {
+ return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, element), msgAndArgs...)
+ }
+ }
+
+ return true
+}
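+
+// As an illustration, assuming a testing.T value t, both of these pass:
+//
+//	assert.Subset(t, []int{1, 2, 3}, []int{1, 2})
+//	assert.Subset(t, map[string]int{"x": 1, "y": 2}, map[string]int{"x": 1})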
+
+// NotSubset asserts that the specified list(array, slice...) or map does NOT
+// contain all elements given in the specified subset list(array, slice...) or
+// map.
+//
+// assert.NotSubset(t, [1, 3, 4], [1, 2])
+// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3})
+func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if subset == nil {
+ return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...)
+ }
+
+ listKind := reflect.TypeOf(list).Kind()
+ if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
+ }
+
+ subsetKind := reflect.TypeOf(subset).Kind()
+	if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
+ }
+
+ if subsetKind == reflect.Map && listKind == reflect.Map {
+ subsetMap := reflect.ValueOf(subset)
+ actualMap := reflect.ValueOf(list)
+
+ for _, k := range subsetMap.MapKeys() {
+ ev := subsetMap.MapIndex(k)
+ av := actualMap.MapIndex(k)
+
+ if !av.IsValid() {
+ return true
+ }
+ if !ObjectsAreEqual(ev.Interface(), av.Interface()) {
+ return true
+ }
+ }
+
+ return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...)
+ }
+
+ subsetList := reflect.ValueOf(subset)
+ for i := 0; i < subsetList.Len(); i++ {
+ element := subsetList.Index(i).Interface()
+ ok, found := containsElement(list, element)
+ if !ok {
+ return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
+ }
+ if !found {
+ return true
+ }
+ }
+
+ return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...)
+}
+
+// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2])
+func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if isEmpty(listA) && isEmpty(listB) {
+ return true
+ }
+
+ if !isList(t, listA, msgAndArgs...) || !isList(t, listB, msgAndArgs...) {
+ return false
+ }
+
+ extraA, extraB := diffLists(listA, listB)
+
+ if len(extraA) == 0 && len(extraB) == 0 {
+ return true
+ }
+
+ return Fail(t, formatListDiff(listA, listB, extraA, extraB), msgAndArgs...)
+}
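+
+// As an illustration, duplicates are counted, so assuming a testing.T value t:
+//
+//	assert.ElementsMatch(t, []int{1, 3, 2, 3}, []int{1, 3, 3, 2}) // passes
+//	assert.ElementsMatch(t, []int{1, 1, 2}, []int{1, 2, 2})       // fails: counts differ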
+
+// isList checks that the provided value is array or slice.
+func isList(t TestingT, list interface{}, msgAndArgs ...interface{}) (ok bool) {
+ kind := reflect.TypeOf(list).Kind()
+ if kind != reflect.Array && kind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s, expecting array or slice", list, kind),
+ msgAndArgs...)
+ }
+ return true
+}
+
+// diffLists diffs two arrays/slices and returns slices of elements that are only in A and only in B.
+// If some element is present multiple times, each instance is counted separately (e.g. if something is 2x in A and
+// 5x in B, it will be 0x in extraA and 3x in extraB). The order of items in both lists is ignored.
+func diffLists(listA, listB interface{}) (extraA, extraB []interface{}) {
+ aValue := reflect.ValueOf(listA)
+ bValue := reflect.ValueOf(listB)
+
+ aLen := aValue.Len()
+ bLen := bValue.Len()
+
+ // Mark indexes in bValue that we already used
+ visited := make([]bool, bLen)
+ for i := 0; i < aLen; i++ {
+ element := aValue.Index(i).Interface()
+ found := false
+ for j := 0; j < bLen; j++ {
+ if visited[j] {
+ continue
+ }
+ if ObjectsAreEqual(bValue.Index(j).Interface(), element) {
+ visited[j] = true
+ found = true
+ break
+ }
+ }
+ if !found {
+ extraA = append(extraA, element)
+ }
+ }
+
+ for j := 0; j < bLen; j++ {
+ if visited[j] {
+ continue
+ }
+ extraB = append(extraB, bValue.Index(j).Interface())
+ }
+
+ return
+}
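+
+// Sketch of the behavior on concrete inputs:
+//
+//	diffLists([]int{1, 2, 2}, []int{2, 3}) // extraA = [1 2], extraB = [3]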
+
+func formatListDiff(listA, listB interface{}, extraA, extraB []interface{}) string {
+ var msg bytes.Buffer
+
+ msg.WriteString("elements differ")
+ if len(extraA) > 0 {
+ msg.WriteString("\n\nextra elements in list A:\n")
+ msg.WriteString(spewConfig.Sdump(extraA))
+ }
+ if len(extraB) > 0 {
+ msg.WriteString("\n\nextra elements in list B:\n")
+ msg.WriteString(spewConfig.Sdump(extraB))
+ }
+ msg.WriteString("\n\nlistA:\n")
+ msg.WriteString(spewConfig.Sdump(listA))
+ msg.WriteString("\n\nlistB:\n")
+ msg.WriteString(spewConfig.Sdump(listB))
+
+ return msg.String()
+}
+
+// Condition uses a Comparison to assert a complex condition.
+func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ result := comp()
+ if !result {
+ Fail(t, "Condition failed!", msgAndArgs...)
+ }
+ return result
+}
+
+// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics
+// methods, and represents a simple func that takes no arguments, and returns nothing.
+type PanicTestFunc func()
+
+// didPanic returns true if the function passed to it panics. Otherwise, it returns false.
+func didPanic(f PanicTestFunc) (didPanic bool, message interface{}, stack string) {
+ didPanic = true
+
+ defer func() {
+ message = recover()
+ if didPanic {
+ stack = string(debug.Stack())
+ }
+ }()
+
+ // call the target function
+ f()
+ didPanic = false
+
+ return
+}
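+
+// Sketch of the behavior on concrete inputs:
+//
+//	didPanic(func() { panic("boom") }) // (true, "boom", <stack trace>)
+//	didPanic(func() {})                // (false, nil, "")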
+
+// Panics asserts that the code inside the specified PanicTestFunc panics.
+//
+// assert.Panics(t, func(){ GoCrazy() })
+func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if funcDidPanic, panicValue, _ := didPanic(f); !funcDidPanic {
+ return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...)
+ }
+
+ return true
+}
+
+// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() })
+func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ funcDidPanic, panicValue, panickedStack := didPanic(f)
+ if !funcDidPanic {
+ return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...)
+ }
+ if panicValue != expected {
+ return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%#v\n\tPanic value:\t%#v\n\tPanic stack:\t%s", f, expected, panicValue, panickedStack), msgAndArgs...)
+ }
+
+ return true
+}
+
+// PanicsWithError asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() })
+func PanicsWithError(t TestingT, errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ funcDidPanic, panicValue, panickedStack := didPanic(f)
+ if !funcDidPanic {
+ return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...)
+ }
+ panicErr, ok := panicValue.(error)
+ if !ok || panicErr.Error() != errString {
+ return Fail(t, fmt.Sprintf("func %#v should panic with error message:\t%#v\n\tPanic value:\t%#v\n\tPanic stack:\t%s", f, errString, panicValue, panickedStack), msgAndArgs...)
+ }
+
+ return true
+}
+
+// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// assert.NotPanics(t, func(){ RemainCalm() })
+func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if funcDidPanic, panicValue, panickedStack := didPanic(f); funcDidPanic {
+ return Fail(t, fmt.Sprintf("func %#v should not panic\n\tPanic value:\t%v\n\tPanic stack:\t%s", f, panicValue, panickedStack), msgAndArgs...)
+ }
+
+ return true
+}
+
+// WithinDuration asserts that the two times are within duration delta of each other.
+//
+// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second)
+func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ dt := expected.Sub(actual)
+ if dt < -delta || dt > delta {
+ return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...)
+ }
+
+ return true
+}
+
+// WithinRange asserts that a time is within a time range (inclusive).
+//
+// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second))
+func WithinRange(t TestingT, actual, start, end time.Time, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if end.Before(start) {
+ return Fail(t, "Start should be before end", msgAndArgs...)
+ }
+
+ if actual.Before(start) {
+ return Fail(t, fmt.Sprintf("Time %v expected to be in time range %v to %v, but is before the range", actual, start, end), msgAndArgs...)
+ } else if actual.After(end) {
+ return Fail(t, fmt.Sprintf("Time %v expected to be in time range %v to %v, but is after the range", actual, start, end), msgAndArgs...)
+ }
+
+ return true
+}
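+
+// For example, assuming a testing.T value t, this passes because the actual
+// time falls inside the one-minute window on either side:
+//
+//	now := time.Now()
+//	assert.WithinRange(t, now, now.Add(-time.Minute), now.Add(time.Minute))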
+
+func toFloat(x interface{}) (float64, bool) {
+ var xf float64
+ xok := true
+
+ switch xn := x.(type) {
+ case uint:
+ xf = float64(xn)
+ case uint8:
+ xf = float64(xn)
+ case uint16:
+ xf = float64(xn)
+ case uint32:
+ xf = float64(xn)
+ case uint64:
+ xf = float64(xn)
+ case int:
+ xf = float64(xn)
+ case int8:
+ xf = float64(xn)
+ case int16:
+ xf = float64(xn)
+ case int32:
+ xf = float64(xn)
+ case int64:
+ xf = float64(xn)
+ case float32:
+ xf = float64(xn)
+ case float64:
+ xf = xn
+ case time.Duration:
+ xf = float64(xn)
+ default:
+ xok = false
+ }
+
+ return xf, xok
+}
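+
+// Sketch of the behavior on concrete inputs (a time.Duration converts to its
+// nanosecond count):
+//
+//	toFloat(3)             // (3.0, true)
+//	toFloat(2*time.Second) // (2e9, true)
+//	toFloat("3")           // (0, false)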
+
+// InDelta asserts that the two numerals are within delta of each other.
+//
+// assert.InDelta(t, math.Pi, 22/7.0, 0.01)
+func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ af, aok := toFloat(expected)
+ bf, bok := toFloat(actual)
+
+ if !aok || !bok {
+ return Fail(t, "Parameters must be numerical", msgAndArgs...)
+ }
+
+ if math.IsNaN(af) && math.IsNaN(bf) {
+ return true
+ }
+
+ if math.IsNaN(af) {
+ return Fail(t, "Expected must not be NaN", msgAndArgs...)
+ }
+
+ if math.IsNaN(bf) {
+ return Fail(t, fmt.Sprintf("Expected %v with delta %v, but was NaN", expected, delta), msgAndArgs...)
+ }
+
+ dt := af - bf
+ if dt < -delta || dt > delta {
+ return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...)
+ }
+
+ return true
+}
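+
+// For example, assuming a testing.T value t, this passes because
+// |3.14159 - 22/7| ≈ 0.00127, which is within the 0.01 delta:
+//
+//	assert.InDelta(t, 3.14159, 22.0/7.0, 0.01)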
+
+// InDeltaSlice is the same as InDelta, except it compares two slices.
+func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if expected == nil || actual == nil ||
+ reflect.TypeOf(actual).Kind() != reflect.Slice ||
+ reflect.TypeOf(expected).Kind() != reflect.Slice {
+ return Fail(t, "Parameters must be slice", msgAndArgs...)
+ }
+
+ actualSlice := reflect.ValueOf(actual)
+ expectedSlice := reflect.ValueOf(expected)
+
+ for i := 0; i < actualSlice.Len(); i++ {
+ result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta, msgAndArgs...)
+ if !result {
+ return result
+ }
+ }
+
+ return true
+}
+
+// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if expected == nil || actual == nil ||
+ reflect.TypeOf(actual).Kind() != reflect.Map ||
+ reflect.TypeOf(expected).Kind() != reflect.Map {
+ return Fail(t, "Arguments must be maps", msgAndArgs...)
+ }
+
+ expectedMap := reflect.ValueOf(expected)
+ actualMap := reflect.ValueOf(actual)
+
+ if expectedMap.Len() != actualMap.Len() {
+ return Fail(t, "Arguments must have the same number of keys", msgAndArgs...)
+ }
+
+ for _, k := range expectedMap.MapKeys() {
+ ev := expectedMap.MapIndex(k)
+ av := actualMap.MapIndex(k)
+
+ if !ev.IsValid() {
+ return Fail(t, fmt.Sprintf("missing key %q in expected map", k), msgAndArgs...)
+ }
+
+ if !av.IsValid() {
+ return Fail(t, fmt.Sprintf("missing key %q in actual map", k), msgAndArgs...)
+ }
+
+ if !InDelta(
+ t,
+ ev.Interface(),
+ av.Interface(),
+ delta,
+ msgAndArgs...,
+ ) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func calcRelativeError(expected, actual interface{}) (float64, error) {
+ af, aok := toFloat(expected)
+ bf, bok := toFloat(actual)
+ if !aok || !bok {
+ return 0, fmt.Errorf("Parameters must be numerical")
+ }
+ if math.IsNaN(af) && math.IsNaN(bf) {
+ return 0, nil
+ }
+ if math.IsNaN(af) {
+ return 0, errors.New("expected value must not be NaN")
+ }
+ if af == 0 {
+ return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error")
+ }
+ if math.IsNaN(bf) {
+ return 0, errors.New("actual value must not be NaN")
+ }
+
+ return math.Abs(af-bf) / math.Abs(af), nil
+}
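+
+// The relative error is computed as |expected - actual| / |expected|, so for
+// example:
+//
+//	calcRelativeError(100.0, 103.0) // (0.03, nil)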
+
+// InEpsilon asserts that expected and actual have a relative error of at most epsilon.
+func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if math.IsNaN(epsilon) {
+ return Fail(t, "epsilon must not be NaN", msgAndArgs...)
+ }
+ actualEpsilon, err := calcRelativeError(expected, actual)
+ if err != nil {
+ return Fail(t, err.Error(), msgAndArgs...)
+ }
+ if actualEpsilon > epsilon {
+ return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+
+ " < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...)
+ }
+
+ return true
+}
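+
+// For example, assuming a testing.T value t, this passes because the relative
+// error |100 - 101| / |100| = 0.01 does not exceed the 0.02 epsilon:
+//
+//	assert.InEpsilon(t, 100.0, 101.0, 0.02)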
+
+// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
+func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if expected == nil || actual == nil {
+ return Fail(t, "Parameters must be slice", msgAndArgs...)
+ }
+
+ expectedSlice := reflect.ValueOf(expected)
+ actualSlice := reflect.ValueOf(actual)
+
+ if expectedSlice.Type().Kind() != reflect.Slice {
+ return Fail(t, "Expected value must be slice", msgAndArgs...)
+ }
+
+ expectedLen := expectedSlice.Len()
+ if !IsType(t, expected, actual) || !Len(t, actual, expectedLen) {
+ return false
+ }
+
+ for i := 0; i < expectedLen; i++ {
+ if !InEpsilon(t, expectedSlice.Index(i).Interface(), actualSlice.Index(i).Interface(), epsilon, "at index %d", i) {
+ return false
+ }
+ }
+
+ return true
+}
+
+/*
+ Errors
+*/
+
+// NoError asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.NoError(t, err) {
+// assert.Equal(t, expectedObj, actualObj)
+// }
+func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
+ if err != nil {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...)
+ }
+
+ return true
+}
+
+// Error asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.Error(t, err) {
+// assert.Equal(t, expectedError, err)
+// }
+func Error(t TestingT, err error, msgAndArgs ...interface{}) bool {
+ if err == nil {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Fail(t, "An error is expected but got nil.", msgAndArgs...)
+ }
+
+ return true
+}
+
+// EqualError asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// assert.EqualError(t, err, expectedErrorString)
+func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if !Error(t, theError, msgAndArgs...) {
+ return false
+ }
+ expected := errString
+ actual := theError.Error()
+ // don't need to use deep equals here, we know they are both strings
+ if expected != actual {
+ return Fail(t, fmt.Sprintf("Error message not equal:\n"+
+ "expected: %q\n"+
+ "actual : %q", expected, actual), msgAndArgs...)
+ }
+ return true
+}
+
+// ErrorContains asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+// actualObj, err := SomeFunction()
+// assert.ErrorContains(t, err, expectedErrorSubString)
+func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if !Error(t, theError, msgAndArgs...) {
+ return false
+ }
+
+ actual := theError.Error()
+ if !strings.Contains(actual, contains) {
+ return Fail(t, fmt.Sprintf("Error %#v does not contain %#v", actual, contains), msgAndArgs...)
+ }
+
+ return true
+}
+
+// matchRegexp returns true if a specified regexp matches a string.
+func matchRegexp(rx interface{}, str interface{}) bool {
+ var r *regexp.Regexp
+ if rr, ok := rx.(*regexp.Regexp); ok {
+ r = rr
+ } else {
+ r = regexp.MustCompile(fmt.Sprint(rx))
+ }
+
+	return r.FindStringIndex(fmt.Sprint(str)) != nil
+}
+
+// Regexp asserts that a specified regexp matches a string.
+//
+// assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
+// assert.Regexp(t, "start...$", "it's not starting")
+func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ match := matchRegexp(rx, str)
+
+ if !match {
+ Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...)
+ }
+
+ return match
+}
+
+// NotRegexp asserts that a specified regexp does not match a string.
+//
+// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
+// assert.NotRegexp(t, "^start", "it's not starting")
+func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ match := matchRegexp(rx, str)
+
+ if match {
+ Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...)
+ }
+
+	return !match
+}
+
+// Zero asserts that i is the zero value for its type.
+func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if i != nil && !reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) {
+ return Fail(t, fmt.Sprintf("Should be zero, but was %v", i), msgAndArgs...)
+ }
+ return true
+}
+
+// NotZero asserts that i is not the zero value for its type.
+func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if i == nil || reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) {
+ return Fail(t, fmt.Sprintf("Should not be zero, but was %v", i), msgAndArgs...)
+ }
+ return true
+}
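+
+// For example, assuming a testing.T value t, all of these pass:
+//
+//	assert.Zero(t, 0)
+//	assert.Zero(t, "")
+//	assert.NotZero(t, 42)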
+
+// FileExists checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
+func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ info, err := os.Lstat(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...)
+ }
+ return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...)
+ }
+ if info.IsDir() {
+ return Fail(t, fmt.Sprintf("%q is a directory", path), msgAndArgs...)
+ }
+ return true
+}
+
+// NoFileExists checks whether a file does not exist in a given path. It fails
+// if the path points to an existing _file_ only.
+func NoFileExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ info, err := os.Lstat(path)
+ if err != nil {
+ return true
+ }
+ if info.IsDir() {
+ return true
+ }
+ return Fail(t, fmt.Sprintf("file %q exists", path), msgAndArgs...)
+}
+
+// DirExists checks whether a directory exists in the given path. It also fails
+// if the path is a file rather than a directory or there is an error checking whether it exists.
+func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ info, err := os.Lstat(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...)
+ }
+ return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...)
+ }
+ if !info.IsDir() {
+ return Fail(t, fmt.Sprintf("%q is a file", path), msgAndArgs...)
+ }
+ return true
+}
+
+// NoDirExists checks whether a directory does not exist in the given path.
+// It fails if the path points to an existing _directory_ only.
+func NoDirExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ info, err := os.Lstat(path)
+	if err != nil {
+		return true
+	}
+ if !info.IsDir() {
+ return true
+ }
+ return Fail(t, fmt.Sprintf("directory %q exists", path), msgAndArgs...)
+}
+
+// JSONEq asserts that two JSON strings are equivalent.
+//
+// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
+func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ var expectedJSONAsInterface, actualJSONAsInterface interface{}
+
+ if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil {
+ return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...)
+ }
+
+ if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil {
+ return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...)
+ }
+
+ return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...)
+}
+
+// YAMLEq asserts that two YAML strings are equivalent.
+func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ var expectedYAMLAsInterface, actualYAMLAsInterface interface{}
+
+ if err := yaml.Unmarshal([]byte(expected), &expectedYAMLAsInterface); err != nil {
+ return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid yaml.\nYAML parsing error: '%s'", expected, err.Error()), msgAndArgs...)
+ }
+
+ if err := yaml.Unmarshal([]byte(actual), &actualYAMLAsInterface); err != nil {
+ return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid yaml.\nYAML error: '%s'", actual, err.Error()), msgAndArgs...)
+ }
+
+ return Equal(t, expectedYAMLAsInterface, actualYAMLAsInterface, msgAndArgs...)
+}
+
+func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) {
+ t := reflect.TypeOf(v)
+ k := t.Kind()
+
+ if k == reflect.Ptr {
+ t = t.Elem()
+ k = t.Kind()
+ }
+ return t, k
+}
+
+// diff returns a diff of both values as long as both are of the same type and
+// are a struct, map, slice, array or string. Otherwise it returns an empty string.
+func diff(expected interface{}, actual interface{}) string {
+ if expected == nil || actual == nil {
+ return ""
+ }
+
+ et, ek := typeAndKind(expected)
+ at, _ := typeAndKind(actual)
+
+ if et != at {
+ return ""
+ }
+
+ if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String {
+ return ""
+ }
+
+ var e, a string
+
+ switch et {
+ case reflect.TypeOf(""):
+ e = reflect.ValueOf(expected).String()
+ a = reflect.ValueOf(actual).String()
+ case reflect.TypeOf(time.Time{}):
+ e = spewConfigStringerEnabled.Sdump(expected)
+ a = spewConfigStringerEnabled.Sdump(actual)
+ default:
+ e = spewConfig.Sdump(expected)
+ a = spewConfig.Sdump(actual)
+ }
+
+ diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
+ A: difflib.SplitLines(e),
+ B: difflib.SplitLines(a),
+ FromFile: "Expected",
+ FromDate: "",
+ ToFile: "Actual",
+ ToDate: "",
+ Context: 1,
+ })
+
+ return "\n\nDiff:\n" + diff
+}
+
+func isFunction(arg interface{}) bool {
+ if arg == nil {
+ return false
+ }
+ return reflect.TypeOf(arg).Kind() == reflect.Func
+}
+
+var spewConfig = spew.ConfigState{
+ Indent: " ",
+ DisablePointerAddresses: true,
+ DisableCapacities: true,
+ SortKeys: true,
+ DisableMethods: true,
+ MaxDepth: 10,
+}
+
+var spewConfigStringerEnabled = spew.ConfigState{
+ Indent: " ",
+ DisablePointerAddresses: true,
+ DisableCapacities: true,
+ SortKeys: true,
+ MaxDepth: 10,
+}
+
+type tHelper interface {
+ Helper()
+}
+
+// Eventually asserts that the given condition will be met within waitFor time,
+// periodically checking the target function each tick.
+//
+// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond)
+func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ ch := make(chan bool, 1)
+
+ timer := time.NewTimer(waitFor)
+ defer timer.Stop()
+
+ ticker := time.NewTicker(tick)
+ defer ticker.Stop()
+
+ for tick := ticker.C; ; {
+ select {
+ case <-timer.C:
+ return Fail(t, "Condition never satisfied", msgAndArgs...)
+ case <-tick:
+ tick = nil
+ go func() { ch <- condition() }()
+ case v := <-ch:
+ if v {
+ return true
+ }
+ tick = ticker.C
+ }
+ }
+}
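+
+// For example, assuming a testing.T value t, this passes once enough time has
+// elapsed, well before the one-second deadline:
+//
+//	started := time.Now()
+//	assert.Eventually(t, func() bool {
+//		return time.Since(started) > 100*time.Millisecond
+//	}, time.Second, 10*time.Millisecond)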
+
+// CollectT implements the TestingT interface and collects all errors.
+type CollectT struct {
+ errors []error
+}
+
+// Errorf collects the error.
+func (c *CollectT) Errorf(format string, args ...interface{}) {
+ c.errors = append(c.errors, fmt.Errorf(format, args...))
+}
+
+// FailNow panics.
+func (*CollectT) FailNow() {
+ panic("Assertion failed")
+}
+
+// Deprecated: That was a method for internal usage that should not have been published. Now just panics.
+func (*CollectT) Reset() {
+ panic("Reset() is deprecated")
+}
+
+// Deprecated: That was a method for internal usage that should not have been published. Now just panics.
+func (*CollectT) Copy(TestingT) {
+ panic("Copy() is deprecated")
+}
+
+// EventuallyWithT asserts that the given condition will be met within waitFor time,
+// periodically checking the target function each tick. In contrast to Eventually,
+// it supplies a CollectT to the condition function, so that the condition
+// function can use the CollectT to call other assertions.
+// The condition is considered "met" if no errors are raised in a tick.
+// The supplied CollectT collects all errors from one tick (if there are any).
+// If the condition is not met before waitFor, the collected errors of
+// the last tick are copied to t.
+//
+// externalValue := false
+// go func() {
+// time.Sleep(8*time.Second)
+// externalValue = true
+// }()
+// assert.EventuallyWithT(t, func(c *assert.CollectT) {
+// // add assertions as needed; any assertion failure will fail the current tick
+// assert.True(c, externalValue, "expected 'externalValue' to be true")
+// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
+func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ var lastFinishedTickErrs []error
+ ch := make(chan []error, 1)
+
+ timer := time.NewTimer(waitFor)
+ defer timer.Stop()
+
+ ticker := time.NewTicker(tick)
+ defer ticker.Stop()
+
+ for tick := ticker.C; ; {
+ select {
+ case <-timer.C:
+ for _, err := range lastFinishedTickErrs {
+ t.Errorf("%v", err)
+ }
+ return Fail(t, "Condition never satisfied", msgAndArgs...)
+ case <-tick:
+ tick = nil
+ go func() {
+ collect := new(CollectT)
+ defer func() {
+ ch <- collect.errors
+ }()
+ condition(collect)
+ }()
+ case errs := <-ch:
+ if len(errs) == 0 {
+ return true
+ }
+ // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached.
+ lastFinishedTickErrs = errs
+ tick = ticker.C
+ }
+ }
+}
+
+// Never asserts that the given condition is never satisfied within waitFor time,
+// periodically checking the target function each tick.
+//
+// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond)
+func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ ch := make(chan bool, 1)
+
+ timer := time.NewTimer(waitFor)
+ defer timer.Stop()
+
+ ticker := time.NewTicker(tick)
+ defer ticker.Stop()
+
+ for tick := ticker.C; ; {
+ select {
+ case <-timer.C:
+ return true
+ case <-tick:
+ tick = nil
+ go func() { ch <- condition() }()
+ case v := <-ch:
+ if v {
+ return Fail(t, "Condition satisfied", msgAndArgs...)
+ }
+ tick = ticker.C
+ }
+ }
+}
+
+// ErrorIs asserts that at least one of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if errors.Is(err, target) {
+ return true
+ }
+
+ var expectedText string
+ if target != nil {
+ expectedText = target.Error()
+ }
+
+ chain := buildErrorChainString(err)
+
+ return Fail(t, fmt.Sprintf("Target error should be in err chain:\n"+
+ "expected: %q\n"+
+ "in chain: %s", expectedText, chain,
+ ), msgAndArgs...)
+}
+
+// NotErrorIs asserts that none of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if !errors.Is(err, target) {
+ return true
+ }
+
+ var expectedText string
+ if target != nil {
+ expectedText = target.Error()
+ }
+
+ chain := buildErrorChainString(err)
+
+ return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+
+ "found: %q\n"+
+ "in chain: %s", expectedText, chain,
+ ), msgAndArgs...)
+}
+
+// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
+// This is a wrapper for errors.As.
+func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if errors.As(err, target) {
+ return true
+ }
+
+ chain := buildErrorChainString(err)
+
+ return Fail(t, fmt.Sprintf("Should be in error chain:\n"+
+ "expected: %q\n"+
+ "in chain: %s", target, chain,
+ ), msgAndArgs...)
+}
+
+func buildErrorChainString(err error) string {
+ if err == nil {
+ return ""
+ }
+
+ e := errors.Unwrap(err)
+ chain := fmt.Sprintf("%q", err.Error())
+ for e != nil {
+ chain += fmt.Sprintf("\n\t%q", e.Error())
+ e = errors.Unwrap(e)
+ }
+ return chain
+}
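+
+// Sketch of the resulting format for a wrapped error, one quoted message per
+// line in the chain:
+//
+//	inner := errors.New("inner")
+//	err := fmt.Errorf("outer: %w", inner)
+//	buildErrorChainString(err) // "outer: inner"\n\t"inner"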
diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go
new file mode 100644
index 0000000..4953981
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/doc.go
@@ -0,0 +1,46 @@
+// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system.
+//
+// # Example Usage
+//
+// The following is a complete example using assert in a standard test function:
+//
+// import (
+// "testing"
+// "github.com/stretchr/testify/assert"
+// )
+//
+// func TestSomething(t *testing.T) {
+//
+// var a string = "Hello"
+// var b string = "Hello"
+//
+// assert.Equal(t, a, b, "The two words should be the same.")
+//
+// }
+//
+// If you assert many times, use the format below:
+//
+// import (
+// "testing"
+// "github.com/stretchr/testify/assert"
+// )
+//
+// func TestSomething(t *testing.T) {
+// assert := assert.New(t)
+//
+// var a string = "Hello"
+// var b string = "Hello"
+//
+// assert.Equal(a, b, "The two words should be the same.")
+// }
+//
+// # Assertions
+//
+// Assertions allow you to easily write test code, and are global funcs in the `assert` package.
+// All assertion functions take, as the first argument, the `*testing.T` object provided by the
+// testing framework. This allows the assertion funcs to write the failings and other details to
+// the correct place.
+//
+// Every assertion function also takes an optional string message as the final argument,
+// allowing custom error messages to be appended to the message the assertion method outputs.
+package assert
diff --git a/vendor/github.com/stretchr/testify/assert/errors.go b/vendor/github.com/stretchr/testify/assert/errors.go
new file mode 100644
index 0000000..ac9dc9d
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/errors.go
@@ -0,0 +1,10 @@
+package assert
+
+import (
+ "errors"
+)
+
+// AnError is an error instance useful for testing. If the code does not care
+// about error specifics, and only needs to return the error for example, this
+// error should be used to make the test code more readable.
+var AnError = errors.New("assert.AnError general error for testing")
diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go
new file mode 100644
index 0000000..df189d2
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/forward_assertions.go
@@ -0,0 +1,16 @@
+package assert
+
+// Assertions provides assertion methods around the
+// TestingT interface.
+type Assertions struct {
+ t TestingT
+}
+
+// New makes a new Assertions object for the specified TestingT.
+func New(t TestingT) *Assertions {
+ return &Assertions{
+ t: t,
+ }
+}
+
+//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs"
diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go
new file mode 100644
index 0000000..861ed4b
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go
@@ -0,0 +1,165 @@
+package assert
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "strings"
+)
+
+// httpCode is a helper that returns the HTTP code of the response. It returns -1 and
+// an error if building a new request fails.
+func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) {
+ w := httptest.NewRecorder()
+ req, err := http.NewRequest(method, url, http.NoBody)
+ if err != nil {
+ return -1, err
+ }
+ req.URL.RawQuery = values.Encode()
+ handler(w, req)
+ return w.Code, nil
+}
+
+// HTTPSuccess asserts that a specified handler returns a success status code.
+//
+// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ code, err := httpCode(handler, method, url, values)
+ if err != nil {
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...)
+ }
+
+ isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent
+ if !isSuccessCode {
+ Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...)
+ }
+
+ return isSuccessCode
+}
+
+// HTTPRedirect asserts that a specified handler returns a redirect status code.
+//
+// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ code, err := httpCode(handler, method, url, values)
+ if err != nil {
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...)
+ }
+
+ isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
+ if !isRedirectCode {
+ Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...)
+ }
+
+ return isRedirectCode
+}
+
+// HTTPError asserts that a specified handler returns an error status code.
+//
+// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ code, err := httpCode(handler, method, url, values)
+ if err != nil {
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...)
+ }
+
+ isErrorCode := code >= http.StatusBadRequest
+ if !isErrorCode {
+ Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...)
+ }
+
+ return isErrorCode
+}
+
+// HTTPStatusCode asserts that a specified handler returns a specified status code.
+//
+// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ code, err := httpCode(handler, method, url, values)
+ if err != nil {
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...)
+ }
+
+ successful := code == statuscode
+ if !successful {
+ Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code), msgAndArgs...)
+ }
+
+ return successful
+}
+
+// HTTPBody is a helper that returns the HTTP body of the response. It returns
+// an empty string if building a new request fails.
+func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string {
+ w := httptest.NewRecorder()
+ if len(values) > 0 {
+ url += "?" + values.Encode()
+ }
+ req, err := http.NewRequest(method, url, http.NoBody)
+ if err != nil {
+ return ""
+ }
+ handler(w, req)
+ return w.Body.String()
+}
+
+// HTTPBodyContains asserts that a specified handler returns a
+// body that contains a string.
+//
+// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ body := HTTPBody(handler, method, url, values)
+
+ contains := strings.Contains(body, fmt.Sprint(str))
+ if !contains {
+ Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...)
+ }
+
+ return contains
+}
+
+// HTTPBodyNotContains asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ body := HTTPBody(handler, method, url, values)
+
+ contains := strings.Contains(body, fmt.Sprint(str))
+ if contains {
+ Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...)
+ }
+
+ return !contains
+}
diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/vendor/golang.org/x/exp/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/exp/PATENTS b/vendor/golang.org/x/exp/PATENTS
new file mode 100644
index 0000000..7330990
--- /dev/null
+++ b/vendor/golang.org/x/exp/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go
new file mode 100644
index 0000000..2c033df
--- /dev/null
+++ b/vendor/golang.org/x/exp/constraints/constraints.go
@@ -0,0 +1,50 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package constraints defines a set of useful constraints to be used
+// with type parameters.
+package constraints
+
+// Signed is a constraint that permits any signed integer type.
+// If future releases of Go add new predeclared signed integer types,
+// this constraint will be modified to include them.
+type Signed interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64
+}
+
+// Unsigned is a constraint that permits any unsigned integer type.
+// If future releases of Go add new predeclared unsigned integer types,
+// this constraint will be modified to include them.
+type Unsigned interface {
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
+}
+
+// Integer is a constraint that permits any integer type.
+// If future releases of Go add new predeclared integer types,
+// this constraint will be modified to include them.
+type Integer interface {
+ Signed | Unsigned
+}
+
+// Float is a constraint that permits any floating-point type.
+// If future releases of Go add new predeclared floating-point types,
+// this constraint will be modified to include them.
+type Float interface {
+ ~float32 | ~float64
+}
+
+// Complex is a constraint that permits any complex numeric type.
+// If future releases of Go add new predeclared complex numeric types,
+// this constraint will be modified to include them.
+type Complex interface {
+ ~complex64 | ~complex128
+}
+
+// Ordered is a constraint that permits any ordered type: any type
+// that supports the operators < <= >= >.
+// If future releases of Go add new ordered types,
+// this constraint will be modified to include them.
+type Ordered interface {
+ Integer | Float | ~string
+}
diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go
new file mode 100644
index 0000000..2540bd6
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/slices.go
@@ -0,0 +1,258 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package slices defines various functions useful with slices of any type.
+// Unless otherwise specified, these functions all apply to the elements
+// of a slice at index 0 <= i < len(s).
+//
+// Note that the less function in IsSortedFunc, SortFunc, SortStableFunc requires a
+// strict weak ordering (https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings),
+// or the sorting may fail to sort correctly. A common case is when sorting slices of
+// floating-point numbers containing NaN values.
+package slices
+
+import "golang.org/x/exp/constraints"
+
+// Equal reports whether two slices are equal: the same length and all
+// elements equal. If the lengths are different, Equal returns false.
+// Otherwise, the elements are compared in increasing index order, and the
+// comparison stops at the first unequal pair.
+// Floating point NaNs are not considered equal.
+func Equal[E comparable](s1, s2 []E) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i := range s1 {
+ if s1[i] != s2[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// EqualFunc reports whether two slices are equal using a comparison
+// function on each pair of elements. If the lengths are different,
+// EqualFunc returns false. Otherwise, the elements are compared in
+// increasing index order, and the comparison stops at the first index
+// for which eq returns false.
+func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i, v1 := range s1 {
+ v2 := s2[i]
+ if !eq(v1, v2) {
+ return false
+ }
+ }
+ return true
+}
+
+// Compare compares the elements of s1 and s2.
+// The elements are compared sequentially, starting at index 0,
+// until one element is not equal to the other.
+// The result of comparing the first non-matching elements is returned.
+// If both slices are equal until one of them ends, the shorter slice is
+// considered less than the longer one.
+// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2.
+// Comparisons involving floating point NaNs are ignored.
+func Compare[E constraints.Ordered](s1, s2 []E) int {
+ s2len := len(s2)
+ for i, v1 := range s1 {
+ if i >= s2len {
+ return +1
+ }
+ v2 := s2[i]
+ switch {
+ case v1 < v2:
+ return -1
+ case v1 > v2:
+ return +1
+ }
+ }
+ if len(s1) < s2len {
+ return -1
+ }
+ return 0
+}
+
+// CompareFunc is like Compare but uses a comparison function
+// on each pair of elements. The elements are compared in increasing
+// index order, and the comparisons stop after the first time cmp
+// returns non-zero.
+// The result is the first non-zero result of cmp; if cmp always
+// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2),
+// and +1 if len(s1) > len(s2).
+func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int {
+ s2len := len(s2)
+ for i, v1 := range s1 {
+ if i >= s2len {
+ return +1
+ }
+ v2 := s2[i]
+ if c := cmp(v1, v2); c != 0 {
+ return c
+ }
+ }
+ if len(s1) < s2len {
+ return -1
+ }
+ return 0
+}
+
+// Index returns the index of the first occurrence of v in s,
+// or -1 if not present.
+func Index[E comparable](s []E, v E) int {
+ for i := range s {
+ if v == s[i] {
+ return i
+ }
+ }
+ return -1
+}
+
+// IndexFunc returns the first index i satisfying f(s[i]),
+// or -1 if none do.
+func IndexFunc[E any](s []E, f func(E) bool) int {
+ for i := range s {
+ if f(s[i]) {
+ return i
+ }
+ }
+ return -1
+}
+
+// Contains reports whether v is present in s.
+func Contains[E comparable](s []E, v E) bool {
+ return Index(s, v) >= 0
+}
+
+// ContainsFunc reports whether at least one
+// element e of s satisfies f(e).
+func ContainsFunc[E any](s []E, f func(E) bool) bool {
+ return IndexFunc(s, f) >= 0
+}
+
+// Insert inserts the values v... into s at index i,
+// returning the modified slice.
+// In the returned slice r, r[i] == v[0].
+// Insert panics if i is out of range.
+// This function is O(len(s) + len(v)).
+func Insert[S ~[]E, E any](s S, i int, v ...E) S {
+ tot := len(s) + len(v)
+ if tot <= cap(s) {
+ s2 := s[:tot]
+ copy(s2[i+len(v):], s[i:])
+ copy(s2[i:], v)
+ return s2
+ }
+ s2 := make(S, tot)
+ copy(s2, s[:i])
+ copy(s2[i:], v)
+ copy(s2[i+len(v):], s[i:])
+ return s2
+}
+
+// Delete removes the elements s[i:j] from s, returning the modified slice.
+// Delete panics if s[i:j] is not a valid slice of s.
+// Delete modifies the contents of the slice s; it does not create a new slice.
+// Delete is O(len(s)-j), so if many items must be deleted, it is better to
+// make a single call deleting them all together than to delete one at a time.
+// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those
+// elements contain pointers you might consider zeroing those elements so that
+// objects they reference can be garbage collected.
+func Delete[S ~[]E, E any](s S, i, j int) S {
+ _ = s[i:j] // bounds check
+
+ return append(s[:i], s[j:]...)
+}
+
+// Replace replaces the elements s[i:j] by the given v, and returns the
+// modified slice. Replace panics if s[i:j] is not a valid slice of s.
+func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
+ _ = s[i:j] // verify that i:j is a valid subslice
+ tot := len(s[:i]) + len(v) + len(s[j:])
+ if tot <= cap(s) {
+ s2 := s[:tot]
+ copy(s2[i+len(v):], s[j:])
+ copy(s2[i:], v)
+ return s2
+ }
+ s2 := make(S, tot)
+ copy(s2, s[:i])
+ copy(s2[i:], v)
+ copy(s2[i+len(v):], s[j:])
+ return s2
+}
+
+// Clone returns a copy of the slice.
+// The elements are copied using assignment, so this is a shallow clone.
+func Clone[S ~[]E, E any](s S) S {
+ // Preserve nil in case it matters.
+ if s == nil {
+ return nil
+ }
+ return append(S([]E{}), s...)
+}
+
+// Compact replaces consecutive runs of equal elements with a single copy.
+// This is like the uniq command found on Unix.
+// Compact modifies the contents of the slice s; it does not create a new slice.
+// When Compact discards m elements in total, it might not modify the elements
+// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
+// zeroing those elements so that objects they reference can be garbage collected.
+func Compact[S ~[]E, E comparable](s S) S {
+ if len(s) < 2 {
+ return s
+ }
+ i := 1
+ for k := 1; k < len(s); k++ {
+ if s[k] != s[k-1] {
+ if i != k {
+ s[i] = s[k]
+ }
+ i++
+ }
+ }
+ return s[:i]
+}
+
+// CompactFunc is like Compact but uses a comparison function.
+func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
+ if len(s) < 2 {
+ return s
+ }
+ i := 1
+ for k := 1; k < len(s); k++ {
+ if !eq(s[k], s[k-1]) {
+ if i != k {
+ s[i] = s[k]
+ }
+ i++
+ }
+ }
+ return s[:i]
+}
+
+// Grow increases the slice's capacity, if necessary, to guarantee space for
+// another n elements. After Grow(n), at least n elements can be appended
+// to the slice without another allocation. If n is negative or too large to
+// allocate the memory, Grow panics.
+func Grow[S ~[]E, E any](s S, n int) S {
+ if n < 0 {
+ panic("cannot be negative")
+ }
+ if n -= cap(s) - len(s); n > 0 {
+		// TODO(https://go.dev/issue/53888): Use []E instead of S as a
+		// workaround for a compiler bug where the runtime.growslice
+		// optimization does not take effect. Revert when the compiler is fixed.
+ s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)]
+ }
+ return s
+}
+
+// Clip removes unused capacity from the slice, returning s[:len(s):len(s)].
+func Clip[S ~[]E, E any](s S) S {
+ return s[:len(s):len(s)]
+}
diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go
new file mode 100644
index 0000000..231b644
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/sort.go
@@ -0,0 +1,128 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+import (
+ "math/bits"
+
+ "golang.org/x/exp/constraints"
+)
+
+// Sort sorts a slice of any ordered type in ascending order.
+// Sort may fail to sort correctly when sorting slices of floating-point
+// numbers containing Not-a-number (NaN) values.
+// Use slices.SortFunc(x, func(a, b float64) bool {return a < b || (math.IsNaN(a) && !math.IsNaN(b))})
+// instead if the input may contain NaNs.
+func Sort[E constraints.Ordered](x []E) {
+ n := len(x)
+ pdqsortOrdered(x, 0, n, bits.Len(uint(n)))
+}
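// A minimal sketch of the NaN-safe pattern recommended above (hypothetical
// values):
//
//	xs := []float64{3, math.NaN(), 1}
//	slices.SortFunc(xs, func(a, b float64) bool {
//		return a < b || (math.IsNaN(a) && !math.IsNaN(b))
//	})
//	// xs == [NaN, 1, 3]: NaNs sort before every other value.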
+
+// SortFunc sorts the slice x in ascending order as determined by the less function.
+// This sort is not guaranteed to be stable.
+//
+// SortFunc requires that less is a strict weak ordering.
+// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings.
+func SortFunc[E any](x []E, less func(a, b E) bool) {
+ n := len(x)
+ pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less)
+}
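// A minimal sketch (hypothetical type): sorting structs by one field, where
// "a.age < b.age" is a strict weak ordering.
//
//	type user struct {
//		name string
//		age  int
//	}
//	us := []user{{"bob", 30}, {"alice", 25}}
//	slices.SortFunc(us, func(a, b user) bool { return a.age < b.age })
//	// us == [{alice 25} {bob 30}]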
+
+// SortStableFunc sorts the slice x while keeping the original order of equal
+// elements, using less to compare elements.
+func SortStableFunc[E any](x []E, less func(a, b E) bool) {
+ stableLessFunc(x, len(x), less)
+}
+
+// IsSorted reports whether x is sorted in ascending order.
+func IsSorted[E constraints.Ordered](x []E) bool {
+ for i := len(x) - 1; i > 0; i-- {
+ if x[i] < x[i-1] {
+ return false
+ }
+ }
+ return true
+}
+
+// IsSortedFunc reports whether x is sorted in ascending order, with less as the
+// comparison function.
+func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool {
+ for i := len(x) - 1; i > 0; i-- {
+ if less(x[i], x[i-1]) {
+ return false
+ }
+ }
+ return true
+}
+
+// BinarySearch searches for target in a sorted slice and returns the position
+// where target is found, or the position where target would appear in the
+// sort order; it also returns a bool saying whether the target is really found
+// in the slice. The slice must be sorted in increasing order.
+func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) {
+ // Inlining is faster than calling BinarySearchFunc with a lambda.
+ n := len(x)
+ // Define x[-1] < target and x[n] >= target.
+ // Invariant: x[i-1] < target, x[j] >= target.
+ i, j := 0, n
+ for i < j {
+ h := int(uint(i+j) >> 1) // avoid overflow when computing h
+ // i ≤ h < j
+ if x[h] < target {
+ i = h + 1 // preserves x[i-1] < target
+ } else {
+ j = h // preserves x[j] >= target
+ }
+ }
+ // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i.
+ return i, i < n && x[i] == target
+}
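// A minimal sketch (hypothetical values): the returned index is the match
// position when found, and the insertion point otherwise.
//
//	s := []int{10, 20, 30}
//	i, found := slices.BinarySearch(s, 20) // i == 1, found == true
//	j, found2 := slices.BinarySearch(s, 25) // j == 2, found2 == false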
+
+// BinarySearchFunc works like BinarySearch, but uses a custom comparison
+// function. The slice must be sorted in increasing order, where "increasing"
+// is defined by cmp. cmp should return 0 if the slice element matches
+// the target, a negative number if the slice element precedes the target,
+// or a positive number if the slice element follows the target.
+// cmp must implement the same ordering as the slice, such that if
+// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice.
+func BinarySearchFunc[E, T any](x []E, target T, cmp func(E, T) int) (int, bool) {
+ n := len(x)
+ // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 .
+ // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0.
+ i, j := 0, n
+ for i < j {
+ h := int(uint(i+j) >> 1) // avoid overflow when computing h
+ // i ≤ h < j
+ if cmp(x[h], target) < 0 {
+ i = h + 1 // preserves cmp(x[i - 1], target) < 0
+ } else {
+ j = h // preserves cmp(x[j], target) >= 0
+ }
+ }
+ // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i.
+ return i, i < n && cmp(x[i], target) == 0
+}
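// A minimal sketch (hypothetical entry type): searching a slice sorted by a
// key field, where the target is just the key rather than a whole element.
//
//	type entry struct {
//		key string
//		val int
//	}
//	es := []entry{{"a", 1}, {"b", 2}, {"c", 3}}
//	i, found := slices.BinarySearchFunc(es, "b", func(e entry, key string) int {
//		return strings.Compare(e.key, key)
//	})
//	// i == 1, found == true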
+
+type sortedHint int // hint for pdqsort when choosing the pivot
+
+const (
+ unknownHint sortedHint = iota
+ increasingHint
+ decreasingHint
+)
+
+// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
+type xorshift uint64
+
+func (r *xorshift) Next() uint64 {
+ *r ^= *r << 13
+ *r ^= *r >> 17
+ *r ^= *r << 5
+ return uint64(*r)
+}
+
+func nextPowerOfTwo(length int) uint {
+ return 1 << bits.Len(uint(length))
+}
diff --git a/vendor/golang.org/x/exp/slices/zsortfunc.go b/vendor/golang.org/x/exp/slices/zsortfunc.go
new file mode 100644
index 0000000..2a63247
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/zsortfunc.go
@@ -0,0 +1,479 @@
+// Code generated by gen_sort_variants.go; DO NOT EDIT.
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+// insertionSortLessFunc sorts data[a:b] using insertion sort.
+func insertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && less(data[j], data[j-1]); j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
+// siftDownLessFunc implements the heap property on data[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && less(data[first+child], data[first+child+1]) {
+ child++
+ }
+ if !less(data[first+root], data[first+child]) {
+ return
+ }
+ data[first+root], data[first+child] = data[first+child], data[first+root]
+ root = child
+ }
+}
+
+func heapSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDownLessFunc(data, i, hi, first, less)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data[first], data[first+i] = data[first+i], data[first]
+ siftDownLessFunc(data, lo, i, first, less)
+ }
+}
+
+// pdqsortLessFunc sorts data[a:b].
+// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
+// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
+// C++ implementation: https://github.com/orlp/pdqsort
+// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
+// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
+func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) {
+ const maxInsertion = 12
+
+ var (
+ wasBalanced = true // whether the last partitioning was reasonably balanced
+ wasPartitioned = true // whether the slice was already partitioned
+ )
+
+ for {
+ length := b - a
+
+ if length <= maxInsertion {
+ insertionSortLessFunc(data, a, b, less)
+ return
+ }
+
+ // Fall back to heapsort if too many bad choices were made.
+ if limit == 0 {
+ heapSortLessFunc(data, a, b, less)
+ return
+ }
+
+ // If the last partitioning was imbalanced, we need to break patterns.
+ if !wasBalanced {
+ breakPatternsLessFunc(data, a, b, less)
+ limit--
+ }
+
+ pivot, hint := choosePivotLessFunc(data, a, b, less)
+ if hint == decreasingHint {
+ reverseRangeLessFunc(data, a, b, less)
+ // The chosen pivot was pivot-a elements after the start of the array.
+ // After reversing it is pivot-a elements before the end of the array.
+ // The idea came from Rust's implementation.
+ pivot = (b - 1) - (pivot - a)
+ hint = increasingHint
+ }
+
+ // The slice is likely already sorted.
+ if wasBalanced && wasPartitioned && hint == increasingHint {
+ if partialInsertionSortLessFunc(data, a, b, less) {
+ return
+ }
+ }
+
+ // The slice probably contains many duplicate elements; partition it into
+ // elements equal to and elements greater than the pivot.
+ if a > 0 && !less(data[a-1], data[pivot]) {
+ mid := partitionEqualLessFunc(data, a, b, pivot, less)
+ a = mid
+ continue
+ }
+
+ mid, alreadyPartitioned := partitionLessFunc(data, a, b, pivot, less)
+ wasPartitioned = alreadyPartitioned
+
+ leftLen, rightLen := mid-a, b-mid
+ balanceThreshold := length / 8
+ if leftLen < rightLen {
+ wasBalanced = leftLen >= balanceThreshold
+ pdqsortLessFunc(data, a, mid, limit, less)
+ a = mid + 1
+ } else {
+ wasBalanced = rightLen >= balanceThreshold
+ pdqsortLessFunc(data, mid+1, b, limit, less)
+ b = mid
+ }
+ }
+}
+
+// partitionLessFunc does one quicksort partition.
+// Let p = data[pivot]
+// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int, alreadyPartitioned bool) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for i <= j && less(data[i], data[a]) {
+ i++
+ }
+ for i <= j && !less(data[j], data[a]) {
+ j--
+ }
+ if i > j {
+ data[j], data[a] = data[a], data[j]
+ return j, true
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+
+ for {
+ for i <= j && less(data[i], data[a]) {
+ i++
+ }
+ for i <= j && !less(data[j], data[a]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ data[j], data[a] = data[a], data[j]
+ return j, false
+}
+
+// partitionEqualLessFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It assumes that data[a:b] does not contain elements smaller than data[pivot].
+func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for {
+ for i <= j && !less(data[a], data[i]) {
+ i++
+ }
+ for i <= j && less(data[a], data[j]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ return i
+}
+
+// partialInsertionSortLessFunc partially sorts a slice, returns true if the slice is sorted at the end.
+func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) bool {
+ const (
+ maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
+ shortestShifting = 50 // don't shift any elements on short arrays
+ )
+ i := a + 1
+ for j := 0; j < maxSteps; j++ {
+ for i < b && !less(data[i], data[i-1]) {
+ i++
+ }
+
+ if i == b {
+ return true
+ }
+
+ if b-a < shortestShifting {
+ return false
+ }
+
+ data[i], data[i-1] = data[i-1], data[i]
+
+ // Shift the smaller one to the left.
+ if i-a >= 2 {
+ for j := i - 1; j >= 1; j-- {
+ if !less(data[j], data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ // Shift the greater one to the right.
+ if b-i >= 2 {
+ for j := i + 1; j < b; j++ {
+ if !less(data[j], data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ }
+ return false
+}
+
+// breakPatternsLessFunc scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
+ length := b - a
+ if length >= 8 {
+ random := xorshift(length)
+ modulus := nextPowerOfTwo(length)
+
+ for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+ other := int(uint(random.Next()) & (modulus - 1))
+ if other >= length {
+ other -= length
+ }
+ data[idx], data[a+other] = data[a+other], data[idx]
+ }
+ }
+}
+
+// choosePivotLessFunc chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (pivot int, hint sortedHint) {
+ const (
+ shortestNinther = 50
+ maxSwaps = 4 * 3
+ )
+
+ l := b - a
+
+ var (
+ swaps int
+ i = a + l/4*1
+ j = a + l/4*2
+ k = a + l/4*3
+ )
+
+ if l >= 8 {
+ if l >= shortestNinther {
+ // Tukey ninther method, the idea came from Rust's implementation.
+ i = medianAdjacentLessFunc(data, i, &swaps, less)
+ j = medianAdjacentLessFunc(data, j, &swaps, less)
+ k = medianAdjacentLessFunc(data, k, &swaps, less)
+ }
+ // Find the median among i, j, k and store it in j.
+ j = medianLessFunc(data, i, j, k, &swaps, less)
+ }
+
+ switch swaps {
+ case 0:
+ return j, increasingHint
+ case maxSwaps:
+ return j, decreasingHint
+ default:
+ return j, unknownHint
+ }
+}
+
+// order2LessFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
+func order2LessFunc[E any](data []E, a, b int, swaps *int, less func(a, b E) bool) (int, int) {
+ if less(data[b], data[a]) {
+ *swaps++
+ return b, a
+ }
+ return a, b
+}
+
+// medianLessFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
+func medianLessFunc[E any](data []E, a, b, c int, swaps *int, less func(a, b E) bool) int {
+ a, b = order2LessFunc(data, a, b, swaps, less)
+ b, c = order2LessFunc(data, b, c, swaps, less)
+ a, b = order2LessFunc(data, a, b, swaps, less)
+ return b
+}
+
+// medianAdjacentLessFunc returns the index of the median of data[a-1], data[a], data[a+1].
+func medianAdjacentLessFunc[E any](data []E, a int, swaps *int, less func(a, b E) bool) int {
+ return medianLessFunc(data, a-1, a, a+1, swaps, less)
+}
+
+func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
+ i := a
+ j := b - 1
+ for i < j {
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+}
+
+func swapRangeLessFunc[E any](data []E, a, b, n int, less func(a, b E) bool) {
+ for i := 0; i < n; i++ {
+ data[a+i], data[b+i] = data[b+i], data[a+i]
+ }
+}
+
+func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) {
+ blockSize := 20 // must be > 0
+ a, b := 0, blockSize
+ for b <= n {
+ insertionSortLessFunc(data, a, b, less)
+ a = b
+ b += blockSize
+ }
+ insertionSortLessFunc(data, a, n, less)
+
+ for blockSize < n {
+ a, b = 0, 2*blockSize
+ for b <= n {
+ symMergeLessFunc(data, a, a+blockSize, b, less)
+ a = b
+ b += 2 * blockSize
+ }
+ if m := a + blockSize; m < n {
+ symMergeLessFunc(data, a, m, n, less)
+ }
+ blockSize *= 2
+ }
+}
+
+// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-m. Without loss of generality, M < N.
+// The recursion depth is bounded by ceil(log(N+M)).
+// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
+// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[a] into data[m:b]
+ // if data[a:m] only contains one element.
+ if m-a == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] >= data[a] for m <= i < b.
+ // Exit the search loop with i == b in case no such index exists.
+ i := m
+ j := b
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if less(data[h], data[a]) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[a] reaches the position before i.
+ for k := a; k < i-1; k++ {
+ data[k], data[k+1] = data[k+1], data[k]
+ }
+ return
+ }
+
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[m] into data[a:m]
+ // if data[m:b] only contains one element.
+ if b-m == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] > data[m] for a <= i < m.
+ // Exit the search loop with i == m in case no such index exists.
+ i := a
+ j := m
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if !less(data[m], data[h]) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[m] reaches the position i.
+ for k := m; k > i; k-- {
+ data[k], data[k-1] = data[k-1], data[k]
+ }
+ return
+ }
+
+ mid := int(uint(a+b) >> 1)
+ n := mid + m
+ var start, r int
+ if m > mid {
+ start = n - b
+ r = mid
+ } else {
+ start = a
+ r = m
+ }
+ p := n - 1
+
+ for start < r {
+ c := int(uint(start+r) >> 1)
+ if !less(data[p-c], data[c]) {
+ start = c + 1
+ } else {
+ r = c
+ }
+ }
+
+ end := n - start
+ if start < m && m < end {
+ rotateLessFunc(data, start, m, end, less)
+ }
+ if a < start && start < mid {
+ symMergeLessFunc(data, a, start, mid, less)
+ }
+ if mid < end && end < b {
+ symMergeLessFunc(data, mid, end, b, less)
+ }
+}
+
+// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// Data of the form 'x u v y' is changed to 'x v u y'.
+// rotate performs at most b-a many calls to data.Swap,
+// and it assumes non-degenerate arguments: a < m && m < b.
+func rotateLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
+ i := m - a
+ j := b - m
+
+ for i != j {
+ if i > j {
+ swapRangeLessFunc(data, m-i, m, j, less)
+ i -= j
+ } else {
+ swapRangeLessFunc(data, m-i, m+j-i, i, less)
+ j -= i
+ }
+ }
+ // i == j
+ swapRangeLessFunc(data, m-i, m, i, less)
+}
diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go
new file mode 100644
index 0000000..efaa1c8
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/zsortordered.go
@@ -0,0 +1,481 @@
+// Code generated by gen_sort_variants.go; DO NOT EDIT.
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+import "golang.org/x/exp/constraints"
+
+// insertionSortOrdered sorts data[a:b] using insertion sort.
+func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && (data[j] < data[j-1]); j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
+// siftDownOrdered implements the heap property on data[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && (data[first+child] < data[first+child+1]) {
+ child++
+ }
+ if !(data[first+root] < data[first+child]) {
+ return
+ }
+ data[first+root], data[first+child] = data[first+child], data[first+root]
+ root = child
+ }
+}
+
+func heapSortOrdered[E constraints.Ordered](data []E, a, b int) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDownOrdered(data, i, hi, first)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data[first], data[first+i] = data[first+i], data[first]
+ siftDownOrdered(data, lo, i, first)
+ }
+}
+
+// pdqsortOrdered sorts data[a:b].
+// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
+// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
+// C++ implementation: https://github.com/orlp/pdqsort
+// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
+// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
+func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {
+ const maxInsertion = 12
+
+ var (
+ wasBalanced = true // whether the last partitioning was reasonably balanced
+ wasPartitioned = true // whether the slice was already partitioned
+ )
+
+ for {
+ length := b - a
+
+ if length <= maxInsertion {
+ insertionSortOrdered(data, a, b)
+ return
+ }
+
+ // Fall back to heapsort if too many bad choices were made.
+ if limit == 0 {
+ heapSortOrdered(data, a, b)
+ return
+ }
+
+ // If the last partitioning was imbalanced, we need to break patterns.
+ if !wasBalanced {
+ breakPatternsOrdered(data, a, b)
+ limit--
+ }
+
+ pivot, hint := choosePivotOrdered(data, a, b)
+ if hint == decreasingHint {
+ reverseRangeOrdered(data, a, b)
+ // The chosen pivot was pivot-a elements after the start of the array.
+ // After reversing it is pivot-a elements before the end of the array.
+ // The idea came from Rust's implementation.
+ pivot = (b - 1) - (pivot - a)
+ hint = increasingHint
+ }
+
+ // The slice is likely already sorted.
+ if wasBalanced && wasPartitioned && hint == increasingHint {
+ if partialInsertionSortOrdered(data, a, b) {
+ return
+ }
+ }
+
+ // The slice probably contains many duplicate elements; partition it into
+ // elements equal to and elements greater than the pivot.
+ if a > 0 && !(data[a-1] < data[pivot]) {
+ mid := partitionEqualOrdered(data, a, b, pivot)
+ a = mid
+ continue
+ }
+
+ mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot)
+ wasPartitioned = alreadyPartitioned
+
+ leftLen, rightLen := mid-a, b-mid
+ balanceThreshold := length / 8
+ if leftLen < rightLen {
+ wasBalanced = leftLen >= balanceThreshold
+ pdqsortOrdered(data, a, mid, limit)
+ a = mid + 1
+ } else {
+ wasBalanced = rightLen >= balanceThreshold
+ pdqsortOrdered(data, mid+1, b, limit)
+ b = mid
+ }
+ }
+}
+
+// partitionOrdered does one quicksort partition.
+// Let p = data[pivot]
+// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for i <= j && (data[i] < data[a]) {
+ i++
+ }
+ for i <= j && !(data[j] < data[a]) {
+ j--
+ }
+ if i > j {
+ data[j], data[a] = data[a], data[j]
+ return j, true
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+
+ for {
+ for i <= j && (data[i] < data[a]) {
+ i++
+ }
+ for i <= j && !(data[j] < data[a]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ data[j], data[a] = data[a], data[j]
+ return j, false
+}
+
+// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It assumes that data[a:b] does not contain elements smaller than data[pivot].
+func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for {
+ for i <= j && !(data[a] < data[i]) {
+ i++
+ }
+ for i <= j && (data[a] < data[j]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ return i
+}
+
+// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end.
+func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
+ const (
+ maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
+ shortestShifting = 50 // don't shift any elements on short arrays
+ )
+ i := a + 1
+ for j := 0; j < maxSteps; j++ {
+ for i < b && !(data[i] < data[i-1]) {
+ i++
+ }
+
+ if i == b {
+ return true
+ }
+
+ if b-a < shortestShifting {
+ return false
+ }
+
+ data[i], data[i-1] = data[i-1], data[i]
+
+ // Shift the smaller one to the left.
+ if i-a >= 2 {
+ for j := i - 1; j >= 1; j-- {
+ if !(data[j] < data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ // Shift the greater one to the right.
+ if b-i >= 2 {
+ for j := i + 1; j < b; j++ {
+ if !(data[j] < data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ }
+ return false
+}
+
+// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
+ length := b - a
+ if length >= 8 {
+ random := xorshift(length)
+ modulus := nextPowerOfTwo(length)
+
+ for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+ other := int(uint(random.Next()) & (modulus - 1))
+ if other >= length {
+ other -= length
+ }
+ data[idx], data[a+other] = data[a+other], data[idx]
+ }
+ }
+}
+
+// choosePivotOrdered chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) {
+ const (
+ shortestNinther = 50
+ maxSwaps = 4 * 3
+ )
+
+ l := b - a
+
+ var (
+ swaps int
+ i = a + l/4*1
+ j = a + l/4*2
+ k = a + l/4*3
+ )
+
+ if l >= 8 {
+ if l >= shortestNinther {
+ // Tukey ninther method, the idea came from Rust's implementation.
+ i = medianAdjacentOrdered(data, i, &swaps)
+ j = medianAdjacentOrdered(data, j, &swaps)
+ k = medianAdjacentOrdered(data, k, &swaps)
+ }
+ // Find the median among i, j, k and store it in j.
+ j = medianOrdered(data, i, j, k, &swaps)
+ }
+
+ switch swaps {
+ case 0:
+ return j, increasingHint
+ case maxSwaps:
+ return j, decreasingHint
+ default:
+ return j, unknownHint
+ }
+}
+
+// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
+func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) {
+ if data[b] < data[a] {
+ *swaps++
+ return b, a
+ }
+ return a, b
+}
+
+// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
+func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int {
+ a, b = order2Ordered(data, a, b, swaps)
+ b, c = order2Ordered(data, b, c, swaps)
+ a, b = order2Ordered(data, a, b, swaps)
+ return b
+}
+
+// medianAdjacentOrdered returns the index of the median of data[a-1], data[a], data[a+1].
+func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int {
+ return medianOrdered(data, a-1, a, a+1, swaps)
+}
+
+func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) {
+ i := a
+ j := b - 1
+ for i < j {
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+}
+
+func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) {
+ for i := 0; i < n; i++ {
+ data[a+i], data[b+i] = data[b+i], data[a+i]
+ }
+}
+
+func stableOrdered[E constraints.Ordered](data []E, n int) {
+ blockSize := 20 // must be > 0
+ a, b := 0, blockSize
+ for b <= n {
+ insertionSortOrdered(data, a, b)
+ a = b
+ b += blockSize
+ }
+ insertionSortOrdered(data, a, n)
+
+ for blockSize < n {
+ a, b = 0, 2*blockSize
+ for b <= n {
+ symMergeOrdered(data, a, a+blockSize, b)
+ a = b
+ b += 2 * blockSize
+ }
+ if m := a + blockSize; m < n {
+ symMergeOrdered(data, a, m, n)
+ }
+ blockSize *= 2
+ }
+}
+
+// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-m. Without loss of generality, M < N.
+// The recursion depth is bounded by ceil(log(N+M)).
+// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
+// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[a] into data[m:b]
+ // if data[a:m] only contains one element.
+ if m-a == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] >= data[a] for m <= i < b.
+ // Exit the search loop with i == b in case no such index exists.
+ i := m
+ j := b
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if data[h] < data[a] {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[a] reaches the position before i.
+ for k := a; k < i-1; k++ {
+ data[k], data[k+1] = data[k+1], data[k]
+ }
+ return
+ }
+
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[m] into data[a:m]
+ // if data[m:b] only contains one element.
+ if b-m == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] > data[m] for a <= i < m.
+ // Exit the search loop with i == m in case no such index exists.
+ i := a
+ j := m
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if !(data[m] < data[h]) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[m] reaches the position i.
+ for k := m; k > i; k-- {
+ data[k], data[k-1] = data[k-1], data[k]
+ }
+ return
+ }
+
+ mid := int(uint(a+b) >> 1)
+ n := mid + m
+ var start, r int
+ if m > mid {
+ start = n - b
+ r = mid
+ } else {
+ start = a
+ r = m
+ }
+ p := n - 1
+
+ for start < r {
+ c := int(uint(start+r) >> 1)
+ if !(data[p-c] < data[c]) {
+ start = c + 1
+ } else {
+ r = c
+ }
+ }
+
+ end := n - start
+ if start < m && m < end {
+ rotateOrdered(data, start, m, end)
+ }
+ if a < start && start < mid {
+ symMergeOrdered(data, a, start, mid)
+ }
+ if mid < end && end < b {
+ symMergeOrdered(data, mid, end, b)
+ }
+}
+
+// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// Data of the form 'x u v y' is changed to 'x v u y'.
+// rotate performs at most b-a many calls to data.Swap,
+// and it assumes non-degenerate arguments: a < m && m < b.
+func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) {
+ i := m - a
+ j := b - m
+
+ for i != j {
+ if i > j {
+ swapRangeOrdered(data, m-i, m, j)
+ i -= j
+ } else {
+ swapRangeOrdered(data, m-i, m+j-i, i)
+ j -= i
+ }
+ }
+ // i == j
+ swapRangeOrdered(data, m-i, m, i)
+}
diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE
new file mode 100644
index 0000000..2a7cf70
--- /dev/null
+++ b/vendor/golang.org/x/net/LICENSE
@@ -0,0 +1,27 @@
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS
new file mode 100644
index 0000000..7330990
--- /dev/null
+++ b/vendor/golang.org/x/net/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go
new file mode 100644
index 0000000..cf66309
--- /dev/null
+++ b/vendor/golang.org/x/net/context/context.go
@@ -0,0 +1,56 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package context defines the Context type, which carries deadlines,
+// cancelation signals, and other request-scoped values across API boundaries
+// and between processes.
+// As of Go 1.7 this package is available in the standard library under the
+// name context. https://golang.org/pkg/context.
+//
+// Incoming requests to a server should create a Context, and outgoing calls to
+// servers should accept a Context. The chain of function calls between them must
+// propagate the Context, optionally replacing it with a modified copy created
+// using WithDeadline, WithTimeout, WithCancel, or WithValue.
+//
+// Programs that use Contexts should follow these rules to keep interfaces
+// consistent across packages and enable static analysis tools to check context
+// propagation:
+//
+// Do not store Contexts inside a struct type; instead, pass a Context
+// explicitly to each function that needs it. The Context should be the first
+// parameter, typically named ctx:
+//
+// func DoSomething(ctx context.Context, arg Arg) error {
+// // ... use ctx ...
+// }
+//
+// Do not pass a nil Context, even if a function permits it. Pass context.TODO
+// if you are unsure about which Context to use.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+//
+// The same Context may be passed to functions running in different goroutines;
+// Contexts are safe for simultaneous use by multiple goroutines.
+//
+// See http://blog.golang.org/context for example code for a server that uses
+// Contexts.
+package context // import "golang.org/x/net/context"
+
+// Background returns a non-nil, empty Context. It is never canceled, has no
+// values, and has no deadline. It is typically used by the main function,
+// initialization, and tests, and as the top-level Context for incoming
+// requests.
+func Background() Context {
+ return background
+}
+
+// TODO returns a non-nil, empty Context. Code should use context.TODO when
+// it's unclear which Context to use or it is not yet available (because the
+// surrounding function has not yet been extended to accept a Context
+// parameter). TODO is recognized by static analysis tools that determine
+// whether Contexts are propagated correctly in a program.
+func TODO() Context {
+ return todo
+}
diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go
new file mode 100644
index 0000000..0c1b867
--- /dev/null
+++ b/vendor/golang.org/x/net/context/go17.go
@@ -0,0 +1,72 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.7
+
+package context
+
+import (
+ "context" // standard library's context, as of Go 1.7
+ "time"
+)
+
+var (
+ todo = context.TODO()
+ background = context.Background()
+)
+
+// Canceled is the error returned by Context.Err when the context is canceled.
+var Canceled = context.Canceled
+
+// DeadlineExceeded is the error returned by Context.Err when the context's
+// deadline passes.
+var DeadlineExceeded = context.DeadlineExceeded
+
+// WithCancel returns a copy of parent with a new Done channel. The returned
+// context's Done channel is closed when the returned cancel function is called
+// or when the parent context's Done channel is closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
+ ctx, f := context.WithCancel(parent)
+ return ctx, f
+}
+
+// WithDeadline returns a copy of the parent context with the deadline adjusted
+// to be no later than d. If the parent's deadline is already earlier than d,
+// WithDeadline(parent, d) is semantically equivalent to parent. The returned
+// context's Done channel is closed when the deadline expires, when the returned
+// cancel function is called, or when the parent context's Done channel is
+// closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
+ ctx, f := context.WithDeadline(parent, deadline)
+ return ctx, f
+}
+
+// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete:
+//
+// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
+// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
+// defer cancel() // releases resources if slowOperation completes before timeout elapses
+// return slowOperation(ctx)
+// }
+func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
+ return WithDeadline(parent, time.Now().Add(timeout))
+}
+
+// WithValue returns a copy of parent in which the value associated with key is
+// val.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+func WithValue(parent Context, key interface{}, val interface{}) Context {
+ return context.WithValue(parent, key, val)
+}
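// A minimal sketch (hypothetical key names), following the package guidance
// to use an unexported key type so keys cannot collide across packages:
//
//	type ctxKey int
//	const requestIDKey ctxKey = 0
//
//	ctx := context.WithValue(context.Background(), requestIDKey, "abc123")
//	if id, ok := ctx.Value(requestIDKey).(string); ok {
//		// id == "abc123"
//	}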
diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go
new file mode 100644
index 0000000..e31e35a
--- /dev/null
+++ b/vendor/golang.org/x/net/context/go19.go
@@ -0,0 +1,20 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.9
+
+package context
+
+import "context" // standard library's context, as of Go 1.7
+
+// A Context carries a deadline, a cancelation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+type Context = context.Context
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc = context.CancelFunc
diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go
new file mode 100644
index 0000000..065ff3d
--- /dev/null
+++ b/vendor/golang.org/x/net/context/pre_go17.go
@@ -0,0 +1,300 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.7
+
+package context
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+ "time"
+)
+
+// An emptyCtx is never canceled, has no values, and has no deadline. It is not
+// struct{}, since vars of this type must have distinct addresses.
+type emptyCtx int
+
+func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
+ return
+}
+
+func (*emptyCtx) Done() <-chan struct{} {
+ return nil
+}
+
+func (*emptyCtx) Err() error {
+ return nil
+}
+
+func (*emptyCtx) Value(key interface{}) interface{} {
+ return nil
+}
+
+func (e *emptyCtx) String() string {
+ switch e {
+ case background:
+ return "context.Background"
+ case todo:
+ return "context.TODO"
+ }
+ return "unknown empty Context"
+}
+
+var (
+ background = new(emptyCtx)
+ todo = new(emptyCtx)
+)
+
+// Canceled is the error returned by Context.Err when the context is canceled.
+var Canceled = errors.New("context canceled")
+
+// DeadlineExceeded is the error returned by Context.Err when the context's
+// deadline passes.
+var DeadlineExceeded = errors.New("context deadline exceeded")
+
+// WithCancel returns a copy of parent with a new Done channel. The returned
+// context's Done channel is closed when the returned cancel function is called
+// or when the parent context's Done channel is closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
+ c := newCancelCtx(parent)
+ propagateCancel(parent, c)
+ return c, func() { c.cancel(true, Canceled) }
+}
+
+// newCancelCtx returns an initialized cancelCtx.
+func newCancelCtx(parent Context) *cancelCtx {
+ return &cancelCtx{
+ Context: parent,
+ done: make(chan struct{}),
+ }
+}
+
+// propagateCancel arranges for child to be canceled when parent is.
+func propagateCancel(parent Context, child canceler) {
+ if parent.Done() == nil {
+ return // parent is never canceled
+ }
+ if p, ok := parentCancelCtx(parent); ok {
+ p.mu.Lock()
+ if p.err != nil {
+ // parent has already been canceled
+ child.cancel(false, p.err)
+ } else {
+ if p.children == nil {
+ p.children = make(map[canceler]bool)
+ }
+ p.children[child] = true
+ }
+ p.mu.Unlock()
+ } else {
+ go func() {
+ select {
+ case <-parent.Done():
+ child.cancel(false, parent.Err())
+ case <-child.Done():
+ }
+ }()
+ }
+}
+
+// parentCancelCtx follows a chain of parent references until it finds a
+// *cancelCtx. This function understands how each of the concrete types in this
+// package represents its parent.
+func parentCancelCtx(parent Context) (*cancelCtx, bool) {
+ for {
+ switch c := parent.(type) {
+ case *cancelCtx:
+ return c, true
+ case *timerCtx:
+ return c.cancelCtx, true
+ case *valueCtx:
+ parent = c.Context
+ default:
+ return nil, false
+ }
+ }
+}
+
+// removeChild removes a context from its parent.
+func removeChild(parent Context, child canceler) {
+ p, ok := parentCancelCtx(parent)
+ if !ok {
+ return
+ }
+ p.mu.Lock()
+ if p.children != nil {
+ delete(p.children, child)
+ }
+ p.mu.Unlock()
+}
+
+// A canceler is a context type that can be canceled directly. The
+// implementations are *cancelCtx and *timerCtx.
+type canceler interface {
+ cancel(removeFromParent bool, err error)
+ Done() <-chan struct{}
+}
+
+// A cancelCtx can be canceled. When canceled, it also cancels any children
+// that implement canceler.
+type cancelCtx struct {
+ Context
+
+ done chan struct{} // closed by the first cancel call.
+
+ mu sync.Mutex
+ children map[canceler]bool // set to nil by the first cancel call
+ err error // set to non-nil by the first cancel call
+}
+
+func (c *cancelCtx) Done() <-chan struct{} {
+ return c.done
+}
+
+func (c *cancelCtx) Err() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.err
+}
+
+func (c *cancelCtx) String() string {
+ return fmt.Sprintf("%v.WithCancel", c.Context)
+}
+
+// cancel closes c.done, cancels each of c's children, and, if
+// removeFromParent is true, removes c from its parent's children.
+func (c *cancelCtx) cancel(removeFromParent bool, err error) {
+ if err == nil {
+ panic("context: internal error: missing cancel error")
+ }
+ c.mu.Lock()
+ if c.err != nil {
+ c.mu.Unlock()
+ return // already canceled
+ }
+ c.err = err
+ close(c.done)
+ for child := range c.children {
+ // NOTE: acquiring the child's lock while holding parent's lock.
+ child.cancel(false, err)
+ }
+ c.children = nil
+ c.mu.Unlock()
+
+ if removeFromParent {
+ removeChild(c.Context, c)
+ }
+}
+
+// WithDeadline returns a copy of the parent context with the deadline adjusted
+// to be no later than d. If the parent's deadline is already earlier than d,
+// WithDeadline(parent, d) is semantically equivalent to parent. The returned
+// context's Done channel is closed when the deadline expires, when the returned
+// cancel function is called, or when the parent context's Done channel is
+// closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
+ if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
+ // The current deadline is already sooner than the new one.
+ return WithCancel(parent)
+ }
+ c := &timerCtx{
+ cancelCtx: newCancelCtx(parent),
+ deadline: deadline,
+ }
+ propagateCancel(parent, c)
+ d := deadline.Sub(time.Now())
+ if d <= 0 {
+ c.cancel(true, DeadlineExceeded) // deadline has already passed
+ return c, func() { c.cancel(true, Canceled) }
+ }
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.err == nil {
+ c.timer = time.AfterFunc(d, func() {
+ c.cancel(true, DeadlineExceeded)
+ })
+ }
+ return c, func() { c.cancel(true, Canceled) }
+}
+
+// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
+// implement Done and Err. It implements cancel by stopping its timer then
+// delegating to cancelCtx.cancel.
+type timerCtx struct {
+ *cancelCtx
+ timer *time.Timer // Under cancelCtx.mu.
+
+ deadline time.Time
+}
+
+func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
+ return c.deadline, true
+}
+
+func (c *timerCtx) String() string {
+ return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
+}
+
+func (c *timerCtx) cancel(removeFromParent bool, err error) {
+ c.cancelCtx.cancel(false, err)
+ if removeFromParent {
+ // Remove this timerCtx from its parent cancelCtx's children.
+ removeChild(c.cancelCtx.Context, c)
+ }
+ c.mu.Lock()
+ if c.timer != nil {
+ c.timer.Stop()
+ c.timer = nil
+ }
+ c.mu.Unlock()
+}
+
+// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete:
+//
+// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
+// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
+// defer cancel() // releases resources if slowOperation completes before timeout elapses
+// return slowOperation(ctx)
+// }
+func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
+ return WithDeadline(parent, time.Now().Add(timeout))
+}
+
+// WithValue returns a copy of parent in which the value associated with key is
+// val.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+func WithValue(parent Context, key interface{}, val interface{}) Context {
+ return &valueCtx{parent, key, val}
+}
+
+// A valueCtx carries a key-value pair. It implements Value for that key and
+// delegates all other calls to the embedded Context.
+type valueCtx struct {
+ Context
+ key, val interface{}
+}
+
+func (c *valueCtx) String() string {
+ return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
+}
+
+func (c *valueCtx) Value(key interface{}) interface{} {
+ if c.key == key {
+ return c.val
+ }
+ return c.Context.Value(key)
+}
diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go
new file mode 100644
index 0000000..ec5a638
--- /dev/null
+++ b/vendor/golang.org/x/net/context/pre_go19.go
@@ -0,0 +1,109 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.9
+
+package context
+
+import "time"
+
+// A Context carries a deadline, a cancelation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+type Context interface {
+ // Deadline returns the time when work done on behalf of this context
+ // should be canceled. Deadline returns ok==false when no deadline is
+ // set. Successive calls to Deadline return the same results.
+ Deadline() (deadline time.Time, ok bool)
+
+ // Done returns a channel that's closed when work done on behalf of this
+ // context should be canceled. Done may return nil if this context can
+ // never be canceled. Successive calls to Done return the same value.
+ //
+ // WithCancel arranges for Done to be closed when cancel is called;
+ // WithDeadline arranges for Done to be closed when the deadline
+ // expires; WithTimeout arranges for Done to be closed when the timeout
+ // elapses.
+ //
+ // Done is provided for use in select statements:
+ //
+ // // Stream generates values with DoSomething and sends them to out
+ // // until DoSomething returns an error or ctx.Done is closed.
+ // func Stream(ctx context.Context, out chan<- Value) error {
+ // for {
+ // v, err := DoSomething(ctx)
+ // if err != nil {
+ // return err
+ // }
+ // select {
+ // case <-ctx.Done():
+ // return ctx.Err()
+ // case out <- v:
+ // }
+ // }
+ // }
+ //
+ // See http://blog.golang.org/pipelines for more examples of how to use
+ // a Done channel for cancelation.
+ Done() <-chan struct{}
+
+ // Err returns a non-nil error value after Done is closed. Err returns
+ // Canceled if the context was canceled or DeadlineExceeded if the
+ // context's deadline passed. No other values for Err are defined.
+ // After Done is closed, successive calls to Err return the same value.
+ Err() error
+
+ // Value returns the value associated with this context for key, or nil
+ // if no value is associated with key. Successive calls to Value with
+ // the same key return the same result.
+ //
+ // Use context values only for request-scoped data that transits
+ // processes and API boundaries, not for passing optional parameters to
+ // functions.
+ //
+ // A key identifies a specific value in a Context. Functions that wish
+ // to store values in Context typically allocate a key in a global
+ // variable then use that key as the argument to context.WithValue and
+ // Context.Value. A key can be any type that supports equality;
+ // packages should define keys as an unexported type to avoid
+ // collisions.
+ //
+ // Packages that define a Context key should provide type-safe accessors
+ // for the values stored using that key:
+ //
+ // // Package user defines a User type that's stored in Contexts.
+ // package user
+ //
+ // import "golang.org/x/net/context"
+ //
+ // // User is the type of value stored in the Contexts.
+ // type User struct {...}
+ //
+ // // key is an unexported type for keys defined in this package.
+ // // This prevents collisions with keys defined in other packages.
+ // type key int
+ //
+ // // userKey is the key for user.User values in Contexts. It is
+ // // unexported; clients use user.NewContext and user.FromContext
+ // // instead of using this key directly.
+ // var userKey key = 0
+ //
+ // // NewContext returns a new Context that carries value u.
+ // func NewContext(ctx context.Context, u *User) context.Context {
+ // return context.WithValue(ctx, userKey, u)
+ // }
+ //
+ // // FromContext returns the User value stored in ctx, if any.
+ // func FromContext(ctx context.Context) (*User, bool) {
+ // u, ok := ctx.Value(userKey).(*User)
+ // return u, ok
+ // }
+ Value(key interface{}) interface{}
+}
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc func()
diff --git a/vendor/golang.org/x/net/html/atom/atom.go b/vendor/golang.org/x/net/html/atom/atom.go
new file mode 100644
index 0000000..cd0a8ac
--- /dev/null
+++ b/vendor/golang.org/x/net/html/atom/atom.go
@@ -0,0 +1,78 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package atom provides integer codes (also known as atoms) for a fixed set of
+// frequently occurring HTML strings: tag names and attribute keys such as "p"
+// and "id".
+//
+// Sharing an atom's name between all elements with the same tag can result in
+// fewer string allocations when tokenizing and parsing HTML. Integer
+// comparisons are also generally faster than string comparisons.
+//
+// The value of an atom's particular code is not guaranteed to stay the same
+// between versions of this package. Neither is any ordering guaranteed:
+// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to
+// be dense. The only guarantees are that e.g. looking up "div" will yield
+// atom.Div, calling atom.Div.String will return "div", and atom.Div != 0.
+package atom // import "golang.org/x/net/html/atom"
+
+// Atom is an integer code for a string. The zero value maps to "".
+type Atom uint32
+
+// String returns the atom's name.
+func (a Atom) String() string {
+ start := uint32(a >> 8)
+ n := uint32(a & 0xff)
+ if start+n > uint32(len(atomText)) {
+ return ""
+ }
+ return atomText[start : start+n]
+}
+
+func (a Atom) string() string {
+ return atomText[a>>8 : a>>8+a&0xff]
+}
+
+// fnv computes the FNV hash with an arbitrary starting value h.
+func fnv(h uint32, s []byte) uint32 {
+ for i := range s {
+ h ^= uint32(s[i])
+ h *= 16777619
+ }
+ return h
+}
+
+func match(s string, t []byte) bool {
+ for i, c := range t {
+ if s[i] != c {
+ return false
+ }
+ }
+ return true
+}
+
+// Lookup returns the atom whose name is s. It returns zero if there is no
+// such atom. The lookup is case sensitive.
+func Lookup(s []byte) Atom {
+ if len(s) == 0 || len(s) > maxAtomLen {
+ return 0
+ }
+ h := fnv(hash0, s)
+ if a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) {
+ return a
+ }
+ if a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) {
+ return a
+ }
+ return 0
+}
+
+// String returns a string whose contents are equal to s. In that sense, it is
+// equivalent to string(s) but may be more efficient.
+func String(s []byte) string {
+ if a := Lookup(s); a != 0 {
+ return a.String()
+ }
+ return string(s)
+}
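// A minimal sketch (hypothetical inputs): Lookup is case sensitive and
// returns the zero Atom for unknown names.
//
//	a := atom.Lookup([]byte("div")) // a == atom.Div, a != 0
//	_ = a.String()                  // "div"
//	b := atom.Lookup([]byte("DIV")) // b == 0: lookup is case sensitive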
diff --git a/vendor/golang.org/x/net/html/atom/table.go b/vendor/golang.org/x/net/html/atom/table.go
new file mode 100644
index 0000000..2a93886
--- /dev/null
+++ b/vendor/golang.org/x/net/html/atom/table.go
@@ -0,0 +1,783 @@
+// Code generated by go generate gen.go; DO NOT EDIT.
+
+//go:generate go run gen.go
+
+package atom
+
+const (
+ A Atom = 0x1
+ Abbr Atom = 0x4
+ Accept Atom = 0x1a06
+ AcceptCharset Atom = 0x1a0e
+ Accesskey Atom = 0x2c09
+ Acronym Atom = 0xaa07
+ Action Atom = 0x27206
+ Address Atom = 0x6f307
+ Align Atom = 0xb105
+ Allowfullscreen Atom = 0x2080f
+ Allowpaymentrequest Atom = 0xc113
+ Allowusermedia Atom = 0xdd0e
+ Alt Atom = 0xf303
+ Annotation Atom = 0x1c90a
+ AnnotationXml Atom = 0x1c90e
+ Applet Atom = 0x31906
+ Area Atom = 0x35604
+ Article Atom = 0x3fc07
+ As Atom = 0x3c02
+ Aside Atom = 0x10705
+ Async Atom = 0xff05
+ Audio Atom = 0x11505
+ Autocomplete Atom = 0x2780c
+ Autofocus Atom = 0x12109
+ Autoplay Atom = 0x13c08
+ B Atom = 0x101
+ Base Atom = 0x3b04
+ Basefont Atom = 0x3b08
+ Bdi Atom = 0xba03
+ Bdo Atom = 0x14b03
+ Bgsound Atom = 0x15e07
+ Big Atom = 0x17003
+ Blink Atom = 0x17305
+ Blockquote Atom = 0x1870a
+ Body Atom = 0x2804
+ Br Atom = 0x202
+ Button Atom = 0x19106
+ Canvas Atom = 0x10306
+ Caption Atom = 0x23107
+ Center Atom = 0x22006
+ Challenge Atom = 0x29b09
+ Charset Atom = 0x2107
+ Checked Atom = 0x47907
+ Cite Atom = 0x19c04
+ Class Atom = 0x56405
+ Code Atom = 0x5c504
+ Col Atom = 0x1ab03
+ Colgroup Atom = 0x1ab08
+ Color Atom = 0x1bf05
+ Cols Atom = 0x1c404
+ Colspan Atom = 0x1c407
+ Command Atom = 0x1d707
+ Content Atom = 0x58b07
+ Contenteditable Atom = 0x58b0f
+ Contextmenu Atom = 0x3800b
+ Controls Atom = 0x1de08
+ Coords Atom = 0x1ea06
+ Crossorigin Atom = 0x1fb0b
+ Data Atom = 0x4a504
+ Datalist Atom = 0x4a508
+ Datetime Atom = 0x2b808
+ Dd Atom = 0x2d702
+ Default Atom = 0x10a07
+ Defer Atom = 0x5c705
+ Del Atom = 0x45203
+ Desc Atom = 0x56104
+ Details Atom = 0x7207
+ Dfn Atom = 0x8703
+ Dialog Atom = 0xbb06
+ Dir Atom = 0x9303
+ Dirname Atom = 0x9307
+ Disabled Atom = 0x16408
+ Div Atom = 0x16b03
+ Dl Atom = 0x5e602
+ Download Atom = 0x46308
+ Draggable Atom = 0x17a09
+ Dropzone Atom = 0x40508
+ Dt Atom = 0x64b02
+ Em Atom = 0x6e02
+ Embed Atom = 0x6e05
+ Enctype Atom = 0x28d07
+ Face Atom = 0x21e04
+ Fieldset Atom = 0x22608
+ Figcaption Atom = 0x22e0a
+ Figure Atom = 0x24806
+ Font Atom = 0x3f04
+ Footer Atom = 0xf606
+ For Atom = 0x25403
+ ForeignObject Atom = 0x2540d
+ Foreignobject Atom = 0x2610d
+ Form Atom = 0x26e04
+ Formaction Atom = 0x26e0a
+ Formenctype Atom = 0x2890b
+ Formmethod Atom = 0x2a40a
+ Formnovalidate Atom = 0x2ae0e
+ Formtarget Atom = 0x2c00a
+ Frame Atom = 0x8b05
+ Frameset Atom = 0x8b08
+ H1 Atom = 0x15c02
+ H2 Atom = 0x2de02
+ H3 Atom = 0x30d02
+ H4 Atom = 0x34502
+ H5 Atom = 0x34f02
+ H6 Atom = 0x64d02
+ Head Atom = 0x33104
+ Header Atom = 0x33106
+ Headers Atom = 0x33107
+ Height Atom = 0x5206
+ Hgroup Atom = 0x2ca06
+ Hidden Atom = 0x2d506
+ High Atom = 0x2db04
+ Hr Atom = 0x15702
+ Href Atom = 0x2e004
+ Hreflang Atom = 0x2e008
+ Html Atom = 0x5604
+ HttpEquiv Atom = 0x2e80a
+ I Atom = 0x601
+ Icon Atom = 0x58a04
+ Id Atom = 0x10902
+ Iframe Atom = 0x2fc06
+ Image Atom = 0x30205
+ Img Atom = 0x30703
+ Input Atom = 0x44b05
+ Inputmode Atom = 0x44b09
+ Ins Atom = 0x20403
+ Integrity Atom = 0x23f09
+ Is Atom = 0x16502
+ Isindex Atom = 0x30f07
+ Ismap Atom = 0x31605
+ Itemid Atom = 0x38b06
+ Itemprop Atom = 0x19d08
+ Itemref Atom = 0x3cd07
+ Itemscope Atom = 0x67109
+ Itemtype Atom = 0x31f08
+ Kbd Atom = 0xb903
+ Keygen Atom = 0x3206
+ Keytype Atom = 0xd607
+ Kind Atom = 0x17704
+ Label Atom = 0x5905
+ Lang Atom = 0x2e404
+ Legend Atom = 0x18106
+ Li Atom = 0xb202
+ Link Atom = 0x17404
+ List Atom = 0x4a904
+ Listing Atom = 0x4a907
+ Loop Atom = 0x5d04
+ Low Atom = 0xc303
+ Main Atom = 0x1004
+ Malignmark Atom = 0xb00a
+ Manifest Atom = 0x6d708
+ Map Atom = 0x31803
+ Mark Atom = 0xb604
+ Marquee Atom = 0x32707
+ Math Atom = 0x32e04
+ Max Atom = 0x33d03
+ Maxlength Atom = 0x33d09
+ Media Atom = 0xe605
+ Mediagroup Atom = 0xe60a
+ Menu Atom = 0x38704
+ Menuitem Atom = 0x38708
+ Meta Atom = 0x4b804
+ Meter Atom = 0x9805
+ Method Atom = 0x2a806
+ Mglyph Atom = 0x30806
+ Mi Atom = 0x34702
+ Min Atom = 0x34703
+ Minlength Atom = 0x34709
+ Mn Atom = 0x2b102
+ Mo Atom = 0xa402
+ Ms Atom = 0x67402
+ Mtext Atom = 0x35105
+ Multiple Atom = 0x35f08
+ Muted Atom = 0x36705
+ Name Atom = 0x9604
+ Nav Atom = 0x1303
+ Nobr Atom = 0x3704
+ Noembed Atom = 0x6c07
+ Noframes Atom = 0x8908
+ Nomodule Atom = 0xa208
+ Nonce Atom = 0x1a605
+ Noscript Atom = 0x21608
+ Novalidate Atom = 0x2b20a
+ Object Atom = 0x26806
+ Ol Atom = 0x13702
+ Onabort Atom = 0x19507
+ Onafterprint Atom = 0x2360c
+ Onautocomplete Atom = 0x2760e
+ Onautocompleteerror Atom = 0x27613
+ Onauxclick Atom = 0x61f0a
+ Onbeforeprint Atom = 0x69e0d
+ Onbeforeunload Atom = 0x6e70e
+ Onblur Atom = 0x56d06
+ Oncancel Atom = 0x11908
+ Oncanplay Atom = 0x14d09
+ Oncanplaythrough Atom = 0x14d10
+ Onchange Atom = 0x41b08
+ Onclick Atom = 0x2f507
+ Onclose Atom = 0x36c07
+ Oncontextmenu Atom = 0x37e0d
+ Oncopy Atom = 0x39106
+ Oncuechange Atom = 0x3970b
+ Oncut Atom = 0x3a205
+ Ondblclick Atom = 0x3a70a
+ Ondrag Atom = 0x3b106
+ Ondragend Atom = 0x3b109
+ Ondragenter Atom = 0x3ba0b
+ Ondragexit Atom = 0x3c50a
+ Ondragleave Atom = 0x3df0b
+ Ondragover Atom = 0x3ea0a
+ Ondragstart Atom = 0x3f40b
+ Ondrop Atom = 0x40306
+ Ondurationchange Atom = 0x41310
+ Onemptied Atom = 0x40a09
+ Onended Atom = 0x42307
+ Onerror Atom = 0x42a07
+ Onfocus Atom = 0x43107
+ Onhashchange Atom = 0x43d0c
+ Oninput Atom = 0x44907
+ Oninvalid Atom = 0x45509
+ Onkeydown Atom = 0x45e09
+ Onkeypress Atom = 0x46b0a
+ Onkeyup Atom = 0x48007
+ Onlanguagechange Atom = 0x48d10
+ Onload Atom = 0x49d06
+ Onloadeddata Atom = 0x49d0c
+ Onloadedmetadata Atom = 0x4b010
+ Onloadend Atom = 0x4c609
+ Onloadstart Atom = 0x4cf0b
+ Onmessage Atom = 0x4da09
+ Onmessageerror Atom = 0x4da0e
+ Onmousedown Atom = 0x4e80b
+ Onmouseenter Atom = 0x4f30c
+ Onmouseleave Atom = 0x4ff0c
+ Onmousemove Atom = 0x50b0b
+ Onmouseout Atom = 0x5160a
+ Onmouseover Atom = 0x5230b
+ Onmouseup Atom = 0x52e09
+ Onmousewheel Atom = 0x53c0c
+ Onoffline Atom = 0x54809
+ Ononline Atom = 0x55108
+ Onpagehide Atom = 0x5590a
+ Onpageshow Atom = 0x5730a
+ Onpaste Atom = 0x57f07
+ Onpause Atom = 0x59a07
+ Onplay Atom = 0x5a406
+ Onplaying Atom = 0x5a409
+ Onpopstate Atom = 0x5ad0a
+ Onprogress Atom = 0x5b70a
+ Onratechange Atom = 0x5cc0c
+ Onrejectionhandled Atom = 0x5d812
+ Onreset Atom = 0x5ea07
+ Onresize Atom = 0x5f108
+ Onscroll Atom = 0x60008
+ Onsecuritypolicyviolation Atom = 0x60819
+ Onseeked Atom = 0x62908
+ Onseeking Atom = 0x63109
+ Onselect Atom = 0x63a08
+ Onshow Atom = 0x64406
+ Onsort Atom = 0x64f06
+ Onstalled Atom = 0x65909
+ Onstorage Atom = 0x66209
+ Onsubmit Atom = 0x66b08
+ Onsuspend Atom = 0x67b09
+ Ontimeupdate Atom = 0x400c
+ Ontoggle Atom = 0x68408
+ Onunhandledrejection Atom = 0x68c14
+ Onunload Atom = 0x6ab08
+ Onvolumechange Atom = 0x6b30e
+ Onwaiting Atom = 0x6c109
+ Onwheel Atom = 0x6ca07
+ Open Atom = 0x1a304
+ Optgroup Atom = 0x5f08
+ Optimum Atom = 0x6d107
+ Option Atom = 0x6e306
+ Output Atom = 0x51d06
+ P Atom = 0xc01
+ Param Atom = 0xc05
+ Pattern Atom = 0x6607
+ Picture Atom = 0x7b07
+ Ping Atom = 0xef04
+ Placeholder Atom = 0x1310b
+ Plaintext Atom = 0x1b209
+ Playsinline Atom = 0x1400b
+ Poster Atom = 0x2cf06
+ Pre Atom = 0x47003
+ Preload Atom = 0x48607
+ Progress Atom = 0x5b908
+ Prompt Atom = 0x53606
+ Public Atom = 0x58606
+ Q Atom = 0xcf01
+ Radiogroup Atom = 0x30a
+ Rb Atom = 0x3a02
+ Readonly Atom = 0x35708
+ Referrerpolicy Atom = 0x3d10e
+ Rel Atom = 0x48703
+ Required Atom = 0x24c08
+ Reversed Atom = 0x8008
+ Rows Atom = 0x9c04
+ Rowspan Atom = 0x9c07
+ Rp Atom = 0x23c02
+ Rt Atom = 0x19a02
+ Rtc Atom = 0x19a03
+ Ruby Atom = 0xfb04
+ S Atom = 0x2501
+ Samp Atom = 0x7804
+ Sandbox Atom = 0x12907
+ Scope Atom = 0x67505
+ Scoped Atom = 0x67506
+ Script Atom = 0x21806
+ Seamless Atom = 0x37108
+ Section Atom = 0x56807
+ Select Atom = 0x63c06
+ Selected Atom = 0x63c08
+ Shape Atom = 0x1e505
+ Size Atom = 0x5f504
+ Sizes Atom = 0x5f505
+ Slot Atom = 0x1ef04
+ Small Atom = 0x20605
+ Sortable Atom = 0x65108
+ Sorted Atom = 0x33706
+ Source Atom = 0x37806
+ Spacer Atom = 0x43706
+ Span Atom = 0x9f04
+ Spellcheck Atom = 0x4740a
+ Src Atom = 0x5c003
+ Srcdoc Atom = 0x5c006
+ Srclang Atom = 0x5f907
+ Srcset Atom = 0x6f906
+ Start Atom = 0x3fa05
+ Step Atom = 0x58304
+ Strike Atom = 0xd206
+ Strong Atom = 0x6dd06
+ Style Atom = 0x6ff05
+ Sub Atom = 0x66d03
+ Summary Atom = 0x70407
+ Sup Atom = 0x70b03
+ Svg Atom = 0x70e03
+ System Atom = 0x71106
+ Tabindex Atom = 0x4be08
+ Table Atom = 0x59505
+ Target Atom = 0x2c406
+ Tbody Atom = 0x2705
+ Td Atom = 0x9202
+ Template Atom = 0x71408
+ Textarea Atom = 0x35208
+ Tfoot Atom = 0xf505
+ Th Atom = 0x15602
+ Thead Atom = 0x33005
+ Time Atom = 0x4204
+ Title Atom = 0x11005
+ Tr Atom = 0xcc02
+ Track Atom = 0x1ba05
+ Translate Atom = 0x1f209
+ Tt Atom = 0x6802
+ Type Atom = 0xd904
+ Typemustmatch Atom = 0x2900d
+ U Atom = 0xb01
+ Ul Atom = 0xa702
+ Updateviacache Atom = 0x460e
+ Usemap Atom = 0x59e06
+ Value Atom = 0x1505
+ Var Atom = 0x16d03
+ Video Atom = 0x2f105
+ Wbr Atom = 0x57c03
+ Width Atom = 0x64905
+ Workertype Atom = 0x71c0a
+ Wrap Atom = 0x72604
+ Xmp Atom = 0x12f03
+)
+
+const hash0 = 0x81cdf10e
+
+const maxAtomLen = 25
+
+var table = [1 << 9]Atom{
+ 0x1: 0xe60a, // mediagroup
+ 0x2: 0x2e404, // lang
+ 0x4: 0x2c09, // accesskey
+ 0x5: 0x8b08, // frameset
+ 0x7: 0x63a08, // onselect
+ 0x8: 0x71106, // system
+ 0xa: 0x64905, // width
+ 0xc: 0x2890b, // formenctype
+ 0xd: 0x13702, // ol
+ 0xe: 0x3970b, // oncuechange
+ 0x10: 0x14b03, // bdo
+ 0x11: 0x11505, // audio
+ 0x12: 0x17a09, // draggable
+ 0x14: 0x2f105, // video
+ 0x15: 0x2b102, // mn
+ 0x16: 0x38704, // menu
+ 0x17: 0x2cf06, // poster
+ 0x19: 0xf606, // footer
+ 0x1a: 0x2a806, // method
+ 0x1b: 0x2b808, // datetime
+ 0x1c: 0x19507, // onabort
+ 0x1d: 0x460e, // updateviacache
+ 0x1e: 0xff05, // async
+ 0x1f: 0x49d06, // onload
+ 0x21: 0x11908, // oncancel
+ 0x22: 0x62908, // onseeked
+ 0x23: 0x30205, // image
+ 0x24: 0x5d812, // onrejectionhandled
+ 0x26: 0x17404, // link
+ 0x27: 0x51d06, // output
+ 0x28: 0x33104, // head
+ 0x29: 0x4ff0c, // onmouseleave
+ 0x2a: 0x57f07, // onpaste
+ 0x2b: 0x5a409, // onplaying
+ 0x2c: 0x1c407, // colspan
+ 0x2f: 0x1bf05, // color
+ 0x30: 0x5f504, // size
+ 0x31: 0x2e80a, // http-equiv
+ 0x33: 0x601, // i
+ 0x34: 0x5590a, // onpagehide
+ 0x35: 0x68c14, // onunhandledrejection
+ 0x37: 0x42a07, // onerror
+ 0x3a: 0x3b08, // basefont
+ 0x3f: 0x1303, // nav
+ 0x40: 0x17704, // kind
+ 0x41: 0x35708, // readonly
+ 0x42: 0x30806, // mglyph
+ 0x44: 0xb202, // li
+ 0x46: 0x2d506, // hidden
+ 0x47: 0x70e03, // svg
+ 0x48: 0x58304, // step
+ 0x49: 0x23f09, // integrity
+ 0x4a: 0x58606, // public
+ 0x4c: 0x1ab03, // col
+ 0x4d: 0x1870a, // blockquote
+ 0x4e: 0x34f02, // h5
+ 0x50: 0x5b908, // progress
+ 0x51: 0x5f505, // sizes
+ 0x52: 0x34502, // h4
+ 0x56: 0x33005, // thead
+ 0x57: 0xd607, // keytype
+ 0x58: 0x5b70a, // onprogress
+ 0x59: 0x44b09, // inputmode
+ 0x5a: 0x3b109, // ondragend
+ 0x5d: 0x3a205, // oncut
+ 0x5e: 0x43706, // spacer
+ 0x5f: 0x1ab08, // colgroup
+ 0x62: 0x16502, // is
+ 0x65: 0x3c02, // as
+ 0x66: 0x54809, // onoffline
+ 0x67: 0x33706, // sorted
+ 0x69: 0x48d10, // onlanguagechange
+ 0x6c: 0x43d0c, // onhashchange
+ 0x6d: 0x9604, // name
+ 0x6e: 0xf505, // tfoot
+ 0x6f: 0x56104, // desc
+ 0x70: 0x33d03, // max
+ 0x72: 0x1ea06, // coords
+ 0x73: 0x30d02, // h3
+ 0x74: 0x6e70e, // onbeforeunload
+ 0x75: 0x9c04, // rows
+ 0x76: 0x63c06, // select
+ 0x77: 0x9805, // meter
+ 0x78: 0x38b06, // itemid
+ 0x79: 0x53c0c, // onmousewheel
+ 0x7a: 0x5c006, // srcdoc
+ 0x7d: 0x1ba05, // track
+ 0x7f: 0x31f08, // itemtype
+ 0x82: 0xa402, // mo
+ 0x83: 0x41b08, // onchange
+ 0x84: 0x33107, // headers
+ 0x85: 0x5cc0c, // onratechange
+ 0x86: 0x60819, // onsecuritypolicyviolation
+ 0x88: 0x4a508, // datalist
+ 0x89: 0x4e80b, // onmousedown
+ 0x8a: 0x1ef04, // slot
+ 0x8b: 0x4b010, // onloadedmetadata
+ 0x8c: 0x1a06, // accept
+ 0x8d: 0x26806, // object
+ 0x91: 0x6b30e, // onvolumechange
+ 0x92: 0x2107, // charset
+ 0x93: 0x27613, // onautocompleteerror
+ 0x94: 0xc113, // allowpaymentrequest
+ 0x95: 0x2804, // body
+ 0x96: 0x10a07, // default
+ 0x97: 0x63c08, // selected
+ 0x98: 0x21e04, // face
+ 0x99: 0x1e505, // shape
+ 0x9b: 0x68408, // ontoggle
+ 0x9e: 0x64b02, // dt
+ 0x9f: 0xb604, // mark
+ 0xa1: 0xb01, // u
+ 0xa4: 0x6ab08, // onunload
+ 0xa5: 0x5d04, // loop
+ 0xa6: 0x16408, // disabled
+ 0xaa: 0x42307, // onended
+ 0xab: 0xb00a, // malignmark
+ 0xad: 0x67b09, // onsuspend
+ 0xae: 0x35105, // mtext
+ 0xaf: 0x64f06, // onsort
+ 0xb0: 0x19d08, // itemprop
+ 0xb3: 0x67109, // itemscope
+ 0xb4: 0x17305, // blink
+ 0xb6: 0x3b106, // ondrag
+ 0xb7: 0xa702, // ul
+ 0xb8: 0x26e04, // form
+ 0xb9: 0x12907, // sandbox
+ 0xba: 0x8b05, // frame
+ 0xbb: 0x1505, // value
+ 0xbc: 0x66209, // onstorage
+ 0xbf: 0xaa07, // acronym
+ 0xc0: 0x19a02, // rt
+ 0xc2: 0x202, // br
+ 0xc3: 0x22608, // fieldset
+ 0xc4: 0x2900d, // typemustmatch
+ 0xc5: 0xa208, // nomodule
+ 0xc6: 0x6c07, // noembed
+ 0xc7: 0x69e0d, // onbeforeprint
+ 0xc8: 0x19106, // button
+ 0xc9: 0x2f507, // onclick
+ 0xca: 0x70407, // summary
+ 0xcd: 0xfb04, // ruby
+ 0xce: 0x56405, // class
+ 0xcf: 0x3f40b, // ondragstart
+ 0xd0: 0x23107, // caption
+ 0xd4: 0xdd0e, // allowusermedia
+ 0xd5: 0x4cf0b, // onloadstart
+ 0xd9: 0x16b03, // div
+ 0xda: 0x4a904, // list
+ 0xdb: 0x32e04, // math
+ 0xdc: 0x44b05, // input
+ 0xdf: 0x3ea0a, // ondragover
+ 0xe0: 0x2de02, // h2
+ 0xe2: 0x1b209, // plaintext
+ 0xe4: 0x4f30c, // onmouseenter
+ 0xe7: 0x47907, // checked
+ 0xe8: 0x47003, // pre
+ 0xea: 0x35f08, // multiple
+ 0xeb: 0xba03, // bdi
+ 0xec: 0x33d09, // maxlength
+ 0xed: 0xcf01, // q
+ 0xee: 0x61f0a, // onauxclick
+ 0xf0: 0x57c03, // wbr
+ 0xf2: 0x3b04, // base
+ 0xf3: 0x6e306, // option
+ 0xf5: 0x41310, // ondurationchange
+ 0xf7: 0x8908, // noframes
+ 0xf9: 0x40508, // dropzone
+ 0xfb: 0x67505, // scope
+ 0xfc: 0x8008, // reversed
+ 0xfd: 0x3ba0b, // ondragenter
+ 0xfe: 0x3fa05, // start
+ 0xff: 0x12f03, // xmp
+ 0x100: 0x5f907, // srclang
+ 0x101: 0x30703, // img
+ 0x104: 0x101, // b
+ 0x105: 0x25403, // for
+ 0x106: 0x10705, // aside
+ 0x107: 0x44907, // oninput
+ 0x108: 0x35604, // area
+ 0x109: 0x2a40a, // formmethod
+ 0x10a: 0x72604, // wrap
+ 0x10c: 0x23c02, // rp
+ 0x10d: 0x46b0a, // onkeypress
+ 0x10e: 0x6802, // tt
+ 0x110: 0x34702, // mi
+ 0x111: 0x36705, // muted
+ 0x112: 0xf303, // alt
+ 0x113: 0x5c504, // code
+ 0x114: 0x6e02, // em
+ 0x115: 0x3c50a, // ondragexit
+ 0x117: 0x9f04, // span
+ 0x119: 0x6d708, // manifest
+ 0x11a: 0x38708, // menuitem
+ 0x11b: 0x58b07, // content
+ 0x11d: 0x6c109, // onwaiting
+ 0x11f: 0x4c609, // onloadend
+ 0x121: 0x37e0d, // oncontextmenu
+ 0x123: 0x56d06, // onblur
+ 0x124: 0x3fc07, // article
+ 0x125: 0x9303, // dir
+ 0x126: 0xef04, // ping
+ 0x127: 0x24c08, // required
+ 0x128: 0x45509, // oninvalid
+ 0x129: 0xb105, // align
+ 0x12b: 0x58a04, // icon
+ 0x12c: 0x64d02, // h6
+ 0x12d: 0x1c404, // cols
+ 0x12e: 0x22e0a, // figcaption
+ 0x12f: 0x45e09, // onkeydown
+ 0x130: 0x66b08, // onsubmit
+ 0x131: 0x14d09, // oncanplay
+ 0x132: 0x70b03, // sup
+ 0x133: 0xc01, // p
+ 0x135: 0x40a09, // onemptied
+ 0x136: 0x39106, // oncopy
+ 0x137: 0x19c04, // cite
+ 0x138: 0x3a70a, // ondblclick
+ 0x13a: 0x50b0b, // onmousemove
+ 0x13c: 0x66d03, // sub
+ 0x13d: 0x48703, // rel
+ 0x13e: 0x5f08, // optgroup
+ 0x142: 0x9c07, // rowspan
+ 0x143: 0x37806, // source
+ 0x144: 0x21608, // noscript
+ 0x145: 0x1a304, // open
+ 0x146: 0x20403, // ins
+ 0x147: 0x2540d, // foreignObject
+ 0x148: 0x5ad0a, // onpopstate
+ 0x14a: 0x28d07, // enctype
+ 0x14b: 0x2760e, // onautocomplete
+ 0x14c: 0x35208, // textarea
+ 0x14e: 0x2780c, // autocomplete
+ 0x14f: 0x15702, // hr
+ 0x150: 0x1de08, // controls
+ 0x151: 0x10902, // id
+ 0x153: 0x2360c, // onafterprint
+ 0x155: 0x2610d, // foreignobject
+ 0x156: 0x32707, // marquee
+ 0x157: 0x59a07, // onpause
+ 0x158: 0x5e602, // dl
+ 0x159: 0x5206, // height
+ 0x15a: 0x34703, // min
+ 0x15b: 0x9307, // dirname
+ 0x15c: 0x1f209, // translate
+ 0x15d: 0x5604, // html
+ 0x15e: 0x34709, // minlength
+ 0x15f: 0x48607, // preload
+ 0x160: 0x71408, // template
+ 0x161: 0x3df0b, // ondragleave
+ 0x162: 0x3a02, // rb
+ 0x164: 0x5c003, // src
+ 0x165: 0x6dd06, // strong
+ 0x167: 0x7804, // samp
+ 0x168: 0x6f307, // address
+ 0x169: 0x55108, // ononline
+ 0x16b: 0x1310b, // placeholder
+ 0x16c: 0x2c406, // target
+ 0x16d: 0x20605, // small
+ 0x16e: 0x6ca07, // onwheel
+ 0x16f: 0x1c90a, // annotation
+ 0x170: 0x4740a, // spellcheck
+ 0x171: 0x7207, // details
+ 0x172: 0x10306, // canvas
+ 0x173: 0x12109, // autofocus
+ 0x174: 0xc05, // param
+ 0x176: 0x46308, // download
+ 0x177: 0x45203, // del
+ 0x178: 0x36c07, // onclose
+ 0x179: 0xb903, // kbd
+ 0x17a: 0x31906, // applet
+ 0x17b: 0x2e004, // href
+ 0x17c: 0x5f108, // onresize
+ 0x17e: 0x49d0c, // onloadeddata
+ 0x180: 0xcc02, // tr
+ 0x181: 0x2c00a, // formtarget
+ 0x182: 0x11005, // title
+ 0x183: 0x6ff05, // style
+ 0x184: 0xd206, // strike
+ 0x185: 0x59e06, // usemap
+ 0x186: 0x2fc06, // iframe
+ 0x187: 0x1004, // main
+ 0x189: 0x7b07, // picture
+ 0x18c: 0x31605, // ismap
+ 0x18e: 0x4a504, // data
+ 0x18f: 0x5905, // label
+ 0x191: 0x3d10e, // referrerpolicy
+ 0x192: 0x15602, // th
+ 0x194: 0x53606, // prompt
+ 0x195: 0x56807, // section
+ 0x197: 0x6d107, // optimum
+ 0x198: 0x2db04, // high
+ 0x199: 0x15c02, // h1
+ 0x19a: 0x65909, // onstalled
+ 0x19b: 0x16d03, // var
+ 0x19c: 0x4204, // time
+ 0x19e: 0x67402, // ms
+ 0x19f: 0x33106, // header
+ 0x1a0: 0x4da09, // onmessage
+ 0x1a1: 0x1a605, // nonce
+ 0x1a2: 0x26e0a, // formaction
+ 0x1a3: 0x22006, // center
+ 0x1a4: 0x3704, // nobr
+ 0x1a5: 0x59505, // table
+ 0x1a6: 0x4a907, // listing
+ 0x1a7: 0x18106, // legend
+ 0x1a9: 0x29b09, // challenge
+ 0x1aa: 0x24806, // figure
+ 0x1ab: 0xe605, // media
+ 0x1ae: 0xd904, // type
+ 0x1af: 0x3f04, // font
+ 0x1b0: 0x4da0e, // onmessageerror
+ 0x1b1: 0x37108, // seamless
+ 0x1b2: 0x8703, // dfn
+ 0x1b3: 0x5c705, // defer
+ 0x1b4: 0xc303, // low
+ 0x1b5: 0x19a03, // rtc
+ 0x1b6: 0x5230b, // onmouseover
+ 0x1b7: 0x2b20a, // novalidate
+ 0x1b8: 0x71c0a, // workertype
+ 0x1ba: 0x3cd07, // itemref
+ 0x1bd: 0x1, // a
+ 0x1be: 0x31803, // map
+ 0x1bf: 0x400c, // ontimeupdate
+ 0x1c0: 0x15e07, // bgsound
+ 0x1c1: 0x3206, // keygen
+ 0x1c2: 0x2705, // tbody
+ 0x1c5: 0x64406, // onshow
+ 0x1c7: 0x2501, // s
+ 0x1c8: 0x6607, // pattern
+ 0x1cc: 0x14d10, // oncanplaythrough
+ 0x1ce: 0x2d702, // dd
+ 0x1cf: 0x6f906, // srcset
+ 0x1d0: 0x17003, // big
+ 0x1d2: 0x65108, // sortable
+ 0x1d3: 0x48007, // onkeyup
+ 0x1d5: 0x5a406, // onplay
+ 0x1d7: 0x4b804, // meta
+ 0x1d8: 0x40306, // ondrop
+ 0x1da: 0x60008, // onscroll
+ 0x1db: 0x1fb0b, // crossorigin
+ 0x1dc: 0x5730a, // onpageshow
+ 0x1dd: 0x4, // abbr
+ 0x1de: 0x9202, // td
+ 0x1df: 0x58b0f, // contenteditable
+ 0x1e0: 0x27206, // action
+ 0x1e1: 0x1400b, // playsinline
+ 0x1e2: 0x43107, // onfocus
+ 0x1e3: 0x2e008, // hreflang
+ 0x1e5: 0x5160a, // onmouseout
+ 0x1e6: 0x5ea07, // onreset
+ 0x1e7: 0x13c08, // autoplay
+ 0x1e8: 0x63109, // onseeking
+ 0x1ea: 0x67506, // scoped
+ 0x1ec: 0x30a, // radiogroup
+ 0x1ee: 0x3800b, // contextmenu
+ 0x1ef: 0x52e09, // onmouseup
+ 0x1f1: 0x2ca06, // hgroup
+ 0x1f2: 0x2080f, // allowfullscreen
+ 0x1f3: 0x4be08, // tabindex
+ 0x1f6: 0x30f07, // isindex
+ 0x1f7: 0x1a0e, // accept-charset
+ 0x1f8: 0x2ae0e, // formnovalidate
+ 0x1fb: 0x1c90e, // annotation-xml
+ 0x1fc: 0x6e05, // embed
+ 0x1fd: 0x21806, // script
+ 0x1fe: 0xbb06, // dialog
+ 0x1ff: 0x1d707, // command
+}
+
+const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobrb" +
+ "asefontimeupdateviacacheightmlabelooptgroupatternoembedetail" +
+ "sampictureversedfnoframesetdirnameterowspanomoduleacronymali" +
+ "gnmarkbdialogallowpaymentrequestrikeytypeallowusermediagroup" +
+ "ingaltfooterubyasyncanvasidefaultitleaudioncancelautofocusan" +
+ "dboxmplaceholderautoplaysinlinebdoncanplaythrough1bgsoundisa" +
+ "bledivarbigblinkindraggablegendblockquotebuttonabortcitempro" +
+ "penoncecolgrouplaintextrackcolorcolspannotation-xmlcommandco" +
+ "ntrolshapecoordslotranslatecrossoriginsmallowfullscreenoscri" +
+ "ptfacenterfieldsetfigcaptionafterprintegrityfigurequiredfore" +
+ "ignObjectforeignobjectformactionautocompleteerrorformenctype" +
+ "mustmatchallengeformmethodformnovalidatetimeformtargethgroup" +
+ "osterhiddenhigh2hreflanghttp-equivideonclickiframeimageimgly" +
+ "ph3isindexismappletitemtypemarqueematheadersortedmaxlength4m" +
+ "inlength5mtextareadonlymultiplemutedoncloseamlessourceoncont" +
+ "extmenuitemidoncopyoncuechangeoncutondblclickondragendondrag" +
+ "enterondragexitemreferrerpolicyondragleaveondragoverondragst" +
+ "articleondropzonemptiedondurationchangeonendedonerroronfocus" +
+ "paceronhashchangeoninputmodeloninvalidonkeydownloadonkeypres" +
+ "spellcheckedonkeyupreloadonlanguagechangeonloadeddatalisting" +
+ "onloadedmetadatabindexonloadendonloadstartonmessageerroronmo" +
+ "usedownonmouseenteronmouseleaveonmousemoveonmouseoutputonmou" +
+ "seoveronmouseupromptonmousewheelonofflineononlineonpagehides" +
+ "classectionbluronpageshowbronpastepublicontenteditableonpaus" +
+ "emaponplayingonpopstateonprogressrcdocodeferonratechangeonre" +
+ "jectionhandledonresetonresizesrclangonscrollonsecuritypolicy" +
+ "violationauxclickonseekedonseekingonselectedonshowidth6onsor" +
+ "tableonstalledonstorageonsubmitemscopedonsuspendontoggleonun" +
+ "handledrejectionbeforeprintonunloadonvolumechangeonwaitingon" +
+ "wheeloptimumanifestrongoptionbeforeunloaddressrcsetstylesumm" +
+ "arysupsvgsystemplateworkertypewrap"
diff --git a/vendor/golang.org/x/net/html/charset/charset.go b/vendor/golang.org/x/net/html/charset/charset.go
new file mode 100644
index 0000000..13bed15
--- /dev/null
+++ b/vendor/golang.org/x/net/html/charset/charset.go
@@ -0,0 +1,257 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package charset provides common text encodings for HTML documents.
+//
+// The mapping from encoding labels to encodings is defined at
+// https://encoding.spec.whatwg.org/.
+package charset // import "golang.org/x/net/html/charset"
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "mime"
+ "strings"
+ "unicode/utf8"
+
+ "golang.org/x/net/html"
+ "golang.org/x/text/encoding"
+ "golang.org/x/text/encoding/charmap"
+ "golang.org/x/text/encoding/htmlindex"
+ "golang.org/x/text/transform"
+)
+
+// Lookup returns the encoding with the specified label, and its canonical
+// name. It returns nil and the empty string if label is not one of the
+// standard encodings for HTML. Matching is case-insensitive and ignores
+// leading and trailing whitespace. Encoders will use HTML escape sequences for
+// runes that are not supported by the character set.
+func Lookup(label string) (e encoding.Encoding, name string) {
+ e, err := htmlindex.Get(label)
+ if err != nil {
+ return nil, ""
+ }
+ name, _ = htmlindex.Name(e)
+ return &htmlEncoding{e}, name
+}
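+
+// For example (illustrative only): Lookup("utf8") and Lookup(" UTF-8 ") both
+// resolve to the UTF-8 encoding with canonical name "utf-8", while an unknown
+// label such as "no-such-charset" yields (nil, "").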
+
+// htmlEncoding wraps an encoding.Encoding so that its encoder substitutes
+// HTML escape sequences for runes the target character set cannot represent.
+type htmlEncoding struct{ encoding.Encoding }
+
+func (h *htmlEncoding) NewEncoder() *encoding.Encoder {
+ // HTML requires a non-terminating legacy encoder. We use HTML escapes to
+ // substitute unsupported code points.
+ return encoding.HTMLEscapeUnsupported(h.Encoding.NewEncoder())
+}
+
+// DetermineEncoding determines the encoding of an HTML document by examining
+// up to the first 1024 bytes of content and the declared Content-Type.
+//
+// See http://www.whatwg.org/specs/web-apps/current-work/multipage/parsing.html#determining-the-character-encoding
+func DetermineEncoding(content []byte, contentType string) (e encoding.Encoding, name string, certain bool) {
+ if len(content) > 1024 {
+ content = content[:1024]
+ }
+
+ for _, b := range boms {
+ if bytes.HasPrefix(content, b.bom) {
+ e, name = Lookup(b.enc)
+ return e, name, true
+ }
+ }
+
+ if _, params, err := mime.ParseMediaType(contentType); err == nil {
+ if cs, ok := params["charset"]; ok {
+ if e, name = Lookup(cs); e != nil {
+ return e, name, true
+ }
+ }
+ }
+
+ if len(content) > 0 {
+ e, name = prescan(content)
+ if e != nil {
+ return e, name, false
+ }
+ }
+
+ // Try to detect UTF-8.
+ // First eliminate any partial rune at the end.
+ for i := len(content) - 1; i >= 0 && i > len(content)-4; i-- {
+ b := content[i]
+ if b < 0x80 {
+ break
+ }
+ if utf8.RuneStart(b) {
+ content = content[:i]
+ break
+ }
+ }
+ hasHighBit := false
+ for _, c := range content {
+ if c >= 0x80 {
+ hasHighBit = true
+ break
+ }
+ }
+ if hasHighBit && utf8.Valid(content) {
+ return encoding.Nop, "utf-8", false
+ }
+
+ // TODO: change default depending on user's locale?
+ return charmap.Windows1252, "windows-1252", false
+}
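+
+// A hedged sketch of typical results (illustrative, not an upstream test): a
+// BOM wins outright, a charset parameter in the Content-Type is trusted, and
+// anything else falls back to sniffing, with windows-1252 as the default:
+//
+//	_, name, certain := DetermineEncoding([]byte("\xef\xbb\xbfhi"), "")
+//	// name == "utf-8", certain == true (BOM)
+//
+//	_, name, certain = DetermineEncoding([]byte("hi"), "text/html; charset=iso-8859-15")
+//	// name == "iso-8859-15", certain == true (Content-Type)
+//
+//	_, name, certain = DetermineEncoding([]byte("hi"), "text/html")
+//	// name == "windows-1252", certain == false (fallback default)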
+
+// NewReader returns an io.Reader that converts the content of r to UTF-8.
+// It calls DetermineEncoding to find out what r's encoding is.
+func NewReader(r io.Reader, contentType string) (io.Reader, error) {
+ preview := make([]byte, 1024)
+ n, err := io.ReadFull(r, preview)
+ switch {
+ case err == io.ErrUnexpectedEOF:
+ preview = preview[:n]
+ r = bytes.NewReader(preview)
+ case err != nil:
+ return nil, err
+ default:
+ r = io.MultiReader(bytes.NewReader(preview), r)
+ }
+
+ if e, _, _ := DetermineEncoding(preview, contentType); e != encoding.Nop {
+ r = transform.NewReader(r, e.NewDecoder())
+ }
+ return r, nil
+}
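+
+// Illustrative usage (a sketch; resp is assumed to be an *http.Response that
+// the caller already holds):
+//
+//	body, err := charset.NewReader(resp.Body, resp.Header.Get("Content-Type"))
+//	if err != nil {
+//		return err
+//	}
+//	doc, err := html.Parse(body) // body now yields UTF-8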
+
+// NewReaderLabel returns a reader that converts from the specified charset to
+// UTF-8. It uses Lookup to find the encoding that corresponds to label, and
+// returns an error if Lookup returns nil. It is suitable for use as
+// encoding/xml.Decoder's CharsetReader function.
+func NewReaderLabel(label string, input io.Reader) (io.Reader, error) {
+ e, _ := Lookup(label)
+ if e == nil {
+ return nil, fmt.Errorf("unsupported charset: %q", label)
+ }
+ return transform.NewReader(input, e.NewDecoder()), nil
+}
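+
+// Illustrative wiring into encoding/xml (a sketch, not upstream code), since
+// NewReaderLabel matches the CharsetReader signature exactly:
+//
+//	d := xml.NewDecoder(r)
+//	d.CharsetReader = charset.NewReaderLabel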
+
+// prescan tokenizes content looking for a <meta> element that declares the
+// document's encoding, either via a charset attribute or via an
+// http-equiv="Content-Type" pragma. It returns (nil, "") if none is found.
+func prescan(content []byte) (e encoding.Encoding, name string) {
+ z := html.NewTokenizer(bytes.NewReader(content))
+ for {
+ switch z.Next() {
+ case html.ErrorToken:
+ return nil, ""
+
+ case html.StartTagToken, html.SelfClosingTagToken:
+ tagName, hasAttr := z.TagName()
+ if !bytes.Equal(tagName, []byte("meta")) {
+ continue
+ }
+ attrList := make(map[string]bool)
+ gotPragma := false
+
+ const (
+ dontKnow = iota
+ doNeedPragma
+ doNotNeedPragma
+ )
+ needPragma := dontKnow
+
+ name = ""
+ e = nil
+ for hasAttr {
+ var key, val []byte
+ key, val, hasAttr = z.TagAttr()
+ ks := string(key)
+ if attrList[ks] {
+ continue
+ }
+ attrList[ks] = true
+ for i, c := range val {
+ if 'A' <= c && c <= 'Z' {
+ val[i] = c + 0x20
+ }
+ }
+
+ switch ks {
+ case "http-equiv":
+ if bytes.Equal(val, []byte("content-type")) {
+ gotPragma = true
+ }
+
+ case "content":
+ if e == nil {
+ name = fromMetaElement(string(val))
+ if name != "" {
+ e, name = Lookup(name)
+ if e != nil {
+ needPragma = doNeedPragma
+ }
+ }
+ }
+
+ case "charset":
+ e, name = Lookup(string(val))
+ needPragma = doNotNeedPragma
+ }
+ }
+
+ if needPragma == dontKnow || needPragma == doNeedPragma && !gotPragma {
+ continue
+ }
+
+ if strings.HasPrefix(name, "utf-16") {
+ name = "utf-8"
+ e = encoding.Nop
+ }
+
+ if e != nil {
+ return e, name
+ }
+ }
+ }
+}
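+
+// As a concrete sketch of the pragma rules above (illustrative): a
+// <meta charset="utf-8"> declares an encoding unconditionally, whereas a
+// <meta content="text/html; charset=utf-8"> only counts if the same element
+// also carries http-equiv="Content-Type". A declared utf-16 is rewritten to
+// utf-8, since the bytes being prescanned are evidently not UTF-16.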
+
+// fromMetaElement extracts the charset value from a meta element's content
+// attribute, e.g. the "utf-8" in "text/html; charset=utf-8". It returns ""
+// if no well-formed charset value is present.
+func fromMetaElement(s string) string {
+ for s != "" {
+ csLoc := strings.Index(s, "charset")
+ if csLoc == -1 {
+ return ""
+ }
+ s = s[csLoc+len("charset"):]
+ s = strings.TrimLeft(s, " \t\n\f\r")
+ if !strings.HasPrefix(s, "=") {
+ continue
+ }
+ s = s[1:]
+ s = strings.TrimLeft(s, " \t\n\f\r")
+ if s == "" {
+ return ""
+ }
+ if q := s[0]; q == '"' || q == '\'' {
+ s = s[1:]
+ closeQuote := strings.IndexRune(s, rune(q))
+ if closeQuote == -1 {
+ return ""
+ }
+ return s[:closeQuote]
+ }
+
+ end := strings.IndexAny(s, "; \t\n\f\r")
+ if end == -1 {
+ end = len(s)
+ }
+ return s[:end]
+ }
+ return ""
+}
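+
+// For example (illustrative): fromMetaElement(`text/html; charset="utf-8"`)
+// and fromMetaElement("text/html; charset=utf-8") both return "utf-8", while
+// an unterminated quote, as in `charset="utf-8`, returns "".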
+
+// boms maps byte-order marks to the labels of the encodings they identify.
+var boms = []struct {
+ bom []byte
+ enc string
+}{
+ {[]byte{0xfe, 0xff}, "utf-16be"},
+ {[]byte{0xff, 0xfe}, "utf-16le"},
+ {[]byte{0xef, 0xbb, 0xbf}, "utf-8"},
+}
diff --git a/vendor/golang.org/x/net/html/const.go b/vendor/golang.org/x/net/html/const.go
new file mode 100644
index 0000000..ff7acf2
--- /dev/null
+++ b/vendor/golang.org/x/net/html/const.go
@@ -0,0 +1,111 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+// Section 12.2.4.2 of the HTML5 specification says "The following elements
+// have varying levels of special parsing rules".
+// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements
+var isSpecialElementMap = map[string]bool{
+ "address": true,
+ "applet": true,
+ "area": true,
+ "article": true,
+ "aside": true,
+ "base": true,
+ "basefont": true,
+ "bgsound": true,
+ "blockquote": true,
+ "body": true,
+ "br": true,
+ "button": true,
+ "caption": true,
+ "center": true,
+ "col": true,
+ "colgroup": true,
+ "dd": true,
+ "details": true,
+ "dir": true,
+ "div": true,
+ "dl": true,
+ "dt": true,
+ "embed": true,
+ "fieldset": true,
+ "figcaption": true,
+ "figure": true,
+ "footer": true,
+ "form": true,
+ "frame": true,
+ "frameset": true,
+ "h1": true,
+ "h2": true,
+ "h3": true,
+ "h4": true,
+ "h5": true,
+ "h6": true,
+ "head": true,
+ "header": true,
+ "hgroup": true,
+ "hr": true,
+ "html": true,
+ "iframe": true,
+ "img": true,
+ "input": true,
+ "keygen": true, // "keygen" has been removed from the spec, but are kept here for backwards compatibility.
+ "li": true,
+ "link": true,
+ "listing": true,
+ "main": true,
+ "marquee": true,
+ "menu": true,
+ "meta": true,
+ "nav": true,
+ "noembed": true,
+ "noframes": true,
+ "noscript": true,
+ "object": true,
+ "ol": true,
+ "p": true,
+ "param": true,
+ "plaintext": true,
+ "pre": true,
+ "script": true,
+ "section": true,
+ "select": true,
+ "source": true,
+ "style": true,
+ "summary": true,
+ "table": true,
+ "tbody": true,
+ "td": true,
+ "template": true,
+ "textarea": true,
+ "tfoot": true,
+ "th": true,
+ "thead": true,
+ "title": true,
+ "tr": true,
+ "track": true,
+ "ul": true,
+ "wbr": true,
+ "xmp": true,
+}
+
+func isSpecialElement(element *Node) bool {
+ switch element.Namespace {
+ case "", "html":
+ return isSpecialElementMap[element.Data]
+ case "math":
+ switch element.Data {
+ case "mi", "mo", "mn", "ms", "mtext", "annotation-xml":
+ return true
+ }
+ case "svg":
+ switch element.Data {
+ case "foreignObject", "desc", "title":
+ return true
+ }
+ }
+ return false
+}
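+
+// Illustrative examples (a sketch, not upstream code):
+//
+//	isSpecialElement(&Node{Data: "p"})                               // true
+//	isSpecialElement(&Node{Namespace: "svg", Data: "foreignObject"}) // true
+//	isSpecialElement(&Node{Namespace: "svg", Data: "circle"})        // false
+//	isSpecialElement(&Node{Data: "span"})                            // false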
diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go
new file mode 100644
index 0000000..3a7e5ab
--- /dev/null
+++ b/vendor/golang.org/x/net/html/doc.go
@@ -0,0 +1,127 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package html implements an HTML5-compliant tokenizer and parser.
+
+Tokenization is done by creating a Tokenizer for an io.Reader r. It is the
+caller's responsibility to ensure that r provides UTF-8 encoded HTML.
+
+ z := html.NewTokenizer(r)
+
+Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(),
+which parses the next token and returns its type, or an error:
+
+ for {
+ tt := z.Next()
+ if tt == html.ErrorToken {
+ // ...
+ return ...
+ }
+ // Process the current token.
+ }
+
+There are two APIs for retrieving the current token. The high-level API is to
+call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs
+allow optionally calling Raw after Next but before Token, Text, TagName, or
+TagAttr. In EBNF notation, the valid call sequence per token is:
+
+ Next {Raw} [ Token | Text | TagName {TagAttr} ]
+
+Token returns an independent data structure that completely describes a token.
+Entities (such as "&lt;") are unescaped, tag names and attribute keys are
+lower-cased, and attributes are collected into a []Attribute. For example:
+
+ for {
+ if z.Next() == html.ErrorToken {
+ // Returning io.EOF indicates success.
+ return z.Err()
+ }
+ emitToken(z.Token())
+ }
+
+The low-level API performs fewer allocations and copies, but the contents of
+the []byte values returned by Text, TagName and TagAttr may change on the next
+call to Next. For example, to extract an HTML page's anchor text:
+
+ depth := 0
+ for {
+ tt := z.Next()
+ switch tt {
+ case html.ErrorToken:
+ return z.Err()
+ case html.TextToken:
+ if depth > 0 {
+ // emitBytes should copy the []byte it receives,
+ // if it doesn't process it immediately.
+ emitBytes(z.Text())
+ }
+ case html.StartTagToken, html.EndTagToken:
+ tn, _ := z.TagName()
+ if len(tn) == 1 && tn[0] == 'a' {
+ if tt == html.StartTagToken {
+ depth++
+ } else {
+ depth--
+ }
+ }
+ }
+ }
+
+Parsing is done by calling Parse with an io.Reader, which returns the root of
+the parse tree (the document element) as a *Node. It is the caller's
+responsibility to ensure that the Reader provides UTF-8 encoded HTML. For
+example, to process each anchor node in depth-first order:
+
+ doc, err := html.Parse(r)
+ if err != nil {
+ // ...
+ }
+ var f func(*html.Node)
+ f = func(n *html.Node) {
+ if n.Type == html.ElementNode && n.Data == "a" {
+ // Do something with n...
+ }
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ f(c)
+ }
+ }
+ f(doc)
+
+The relevant specifications include:
+https://html.spec.whatwg.org/multipage/syntax.html and
+https://html.spec.whatwg.org/multipage/syntax.html#tokenization
+
+# Security Considerations
+
+Care should be taken when parsing and interpreting HTML, whether full documents
+or fragments, within the framework of the HTML specification, especially with
+regard to untrusted inputs.
+
+This package provides both a tokenizer and a parser, which implement the
+tokenization, and tokenization and tree construction stages of the WHATWG HTML
+parsing specification respectively. While the tokenizer parses and normalizes
+individual HTML tokens, only the parser constructs the DOM tree from the
+tokenized HTML, as described in the tree construction stage of the
+specification, dynamically modifying or extending the document's DOM tree.
+
+If your use case requires semantically well-formed HTML documents, as defined by
+the WHATWG specification, the parser should be used rather than the tokenizer.
+
+In security contexts, if trust decisions are being made using the tokenized or
+parsed content, the input must be re-serialized (for instance by using Render or
+Token.String) in order for those trust decisions to hold, as the process of
+tokenization or parsing may alter the content.
+*/
+package html // import "golang.org/x/net/html"
+
+// The tokenization algorithm implemented by this package is not a line-by-line
+// transliteration of the relatively verbose state-machine in the WHATWG
+// specification. A more direct approach is used instead, where the program
+// counter implies the state, such as whether it is tokenizing a tag or a text
+// node. Specification compliance is verified by checking expected and actual
+// outputs over a test suite rather than aiming for algorithmic fidelity.
+
+// TODO(nigeltao): Does a DOM API belong in this package or a separate one?
+// TODO(nigeltao): How does parsing interact with a JavaScript engine?
diff --git a/vendor/golang.org/x/net/html/doctype.go b/vendor/golang.org/x/net/html/doctype.go
new file mode 100644
index 0000000..c484e5a
--- /dev/null
+++ b/vendor/golang.org/x/net/html/doctype.go
@@ -0,0 +1,156 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+ "strings"
+)
+
+// parseDoctype parses the data from a DoctypeToken into a name,
+// public identifier, and system identifier. It returns a Node whose Type
+// is DoctypeNode, whose Data is the name, and which has attributes
+// named "system" and "public" for the two identifiers if they were present.
+// quirks is whether the document should be parsed in "quirks mode".
+func parseDoctype(s string) (n *Node, quirks bool) {
+ n = &Node{Type: DoctypeNode}
+
+ // Find the name.
+ space := strings.IndexAny(s, whitespace)
+ if space == -1 {
+ space = len(s)
+ }
+ n.Data = s[:space]
+ // The comparison to "html" is case-sensitive.
+ if n.Data != "html" {
+ quirks = true
+ }
+ n.Data = strings.ToLower(n.Data)
+ s = strings.TrimLeft(s[space:], whitespace)
+
+ if len(s) < 6 {
+ // It can't start with "PUBLIC" or "SYSTEM".
+ // Ignore the rest of the string.
+ return n, quirks || s != ""
+ }
+
+ key := strings.ToLower(s[:6])
+ s = s[6:]
+ for key == "public" || key == "system" {
+ s = strings.TrimLeft(s, whitespace)
+ if s == "" {
+ break
+ }
+ quote := s[0]
+ if quote != '"' && quote != '\'' {
+ break
+ }
+ s = s[1:]
+ q := strings.IndexRune(s, rune(quote))
+ var id string
+ if q == -1 {
+ id = s
+ s = ""
+ } else {
+ id = s[:q]
+ s = s[q+1:]
+ }
+ n.Attr = append(n.Attr, Attribute{Key: key, Val: id})
+ if key == "public" {
+ key = "system"
+ } else {
+ key = ""
+ }
+ }
+
+ if key != "" || s != "" {
+ quirks = true
+ } else if len(n.Attr) > 0 {
+ if n.Attr[0].Key == "public" {
+ public := strings.ToLower(n.Attr[0].Val)
+ switch public {
+ case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3d/dtd html 4.0 transitional/en", "html":
+ quirks = true
+ default:
+ for _, q := range quirkyIDs {
+ if strings.HasPrefix(public, q) {
+ quirks = true
+ break
+ }
+ }
+ }
+ // The following two public IDs only cause quirks mode if there is no system ID.
+ if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") ||
+ strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) {
+ quirks = true
+ }
+ }
+ if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" &&
+ strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" {
+ quirks = true
+ }
+ }
+
+ return n, quirks
+}
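+
+// A hedged sketch of typical outcomes (illustrative, not an upstream test):
+//
+//	n, quirks := parseDoctype("html")
+//	// n.Data == "html", quirks == false
+//
+//	n, quirks = parseDoctype(`html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"`)
+//	// two attributes, "public" then "system"; quirks == false
+//
+//	n, quirks = parseDoctype("bogus")
+//	// quirks == true, since the name is not "html"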
+
+// quirkyIDs is a list of public doctype identifiers that cause a document
+// to be interpreted in quirks mode. The identifiers should be in lower case.
+var quirkyIDs = []string{
+ "+//silmaril//dtd html pro v0r11 19970101//",
+ "-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
+ "-//as//dtd html 3.0 aswedit + extensions//",
+ "-//ietf//dtd html 2.0 level 1//",
+ "-//ietf//dtd html 2.0 level 2//",
+ "-//ietf//dtd html 2.0 strict level 1//",
+ "-//ietf//dtd html 2.0 strict level 2//",
+ "-//ietf//dtd html 2.0 strict//",
+ "-//ietf//dtd html 2.0//",
+ "-//ietf//dtd html 2.1e//",
+ "-//ietf//dtd html 3.0//",
+ "-//ietf//dtd html 3.2 final//",
+ "-//ietf//dtd html 3.2//",
+ "-//ietf//dtd html 3//",
+ "-//ietf//dtd html level 0//",
+ "-//ietf//dtd html level 1//",
+ "-//ietf//dtd html level 2//",
+ "-//ietf//dtd html level 3//",
+ "-//ietf//dtd html strict level 0//",
+ "-//ietf//dtd html strict level 1//",
+ "-//ietf//dtd html strict level 2//",
+ "-//ietf//dtd html strict level 3//",
+ "-//ietf//dtd html strict//",
+ "-//ietf//dtd html//",
+ "-//metrius//dtd metrius presentational//",
+ "-//microsoft//dtd internet explorer 2.0 html strict//",
+ "-//microsoft//dtd internet explorer 2.0 html//",
+ "-//microsoft//dtd internet explorer 2.0 tables//",
+ "-//microsoft//dtd internet explorer 3.0 html strict//",
+ "-//microsoft//dtd internet explorer 3.0 html//",
+ "-//microsoft//dtd internet explorer 3.0 tables//",
+ "-//netscape comm. corp.//dtd html//",
+ "-//netscape comm. corp.//dtd strict html//",
+ "-//o'reilly and associates//dtd html 2.0//",
+ "-//o'reilly and associates//dtd html extended 1.0//",
+ "-//o'reilly and associates//dtd html extended relaxed 1.0//",
+ "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
+ "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
+ "-//spyglass//dtd html 2.0 extended//",
+ "-//sq//dtd html 2.0 hotmetal + extensions//",
+ "-//sun microsystems corp.//dtd hotjava html//",
+ "-//sun microsystems corp.//dtd hotjava strict html//",
+ "-//w3c//dtd html 3 1995-03-24//",
+ "-//w3c//dtd html 3.2 draft//",
+ "-//w3c//dtd html 3.2 final//",
+ "-//w3c//dtd html 3.2//",
+ "-//w3c//dtd html 3.2s draft//",
+ "-//w3c//dtd html 4.0 frameset//",
+ "-//w3c//dtd html 4.0 transitional//",
+ "-//w3c//dtd html experimental 19960712//",
+ "-//w3c//dtd html experimental 970421//",
+ "-//w3c//dtd w3 html//",
+ "-//w3o//dtd w3 html 3.0//",
+ "-//webtechs//dtd mozilla html 2.0//",
+ "-//webtechs//dtd mozilla html//",
+}
diff --git a/vendor/golang.org/x/net/html/entity.go b/vendor/golang.org/x/net/html/entity.go
new file mode 100644
index 0000000..b628880
--- /dev/null
+++ b/vendor/golang.org/x/net/html/entity.go
@@ -0,0 +1,2253 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+// All entities that do not end with ';' are 6 or fewer bytes long.
+const longestEntityWithoutSemicolon = 6
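+
+// For example (illustrative), when scanning a candidate reference such as
+// "&amplifier", a tokenizer need only consider the first 6 bytes after '&'
+// when looking for a semicolon-less match ("amp" here).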
+
+// entity is a map from HTML entity names to their values. The semicolon matters:
+// https://html.spec.whatwg.org/multipage/syntax.html#named-character-references
+// lists both "amp" and "amp;" as two separate entries.
+//
+// Note that the HTML5 list is larger than the HTML4 list at
+// http://www.w3.org/TR/html4/sgml/entities.html
+var entity = map[string]rune{
+ "AElig;": '\U000000C6',
+ "AMP;": '\U00000026',
+ "Aacute;": '\U000000C1',
+ "Abreve;": '\U00000102',
+ "Acirc;": '\U000000C2',
+ "Acy;": '\U00000410',
+ "Afr;": '\U0001D504',
+ "Agrave;": '\U000000C0',
+ "Alpha;": '\U00000391',
+ "Amacr;": '\U00000100',
+ "And;": '\U00002A53',
+ "Aogon;": '\U00000104',
+ "Aopf;": '\U0001D538',
+ "ApplyFunction;": '\U00002061',
+ "Aring;": '\U000000C5',
+ "Ascr;": '\U0001D49C',
+ "Assign;": '\U00002254',
+ "Atilde;": '\U000000C3',
+ "Auml;": '\U000000C4',
+ "Backslash;": '\U00002216',
+ "Barv;": '\U00002AE7',
+ "Barwed;": '\U00002306',
+ "Bcy;": '\U00000411',
+ "Because;": '\U00002235',
+ "Bernoullis;": '\U0000212C',
+ "Beta;": '\U00000392',
+ "Bfr;": '\U0001D505',
+ "Bopf;": '\U0001D539',
+ "Breve;": '\U000002D8',
+ "Bscr;": '\U0000212C',
+ "Bumpeq;": '\U0000224E',
+ "CHcy;": '\U00000427',
+ "COPY;": '\U000000A9',
+ "Cacute;": '\U00000106',
+ "Cap;": '\U000022D2',
+ "CapitalDifferentialD;": '\U00002145',
+ "Cayleys;": '\U0000212D',
+ "Ccaron;": '\U0000010C',
+ "Ccedil;": '\U000000C7',
+ "Ccirc;": '\U00000108',
+ "Cconint;": '\U00002230',
+ "Cdot;": '\U0000010A',
+ "Cedilla;": '\U000000B8',
+ "CenterDot;": '\U000000B7',
+ "Cfr;": '\U0000212D',
+ "Chi;": '\U000003A7',
+ "CircleDot;": '\U00002299',
+ "CircleMinus;": '\U00002296',
+ "CirclePlus;": '\U00002295',
+ "CircleTimes;": '\U00002297',
+ "ClockwiseContourIntegral;": '\U00002232',
+ "CloseCurlyDoubleQuote;": '\U0000201D',
+ "CloseCurlyQuote;": '\U00002019',
+ "Colon;": '\U00002237',
+ "Colone;": '\U00002A74',
+ "Congruent;": '\U00002261',
+ "Conint;": '\U0000222F',
+ "ContourIntegral;": '\U0000222E',
+ "Copf;": '\U00002102',
+ "Coproduct;": '\U00002210',
+ "CounterClockwiseContourIntegral;": '\U00002233',
+ "Cross;": '\U00002A2F',
+ "Cscr;": '\U0001D49E',
+ "Cup;": '\U000022D3',
+ "CupCap;": '\U0000224D',
+ "DD;": '\U00002145',
+ "DDotrahd;": '\U00002911',
+ "DJcy;": '\U00000402',
+ "DScy;": '\U00000405',
+ "DZcy;": '\U0000040F',
+ "Dagger;": '\U00002021',
+ "Darr;": '\U000021A1',
+ "Dashv;": '\U00002AE4',
+ "Dcaron;": '\U0000010E',
+ "Dcy;": '\U00000414',
+ "Del;": '\U00002207',
+ "Delta;": '\U00000394',
+ "Dfr;": '\U0001D507',
+ "DiacriticalAcute;": '\U000000B4',
+ "DiacriticalDot;": '\U000002D9',
+ "DiacriticalDoubleAcute;": '\U000002DD',
+ "DiacriticalGrave;": '\U00000060',
+ "DiacriticalTilde;": '\U000002DC',
+ "Diamond;": '\U000022C4',
+ "DifferentialD;": '\U00002146',
+ "Dopf;": '\U0001D53B',
+ "Dot;": '\U000000A8',
+ "DotDot;": '\U000020DC',
+ "DotEqual;": '\U00002250',
+ "DoubleContourIntegral;": '\U0000222F',
+ "DoubleDot;": '\U000000A8',
+ "DoubleDownArrow;": '\U000021D3',
+ "DoubleLeftArrow;": '\U000021D0',
+ "DoubleLeftRightArrow;": '\U000021D4',
+ "DoubleLeftTee;": '\U00002AE4',
+ "DoubleLongLeftArrow;": '\U000027F8',
+ "DoubleLongLeftRightArrow;": '\U000027FA',
+ "DoubleLongRightArrow;": '\U000027F9',
+ "DoubleRightArrow;": '\U000021D2',
+ "DoubleRightTee;": '\U000022A8',
+ "DoubleUpArrow;": '\U000021D1',
+ "DoubleUpDownArrow;": '\U000021D5',
+ "DoubleVerticalBar;": '\U00002225',
+ "DownArrow;": '\U00002193',
+ "DownArrowBar;": '\U00002913',
+ "DownArrowUpArrow;": '\U000021F5',
+ "DownBreve;": '\U00000311',
+ "DownLeftRightVector;": '\U00002950',
+ "DownLeftTeeVector;": '\U0000295E',
+ "DownLeftVector;": '\U000021BD',
+ "DownLeftVectorBar;": '\U00002956',
+ "DownRightTeeVector;": '\U0000295F',
+ "DownRightVector;": '\U000021C1',
+ "DownRightVectorBar;": '\U00002957',
+ "DownTee;": '\U000022A4',
+ "DownTeeArrow;": '\U000021A7',
+ "Downarrow;": '\U000021D3',
+ "Dscr;": '\U0001D49F',
+ "Dstrok;": '\U00000110',
+ "ENG;": '\U0000014A',
+ "ETH;": '\U000000D0',
+ "Eacute;": '\U000000C9',
+ "Ecaron;": '\U0000011A',
+ "Ecirc;": '\U000000CA',
+ "Ecy;": '\U0000042D',
+ "Edot;": '\U00000116',
+ "Efr;": '\U0001D508',
+ "Egrave;": '\U000000C8',
+ "Element;": '\U00002208',
+ "Emacr;": '\U00000112',
+ "EmptySmallSquare;": '\U000025FB',
+ "EmptyVerySmallSquare;": '\U000025AB',
+ "Eogon;": '\U00000118',
+ "Eopf;": '\U0001D53C',
+ "Epsilon;": '\U00000395',
+ "Equal;": '\U00002A75',
+ "EqualTilde;": '\U00002242',
+ "Equilibrium;": '\U000021CC',
+ "Escr;": '\U00002130',
+ "Esim;": '\U00002A73',
+ "Eta;": '\U00000397',
+ "Euml;": '\U000000CB',
+ "Exists;": '\U00002203',
+ "ExponentialE;": '\U00002147',
+ "Fcy;": '\U00000424',
+ "Ffr;": '\U0001D509',
+ "FilledSmallSquare;": '\U000025FC',
+ "FilledVerySmallSquare;": '\U000025AA',
+ "Fopf;": '\U0001D53D',
+ "ForAll;": '\U00002200',
+ "Fouriertrf;": '\U00002131',
+ "Fscr;": '\U00002131',
+ "GJcy;": '\U00000403',
+ "GT;": '\U0000003E',
+ "Gamma;": '\U00000393',
+ "Gammad;": '\U000003DC',
+ "Gbreve;": '\U0000011E',
+ "Gcedil;": '\U00000122',
+ "Gcirc;": '\U0000011C',
+ "Gcy;": '\U00000413',
+ "Gdot;": '\U00000120',
+ "Gfr;": '\U0001D50A',
+ "Gg;": '\U000022D9',
+ "Gopf;": '\U0001D53E',
+ "GreaterEqual;": '\U00002265',
+ "GreaterEqualLess;": '\U000022DB',
+ "GreaterFullEqual;": '\U00002267',
+ "GreaterGreater;": '\U00002AA2',
+ "GreaterLess;": '\U00002277',
+ "GreaterSlantEqual;": '\U00002A7E',
+ "GreaterTilde;": '\U00002273',
+ "Gscr;": '\U0001D4A2',
+ "Gt;": '\U0000226B',
+ "HARDcy;": '\U0000042A',
+ "Hacek;": '\U000002C7',
+ "Hat;": '\U0000005E',
+ "Hcirc;": '\U00000124',
+ "Hfr;": '\U0000210C',
+ "HilbertSpace;": '\U0000210B',
+ "Hopf;": '\U0000210D',
+ "HorizontalLine;": '\U00002500',
+ "Hscr;": '\U0000210B',
+ "Hstrok;": '\U00000126',
+ "HumpDownHump;": '\U0000224E',
+ "HumpEqual;": '\U0000224F',
+ "IEcy;": '\U00000415',
+ "IJlig;": '\U00000132',
+ "IOcy;": '\U00000401',
+ "Iacute;": '\U000000CD',
+ "Icirc;": '\U000000CE',
+ "Icy;": '\U00000418',
+ "Idot;": '\U00000130',
+ "Ifr;": '\U00002111',
+ "Igrave;": '\U000000CC',
+ "Im;": '\U00002111',
+ "Imacr;": '\U0000012A',
+ "ImaginaryI;": '\U00002148',
+ "Implies;": '\U000021D2',
+ "Int;": '\U0000222C',
+ "Integral;": '\U0000222B',
+ "Intersection;": '\U000022C2',
+ "InvisibleComma;": '\U00002063',
+ "InvisibleTimes;": '\U00002062',
+ "Iogon;": '\U0000012E',
+ "Iopf;": '\U0001D540',
+ "Iota;": '\U00000399',
+ "Iscr;": '\U00002110',
+ "Itilde;": '\U00000128',
+ "Iukcy;": '\U00000406',
+ "Iuml;": '\U000000CF',
+ "Jcirc;": '\U00000134',
+ "Jcy;": '\U00000419',
+ "Jfr;": '\U0001D50D',
+ "Jopf;": '\U0001D541',
+ "Jscr;": '\U0001D4A5',
+ "Jsercy;": '\U00000408',
+ "Jukcy;": '\U00000404',
+ "KHcy;": '\U00000425',
+ "KJcy;": '\U0000040C',
+ "Kappa;": '\U0000039A',
+ "Kcedil;": '\U00000136',
+ "Kcy;": '\U0000041A',
+ "Kfr;": '\U0001D50E',
+ "Kopf;": '\U0001D542',
+ "Kscr;": '\U0001D4A6',
+ "LJcy;": '\U00000409',
+ "LT;": '\U0000003C',
+ "Lacute;": '\U00000139',
+ "Lambda;": '\U0000039B',
+ "Lang;": '\U000027EA',
+ "Laplacetrf;": '\U00002112',
+ "Larr;": '\U0000219E',
+ "Lcaron;": '\U0000013D',
+ "Lcedil;": '\U0000013B',
+ "Lcy;": '\U0000041B',
+ "LeftAngleBracket;": '\U000027E8',
+ "LeftArrow;": '\U00002190',
+ "LeftArrowBar;": '\U000021E4',
+ "LeftArrowRightArrow;": '\U000021C6',
+ "LeftCeiling;": '\U00002308',
+ "LeftDoubleBracket;": '\U000027E6',
+ "LeftDownTeeVector;": '\U00002961',
+ "LeftDownVector;": '\U000021C3',
+ "LeftDownVectorBar;": '\U00002959',
+ "LeftFloor;": '\U0000230A',
+ "LeftRightArrow;": '\U00002194',
+ "LeftRightVector;": '\U0000294E',
+ "LeftTee;": '\U000022A3',
+ "LeftTeeArrow;": '\U000021A4',
+ "LeftTeeVector;": '\U0000295A',
+ "LeftTriangle;": '\U000022B2',
+ "LeftTriangleBar;": '\U000029CF',
+ "LeftTriangleEqual;": '\U000022B4',
+ "LeftUpDownVector;": '\U00002951',
+ "LeftUpTeeVector;": '\U00002960',
+ "LeftUpVector;": '\U000021BF',
+ "LeftUpVectorBar;": '\U00002958',
+ "LeftVector;": '\U000021BC',
+ "LeftVectorBar;": '\U00002952',
+ "Leftarrow;": '\U000021D0',
+ "Leftrightarrow;": '\U000021D4',
+ "LessEqualGreater;": '\U000022DA',
+ "LessFullEqual;": '\U00002266',
+ "LessGreater;": '\U00002276',
+ "LessLess;": '\U00002AA1',
+ "LessSlantEqual;": '\U00002A7D',
+ "LessTilde;": '\U00002272',
+ "Lfr;": '\U0001D50F',
+ "Ll;": '\U000022D8',
+ "Lleftarrow;": '\U000021DA',
+ "Lmidot;": '\U0000013F',
+ "LongLeftArrow;": '\U000027F5',
+ "LongLeftRightArrow;": '\U000027F7',
+ "LongRightArrow;": '\U000027F6',
+ "Longleftarrow;": '\U000027F8',
+ "Longleftrightarrow;": '\U000027FA',
+ "Longrightarrow;": '\U000027F9',
+ "Lopf;": '\U0001D543',
+ "LowerLeftArrow;": '\U00002199',
+ "LowerRightArrow;": '\U00002198',
+ "Lscr;": '\U00002112',
+ "Lsh;": '\U000021B0',
+ "Lstrok;": '\U00000141',
+ "Lt;": '\U0000226A',
+ "Map;": '\U00002905',
+ "Mcy;": '\U0000041C',
+ "MediumSpace;": '\U0000205F',
+ "Mellintrf;": '\U00002133',
+ "Mfr;": '\U0001D510',
+ "MinusPlus;": '\U00002213',
+ "Mopf;": '\U0001D544',
+ "Mscr;": '\U00002133',
+ "Mu;": '\U0000039C',
+ "NJcy;": '\U0000040A',
+ "Nacute;": '\U00000143',
+ "Ncaron;": '\U00000147',
+ "Ncedil;": '\U00000145',
+ "Ncy;": '\U0000041D',
+ "NegativeMediumSpace;": '\U0000200B',
+ "NegativeThickSpace;": '\U0000200B',
+ "NegativeThinSpace;": '\U0000200B',
+ "NegativeVeryThinSpace;": '\U0000200B',
+ "NestedGreaterGreater;": '\U0000226B',
+ "NestedLessLess;": '\U0000226A',
+ "NewLine;": '\U0000000A',
+ "Nfr;": '\U0001D511',
+ "NoBreak;": '\U00002060',
+ "NonBreakingSpace;": '\U000000A0',
+ "Nopf;": '\U00002115',
+ "Not;": '\U00002AEC',
+ "NotCongruent;": '\U00002262',
+ "NotCupCap;": '\U0000226D',
+ "NotDoubleVerticalBar;": '\U00002226',
+ "NotElement;": '\U00002209',
+ "NotEqual;": '\U00002260',
+ "NotExists;": '\U00002204',
+ "NotGreater;": '\U0000226F',
+ "NotGreaterEqual;": '\U00002271',
+ "NotGreaterLess;": '\U00002279',
+ "NotGreaterTilde;": '\U00002275',
+ "NotLeftTriangle;": '\U000022EA',
+ "NotLeftTriangleEqual;": '\U000022EC',
+ "NotLess;": '\U0000226E',
+ "NotLessEqual;": '\U00002270',
+ "NotLessGreater;": '\U00002278',
+ "NotLessTilde;": '\U00002274',
+ "NotPrecedes;": '\U00002280',
+ "NotPrecedesSlantEqual;": '\U000022E0',
+ "NotReverseElement;": '\U0000220C',
+ "NotRightTriangle;": '\U000022EB',
+ "NotRightTriangleEqual;": '\U000022ED',
+ "NotSquareSubsetEqual;": '\U000022E2',
+ "NotSquareSupersetEqual;": '\U000022E3',
+ "NotSubsetEqual;": '\U00002288',
+ "NotSucceeds;": '\U00002281',
+ "NotSucceedsSlantEqual;": '\U000022E1',
+ "NotSupersetEqual;": '\U00002289',
+ "NotTilde;": '\U00002241',
+ "NotTildeEqual;": '\U00002244',
+ "NotTildeFullEqual;": '\U00002247',
+ "NotTildeTilde;": '\U00002249',
+ "NotVerticalBar;": '\U00002224',
+ "Nscr;": '\U0001D4A9',
+ "Ntilde;": '\U000000D1',
+ "Nu;": '\U0000039D',
+ "OElig;": '\U00000152',
+ "Oacute;": '\U000000D3',
+ "Ocirc;": '\U000000D4',
+ "Ocy;": '\U0000041E',
+ "Odblac;": '\U00000150',
+ "Ofr;": '\U0001D512',
+ "Ograve;": '\U000000D2',
+ "Omacr;": '\U0000014C',
+ "Omega;": '\U000003A9',
+ "Omicron;": '\U0000039F',
+ "Oopf;": '\U0001D546',
+ "OpenCurlyDoubleQuote;": '\U0000201C',
+ "OpenCurlyQuote;": '\U00002018',
+ "Or;": '\U00002A54',
+ "Oscr;": '\U0001D4AA',
+ "Oslash;": '\U000000D8',
+ "Otilde;": '\U000000D5',
+ "Otimes;": '\U00002A37',
+ "Ouml;": '\U000000D6',
+ "OverBar;": '\U0000203E',
+ "OverBrace;": '\U000023DE',
+ "OverBracket;": '\U000023B4',
+ "OverParenthesis;": '\U000023DC',
+ "PartialD;": '\U00002202',
+ "Pcy;": '\U0000041F',
+ "Pfr;": '\U0001D513',
+ "Phi;": '\U000003A6',
+ "Pi;": '\U000003A0',
+ "PlusMinus;": '\U000000B1',
+ "Poincareplane;": '\U0000210C',
+ "Popf;": '\U00002119',
+ "Pr;": '\U00002ABB',
+ "Precedes;": '\U0000227A',
+ "PrecedesEqual;": '\U00002AAF',
+ "PrecedesSlantEqual;": '\U0000227C',
+ "PrecedesTilde;": '\U0000227E',
+ "Prime;": '\U00002033',
+ "Product;": '\U0000220F',
+ "Proportion;": '\U00002237',
+ "Proportional;": '\U0000221D',
+ "Pscr;": '\U0001D4AB',
+ "Psi;": '\U000003A8',
+ "QUOT;": '\U00000022',
+ "Qfr;": '\U0001D514',
+ "Qopf;": '\U0000211A',
+ "Qscr;": '\U0001D4AC',
+ "RBarr;": '\U00002910',
+ "REG;": '\U000000AE',
+ "Racute;": '\U00000154',
+ "Rang;": '\U000027EB',
+ "Rarr;": '\U000021A0',
+ "Rarrtl;": '\U00002916',
+ "Rcaron;": '\U00000158',
+ "Rcedil;": '\U00000156',
+ "Rcy;": '\U00000420',
+ "Re;": '\U0000211C',
+ "ReverseElement;": '\U0000220B',
+ "ReverseEquilibrium;": '\U000021CB',
+ "ReverseUpEquilibrium;": '\U0000296F',
+ "Rfr;": '\U0000211C',
+ "Rho;": '\U000003A1',
+ "RightAngleBracket;": '\U000027E9',
+ "RightArrow;": '\U00002192',
+ "RightArrowBar;": '\U000021E5',
+ "RightArrowLeftArrow;": '\U000021C4',
+ "RightCeiling;": '\U00002309',
+ "RightDoubleBracket;": '\U000027E7',
+ "RightDownTeeVector;": '\U0000295D',
+ "RightDownVector;": '\U000021C2',
+ "RightDownVectorBar;": '\U00002955',
+ "RightFloor;": '\U0000230B',
+ "RightTee;": '\U000022A2',
+ "RightTeeArrow;": '\U000021A6',
+ "RightTeeVector;": '\U0000295B',
+ "RightTriangle;": '\U000022B3',
+ "RightTriangleBar;": '\U000029D0',
+ "RightTriangleEqual;": '\U000022B5',
+ "RightUpDownVector;": '\U0000294F',
+ "RightUpTeeVector;": '\U0000295C',
+ "RightUpVector;": '\U000021BE',
+ "RightUpVectorBar;": '\U00002954',
+ "RightVector;": '\U000021C0',
+ "RightVectorBar;": '\U00002953',
+ "Rightarrow;": '\U000021D2',
+ "Ropf;": '\U0000211D',
+ "RoundImplies;": '\U00002970',
+ "Rrightarrow;": '\U000021DB',
+ "Rscr;": '\U0000211B',
+ "Rsh;": '\U000021B1',
+ "RuleDelayed;": '\U000029F4',
+ "SHCHcy;": '\U00000429',
+ "SHcy;": '\U00000428',
+ "SOFTcy;": '\U0000042C',
+ "Sacute;": '\U0000015A',
+ "Sc;": '\U00002ABC',
+ "Scaron;": '\U00000160',
+ "Scedil;": '\U0000015E',
+ "Scirc;": '\U0000015C',
+ "Scy;": '\U00000421',
+ "Sfr;": '\U0001D516',
+ "ShortDownArrow;": '\U00002193',
+ "ShortLeftArrow;": '\U00002190',
+ "ShortRightArrow;": '\U00002192',
+ "ShortUpArrow;": '\U00002191',
+ "Sigma;": '\U000003A3',
+ "SmallCircle;": '\U00002218',
+ "Sopf;": '\U0001D54A',
+ "Sqrt;": '\U0000221A',
+ "Square;": '\U000025A1',
+ "SquareIntersection;": '\U00002293',
+ "SquareSubset;": '\U0000228F',
+ "SquareSubsetEqual;": '\U00002291',
+ "SquareSuperset;": '\U00002290',
+ "SquareSupersetEqual;": '\U00002292',
+ "SquareUnion;": '\U00002294',
+ "Sscr;": '\U0001D4AE',
+ "Star;": '\U000022C6',
+ "Sub;": '\U000022D0',
+ "Subset;": '\U000022D0',
+ "SubsetEqual;": '\U00002286',
+ "Succeeds;": '\U0000227B',
+ "SucceedsEqual;": '\U00002AB0',
+ "SucceedsSlantEqual;": '\U0000227D',
+ "SucceedsTilde;": '\U0000227F',
+ "SuchThat;": '\U0000220B',
+ "Sum;": '\U00002211',
+ "Sup;": '\U000022D1',
+ "Superset;": '\U00002283',
+ "SupersetEqual;": '\U00002287',
+ "Supset;": '\U000022D1',
+ "THORN;": '\U000000DE',
+ "TRADE;": '\U00002122',
+ "TSHcy;": '\U0000040B',
+ "TScy;": '\U00000426',
+ "Tab;": '\U00000009',
+ "Tau;": '\U000003A4',
+ "Tcaron;": '\U00000164',
+ "Tcedil;": '\U00000162',
+ "Tcy;": '\U00000422',
+ "Tfr;": '\U0001D517',
+ "Therefore;": '\U00002234',
+ "Theta;": '\U00000398',
+ "ThinSpace;": '\U00002009',
+ "Tilde;": '\U0000223C',
+ "TildeEqual;": '\U00002243',
+ "TildeFullEqual;": '\U00002245',
+ "TildeTilde;": '\U00002248',
+ "Topf;": '\U0001D54B',
+ "TripleDot;": '\U000020DB',
+ "Tscr;": '\U0001D4AF',
+ "Tstrok;": '\U00000166',
+ "Uacute;": '\U000000DA',
+ "Uarr;": '\U0000219F',
+ "Uarrocir;": '\U00002949',
+ "Ubrcy;": '\U0000040E',
+ "Ubreve;": '\U0000016C',
+ "Ucirc;": '\U000000DB',
+ "Ucy;": '\U00000423',
+ "Udblac;": '\U00000170',
+ "Ufr;": '\U0001D518',
+ "Ugrave;": '\U000000D9',
+ "Umacr;": '\U0000016A',
+ "UnderBar;": '\U0000005F',
+ "UnderBrace;": '\U000023DF',
+ "UnderBracket;": '\U000023B5',
+ "UnderParenthesis;": '\U000023DD',
+ "Union;": '\U000022C3',
+ "UnionPlus;": '\U0000228E',
+ "Uogon;": '\U00000172',
+ "Uopf;": '\U0001D54C',
+ "UpArrow;": '\U00002191',
+ "UpArrowBar;": '\U00002912',
+ "UpArrowDownArrow;": '\U000021C5',
+ "UpDownArrow;": '\U00002195',
+ "UpEquilibrium;": '\U0000296E',
+ "UpTee;": '\U000022A5',
+ "UpTeeArrow;": '\U000021A5',
+ "Uparrow;": '\U000021D1',
+ "Updownarrow;": '\U000021D5',
+ "UpperLeftArrow;": '\U00002196',
+ "UpperRightArrow;": '\U00002197',
+ "Upsi;": '\U000003D2',
+ "Upsilon;": '\U000003A5',
+ "Uring;": '\U0000016E',
+ "Uscr;": '\U0001D4B0',
+ "Utilde;": '\U00000168',
+ "Uuml;": '\U000000DC',
+ "VDash;": '\U000022AB',
+ "Vbar;": '\U00002AEB',
+ "Vcy;": '\U00000412',
+ "Vdash;": '\U000022A9',
+ "Vdashl;": '\U00002AE6',
+ "Vee;": '\U000022C1',
+ "Verbar;": '\U00002016',
+ "Vert;": '\U00002016',
+ "VerticalBar;": '\U00002223',
+ "VerticalLine;": '\U0000007C',
+ "VerticalSeparator;": '\U00002758',
+ "VerticalTilde;": '\U00002240',
+ "VeryThinSpace;": '\U0000200A',
+ "Vfr;": '\U0001D519',
+ "Vopf;": '\U0001D54D',
+ "Vscr;": '\U0001D4B1',
+ "Vvdash;": '\U000022AA',
+ "Wcirc;": '\U00000174',
+ "Wedge;": '\U000022C0',
+ "Wfr;": '\U0001D51A',
+ "Wopf;": '\U0001D54E',
+ "Wscr;": '\U0001D4B2',
+ "Xfr;": '\U0001D51B',
+ "Xi;": '\U0000039E',
+ "Xopf;": '\U0001D54F',
+ "Xscr;": '\U0001D4B3',
+ "YAcy;": '\U0000042F',
+ "YIcy;": '\U00000407',
+ "YUcy;": '\U0000042E',
+ "Yacute;": '\U000000DD',
+ "Ycirc;": '\U00000176',
+ "Ycy;": '\U0000042B',
+ "Yfr;": '\U0001D51C',
+ "Yopf;": '\U0001D550',
+ "Yscr;": '\U0001D4B4',
+ "Yuml;": '\U00000178',
+ "ZHcy;": '\U00000416',
+ "Zacute;": '\U00000179',
+ "Zcaron;": '\U0000017D',
+ "Zcy;": '\U00000417',
+ "Zdot;": '\U0000017B',
+ "ZeroWidthSpace;": '\U0000200B',
+ "Zeta;": '\U00000396',
+ "Zfr;": '\U00002128',
+ "Zopf;": '\U00002124',
+ "Zscr;": '\U0001D4B5',
+ "aacute;": '\U000000E1',
+ "abreve;": '\U00000103',
+ "ac;": '\U0000223E',
+ "acd;": '\U0000223F',
+ "acirc;": '\U000000E2',
+ "acute;": '\U000000B4',
+ "acy;": '\U00000430',
+ "aelig;": '\U000000E6',
+ "af;": '\U00002061',
+ "afr;": '\U0001D51E',
+ "agrave;": '\U000000E0',
+ "alefsym;": '\U00002135',
+ "aleph;": '\U00002135',
+ "alpha;": '\U000003B1',
+ "amacr;": '\U00000101',
+ "amalg;": '\U00002A3F',
+ "amp;": '\U00000026',
+ "and;": '\U00002227',
+ "andand;": '\U00002A55',
+ "andd;": '\U00002A5C',
+ "andslope;": '\U00002A58',
+ "andv;": '\U00002A5A',
+ "ang;": '\U00002220',
+ "ange;": '\U000029A4',
+ "angle;": '\U00002220',
+ "angmsd;": '\U00002221',
+ "angmsdaa;": '\U000029A8',
+ "angmsdab;": '\U000029A9',
+ "angmsdac;": '\U000029AA',
+ "angmsdad;": '\U000029AB',
+ "angmsdae;": '\U000029AC',
+ "angmsdaf;": '\U000029AD',
+ "angmsdag;": '\U000029AE',
+ "angmsdah;": '\U000029AF',
+ "angrt;": '\U0000221F',
+ "angrtvb;": '\U000022BE',
+ "angrtvbd;": '\U0000299D',
+ "angsph;": '\U00002222',
+ "angst;": '\U000000C5',
+ "angzarr;": '\U0000237C',
+ "aogon;": '\U00000105',
+ "aopf;": '\U0001D552',
+ "ap;": '\U00002248',
+ "apE;": '\U00002A70',
+ "apacir;": '\U00002A6F',
+ "ape;": '\U0000224A',
+ "apid;": '\U0000224B',
+ "apos;": '\U00000027',
+ "approx;": '\U00002248',
+ "approxeq;": '\U0000224A',
+ "aring;": '\U000000E5',
+ "ascr;": '\U0001D4B6',
+ "ast;": '\U0000002A',
+ "asymp;": '\U00002248',
+ "asympeq;": '\U0000224D',
+ "atilde;": '\U000000E3',
+ "auml;": '\U000000E4',
+ "awconint;": '\U00002233',
+ "awint;": '\U00002A11',
+ "bNot;": '\U00002AED',
+ "backcong;": '\U0000224C',
+ "backepsilon;": '\U000003F6',
+ "backprime;": '\U00002035',
+ "backsim;": '\U0000223D',
+ "backsimeq;": '\U000022CD',
+ "barvee;": '\U000022BD',
+ "barwed;": '\U00002305',
+ "barwedge;": '\U00002305',
+ "bbrk;": '\U000023B5',
+ "bbrktbrk;": '\U000023B6',
+ "bcong;": '\U0000224C',
+ "bcy;": '\U00000431',
+ "bdquo;": '\U0000201E',
+ "becaus;": '\U00002235',
+ "because;": '\U00002235',
+ "bemptyv;": '\U000029B0',
+ "bepsi;": '\U000003F6',
+ "bernou;": '\U0000212C',
+ "beta;": '\U000003B2',
+ "beth;": '\U00002136',
+ "between;": '\U0000226C',
+ "bfr;": '\U0001D51F',
+ "bigcap;": '\U000022C2',
+ "bigcirc;": '\U000025EF',
+ "bigcup;": '\U000022C3',
+ "bigodot;": '\U00002A00',
+ "bigoplus;": '\U00002A01',
+ "bigotimes;": '\U00002A02',
+ "bigsqcup;": '\U00002A06',
+ "bigstar;": '\U00002605',
+ "bigtriangledown;": '\U000025BD',
+ "bigtriangleup;": '\U000025B3',
+ "biguplus;": '\U00002A04',
+ "bigvee;": '\U000022C1',
+ "bigwedge;": '\U000022C0',
+ "bkarow;": '\U0000290D',
+ "blacklozenge;": '\U000029EB',
+ "blacksquare;": '\U000025AA',
+ "blacktriangle;": '\U000025B4',
+ "blacktriangledown;": '\U000025BE',
+ "blacktriangleleft;": '\U000025C2',
+ "blacktriangleright;": '\U000025B8',
+ "blank;": '\U00002423',
+ "blk12;": '\U00002592',
+ "blk14;": '\U00002591',
+ "blk34;": '\U00002593',
+ "block;": '\U00002588',
+ "bnot;": '\U00002310',
+ "bopf;": '\U0001D553',
+ "bot;": '\U000022A5',
+ "bottom;": '\U000022A5',
+ "bowtie;": '\U000022C8',
+ "boxDL;": '\U00002557',
+ "boxDR;": '\U00002554',
+ "boxDl;": '\U00002556',
+ "boxDr;": '\U00002553',
+ "boxH;": '\U00002550',
+ "boxHD;": '\U00002566',
+ "boxHU;": '\U00002569',
+ "boxHd;": '\U00002564',
+ "boxHu;": '\U00002567',
+ "boxUL;": '\U0000255D',
+ "boxUR;": '\U0000255A',
+ "boxUl;": '\U0000255C',
+ "boxUr;": '\U00002559',
+ "boxV;": '\U00002551',
+ "boxVH;": '\U0000256C',
+ "boxVL;": '\U00002563',
+ "boxVR;": '\U00002560',
+ "boxVh;": '\U0000256B',
+ "boxVl;": '\U00002562',
+ "boxVr;": '\U0000255F',
+ "boxbox;": '\U000029C9',
+ "boxdL;": '\U00002555',
+ "boxdR;": '\U00002552',
+ "boxdl;": '\U00002510',
+ "boxdr;": '\U0000250C',
+ "boxh;": '\U00002500',
+ "boxhD;": '\U00002565',
+ "boxhU;": '\U00002568',
+ "boxhd;": '\U0000252C',
+ "boxhu;": '\U00002534',
+ "boxminus;": '\U0000229F',
+ "boxplus;": '\U0000229E',
+ "boxtimes;": '\U000022A0',
+ "boxuL;": '\U0000255B',
+ "boxuR;": '\U00002558',
+ "boxul;": '\U00002518',
+ "boxur;": '\U00002514',
+ "boxv;": '\U00002502',
+ "boxvH;": '\U0000256A',
+ "boxvL;": '\U00002561',
+ "boxvR;": '\U0000255E',
+ "boxvh;": '\U0000253C',
+ "boxvl;": '\U00002524',
+ "boxvr;": '\U0000251C',
+ "bprime;": '\U00002035',
+ "breve;": '\U000002D8',
+ "brvbar;": '\U000000A6',
+ "bscr;": '\U0001D4B7',
+ "bsemi;": '\U0000204F',
+ "bsim;": '\U0000223D',
+ "bsime;": '\U000022CD',
+ "bsol;": '\U0000005C',
+ "bsolb;": '\U000029C5',
+ "bsolhsub;": '\U000027C8',
+ "bull;": '\U00002022',
+ "bullet;": '\U00002022',
+ "bump;": '\U0000224E',
+ "bumpE;": '\U00002AAE',
+ "bumpe;": '\U0000224F',
+ "bumpeq;": '\U0000224F',
+ "cacute;": '\U00000107',
+ "cap;": '\U00002229',
+ "capand;": '\U00002A44',
+ "capbrcup;": '\U00002A49',
+ "capcap;": '\U00002A4B',
+ "capcup;": '\U00002A47',
+ "capdot;": '\U00002A40',
+ "caret;": '\U00002041',
+ "caron;": '\U000002C7',
+ "ccaps;": '\U00002A4D',
+ "ccaron;": '\U0000010D',
+ "ccedil;": '\U000000E7',
+ "ccirc;": '\U00000109',
+ "ccups;": '\U00002A4C',
+ "ccupssm;": '\U00002A50',
+ "cdot;": '\U0000010B',
+ "cedil;": '\U000000B8',
+ "cemptyv;": '\U000029B2',
+ "cent;": '\U000000A2',
+ "centerdot;": '\U000000B7',
+ "cfr;": '\U0001D520',
+ "chcy;": '\U00000447',
+ "check;": '\U00002713',
+ "checkmark;": '\U00002713',
+ "chi;": '\U000003C7',
+ "cir;": '\U000025CB',
+ "cirE;": '\U000029C3',
+ "circ;": '\U000002C6',
+ "circeq;": '\U00002257',
+ "circlearrowleft;": '\U000021BA',
+ "circlearrowright;": '\U000021BB',
+ "circledR;": '\U000000AE',
+ "circledS;": '\U000024C8',
+ "circledast;": '\U0000229B',
+ "circledcirc;": '\U0000229A',
+ "circleddash;": '\U0000229D',
+ "cire;": '\U00002257',
+ "cirfnint;": '\U00002A10',
+ "cirmid;": '\U00002AEF',
+ "cirscir;": '\U000029C2',
+ "clubs;": '\U00002663',
+ "clubsuit;": '\U00002663',
+ "colon;": '\U0000003A',
+ "colone;": '\U00002254',
+ "coloneq;": '\U00002254',
+ "comma;": '\U0000002C',
+ "commat;": '\U00000040',
+ "comp;": '\U00002201',
+ "compfn;": '\U00002218',
+ "complement;": '\U00002201',
+ "complexes;": '\U00002102',
+ "cong;": '\U00002245',
+ "congdot;": '\U00002A6D',
+ "conint;": '\U0000222E',
+ "copf;": '\U0001D554',
+ "coprod;": '\U00002210',
+ "copy;": '\U000000A9',
+ "copysr;": '\U00002117',
+ "crarr;": '\U000021B5',
+ "cross;": '\U00002717',
+ "cscr;": '\U0001D4B8',
+ "csub;": '\U00002ACF',
+ "csube;": '\U00002AD1',
+ "csup;": '\U00002AD0',
+ "csupe;": '\U00002AD2',
+ "ctdot;": '\U000022EF',
+ "cudarrl;": '\U00002938',
+ "cudarrr;": '\U00002935',
+ "cuepr;": '\U000022DE',
+ "cuesc;": '\U000022DF',
+ "cularr;": '\U000021B6',
+ "cularrp;": '\U0000293D',
+ "cup;": '\U0000222A',
+ "cupbrcap;": '\U00002A48',
+ "cupcap;": '\U00002A46',
+ "cupcup;": '\U00002A4A',
+ "cupdot;": '\U0000228D',
+ "cupor;": '\U00002A45',
+ "curarr;": '\U000021B7',
+ "curarrm;": '\U0000293C',
+ "curlyeqprec;": '\U000022DE',
+ "curlyeqsucc;": '\U000022DF',
+ "curlyvee;": '\U000022CE',
+ "curlywedge;": '\U000022CF',
+ "curren;": '\U000000A4',
+ "curvearrowleft;": '\U000021B6',
+ "curvearrowright;": '\U000021B7',
+ "cuvee;": '\U000022CE',
+ "cuwed;": '\U000022CF',
+ "cwconint;": '\U00002232',
+ "cwint;": '\U00002231',
+ "cylcty;": '\U0000232D',
+ "dArr;": '\U000021D3',
+ "dHar;": '\U00002965',
+ "dagger;": '\U00002020',
+ "daleth;": '\U00002138',
+ "darr;": '\U00002193',
+ "dash;": '\U00002010',
+ "dashv;": '\U000022A3',
+ "dbkarow;": '\U0000290F',
+ "dblac;": '\U000002DD',
+ "dcaron;": '\U0000010F',
+ "dcy;": '\U00000434',
+ "dd;": '\U00002146',
+ "ddagger;": '\U00002021',
+ "ddarr;": '\U000021CA',
+ "ddotseq;": '\U00002A77',
+ "deg;": '\U000000B0',
+ "delta;": '\U000003B4',
+ "demptyv;": '\U000029B1',
+ "dfisht;": '\U0000297F',
+ "dfr;": '\U0001D521',
+ "dharl;": '\U000021C3',
+ "dharr;": '\U000021C2',
+ "diam;": '\U000022C4',
+ "diamond;": '\U000022C4',
+ "diamondsuit;": '\U00002666',
+ "diams;": '\U00002666',
+ "die;": '\U000000A8',
+ "digamma;": '\U000003DD',
+ "disin;": '\U000022F2',
+ "div;": '\U000000F7',
+ "divide;": '\U000000F7',
+ "divideontimes;": '\U000022C7',
+ "divonx;": '\U000022C7',
+ "djcy;": '\U00000452',
+ "dlcorn;": '\U0000231E',
+ "dlcrop;": '\U0000230D',
+ "dollar;": '\U00000024',
+ "dopf;": '\U0001D555',
+ "dot;": '\U000002D9',
+ "doteq;": '\U00002250',
+ "doteqdot;": '\U00002251',
+ "dotminus;": '\U00002238',
+ "dotplus;": '\U00002214',
+ "dotsquare;": '\U000022A1',
+ "doublebarwedge;": '\U00002306',
+ "downarrow;": '\U00002193',
+ "downdownarrows;": '\U000021CA',
+ "downharpoonleft;": '\U000021C3',
+ "downharpoonright;": '\U000021C2',
+ "drbkarow;": '\U00002910',
+ "drcorn;": '\U0000231F',
+ "drcrop;": '\U0000230C',
+ "dscr;": '\U0001D4B9',
+ "dscy;": '\U00000455',
+ "dsol;": '\U000029F6',
+ "dstrok;": '\U00000111',
+ "dtdot;": '\U000022F1',
+ "dtri;": '\U000025BF',
+ "dtrif;": '\U000025BE',
+ "duarr;": '\U000021F5',
+ "duhar;": '\U0000296F',
+ "dwangle;": '\U000029A6',
+ "dzcy;": '\U0000045F',
+ "dzigrarr;": '\U000027FF',
+ "eDDot;": '\U00002A77',
+ "eDot;": '\U00002251',
+ "eacute;": '\U000000E9',
+ "easter;": '\U00002A6E',
+ "ecaron;": '\U0000011B',
+ "ecir;": '\U00002256',
+ "ecirc;": '\U000000EA',
+ "ecolon;": '\U00002255',
+ "ecy;": '\U0000044D',
+ "edot;": '\U00000117',
+ "ee;": '\U00002147',
+ "efDot;": '\U00002252',
+ "efr;": '\U0001D522',
+ "eg;": '\U00002A9A',
+ "egrave;": '\U000000E8',
+ "egs;": '\U00002A96',
+ "egsdot;": '\U00002A98',
+ "el;": '\U00002A99',
+ "elinters;": '\U000023E7',
+ "ell;": '\U00002113',
+ "els;": '\U00002A95',
+ "elsdot;": '\U00002A97',
+ "emacr;": '\U00000113',
+ "empty;": '\U00002205',
+ "emptyset;": '\U00002205',
+ "emptyv;": '\U00002205',
+ "emsp;": '\U00002003',
+ "emsp13;": '\U00002004',
+ "emsp14;": '\U00002005',
+ "eng;": '\U0000014B',
+ "ensp;": '\U00002002',
+ "eogon;": '\U00000119',
+ "eopf;": '\U0001D556',
+ "epar;": '\U000022D5',
+ "eparsl;": '\U000029E3',
+ "eplus;": '\U00002A71',
+ "epsi;": '\U000003B5',
+ "epsilon;": '\U000003B5',
+ "epsiv;": '\U000003F5',
+ "eqcirc;": '\U00002256',
+ "eqcolon;": '\U00002255',
+ "eqsim;": '\U00002242',
+ "eqslantgtr;": '\U00002A96',
+ "eqslantless;": '\U00002A95',
+ "equals;": '\U0000003D',
+ "equest;": '\U0000225F',
+ "equiv;": '\U00002261',
+ "equivDD;": '\U00002A78',
+ "eqvparsl;": '\U000029E5',
+ "erDot;": '\U00002253',
+ "erarr;": '\U00002971',
+ "escr;": '\U0000212F',
+ "esdot;": '\U00002250',
+ "esim;": '\U00002242',
+ "eta;": '\U000003B7',
+ "eth;": '\U000000F0',
+ "euml;": '\U000000EB',
+ "euro;": '\U000020AC',
+ "excl;": '\U00000021',
+ "exist;": '\U00002203',
+ "expectation;": '\U00002130',
+ "exponentiale;": '\U00002147',
+ "fallingdotseq;": '\U00002252',
+ "fcy;": '\U00000444',
+ "female;": '\U00002640',
+ "ffilig;": '\U0000FB03',
+ "fflig;": '\U0000FB00',
+ "ffllig;": '\U0000FB04',
+ "ffr;": '\U0001D523',
+ "filig;": '\U0000FB01',
+ "flat;": '\U0000266D',
+ "fllig;": '\U0000FB02',
+ "fltns;": '\U000025B1',
+ "fnof;": '\U00000192',
+ "fopf;": '\U0001D557',
+ "forall;": '\U00002200',
+ "fork;": '\U000022D4',
+ "forkv;": '\U00002AD9',
+ "fpartint;": '\U00002A0D',
+ "frac12;": '\U000000BD',
+ "frac13;": '\U00002153',
+ "frac14;": '\U000000BC',
+ "frac15;": '\U00002155',
+ "frac16;": '\U00002159',
+ "frac18;": '\U0000215B',
+ "frac23;": '\U00002154',
+ "frac25;": '\U00002156',
+ "frac34;": '\U000000BE',
+ "frac35;": '\U00002157',
+ "frac38;": '\U0000215C',
+ "frac45;": '\U00002158',
+ "frac56;": '\U0000215A',
+ "frac58;": '\U0000215D',
+ "frac78;": '\U0000215E',
+ "frasl;": '\U00002044',
+ "frown;": '\U00002322',
+ "fscr;": '\U0001D4BB',
+ "gE;": '\U00002267',
+ "gEl;": '\U00002A8C',
+ "gacute;": '\U000001F5',
+ "gamma;": '\U000003B3',
+ "gammad;": '\U000003DD',
+ "gap;": '\U00002A86',
+ "gbreve;": '\U0000011F',
+ "gcirc;": '\U0000011D',
+ "gcy;": '\U00000433',
+ "gdot;": '\U00000121',
+ "ge;": '\U00002265',
+ "gel;": '\U000022DB',
+ "geq;": '\U00002265',
+ "geqq;": '\U00002267',
+ "geqslant;": '\U00002A7E',
+ "ges;": '\U00002A7E',
+ "gescc;": '\U00002AA9',
+ "gesdot;": '\U00002A80',
+ "gesdoto;": '\U00002A82',
+ "gesdotol;": '\U00002A84',
+ "gesles;": '\U00002A94',
+ "gfr;": '\U0001D524',
+ "gg;": '\U0000226B',
+ "ggg;": '\U000022D9',
+ "gimel;": '\U00002137',
+ "gjcy;": '\U00000453',
+ "gl;": '\U00002277',
+ "glE;": '\U00002A92',
+ "gla;": '\U00002AA5',
+ "glj;": '\U00002AA4',
+ "gnE;": '\U00002269',
+ "gnap;": '\U00002A8A',
+ "gnapprox;": '\U00002A8A',
+ "gne;": '\U00002A88',
+ "gneq;": '\U00002A88',
+ "gneqq;": '\U00002269',
+ "gnsim;": '\U000022E7',
+ "gopf;": '\U0001D558',
+ "grave;": '\U00000060',
+ "gscr;": '\U0000210A',
+ "gsim;": '\U00002273',
+ "gsime;": '\U00002A8E',
+ "gsiml;": '\U00002A90',
+ "gt;": '\U0000003E',
+ "gtcc;": '\U00002AA7',
+ "gtcir;": '\U00002A7A',
+ "gtdot;": '\U000022D7',
+ "gtlPar;": '\U00002995',
+ "gtquest;": '\U00002A7C',
+ "gtrapprox;": '\U00002A86',
+ "gtrarr;": '\U00002978',
+ "gtrdot;": '\U000022D7',
+ "gtreqless;": '\U000022DB',
+ "gtreqqless;": '\U00002A8C',
+ "gtrless;": '\U00002277',
+ "gtrsim;": '\U00002273',
+ "hArr;": '\U000021D4',
+ "hairsp;": '\U0000200A',
+ "half;": '\U000000BD',
+ "hamilt;": '\U0000210B',
+ "hardcy;": '\U0000044A',
+ "harr;": '\U00002194',
+ "harrcir;": '\U00002948',
+ "harrw;": '\U000021AD',
+ "hbar;": '\U0000210F',
+ "hcirc;": '\U00000125',
+ "hearts;": '\U00002665',
+ "heartsuit;": '\U00002665',
+ "hellip;": '\U00002026',
+ "hercon;": '\U000022B9',
+ "hfr;": '\U0001D525',
+ "hksearow;": '\U00002925',
+ "hkswarow;": '\U00002926',
+ "hoarr;": '\U000021FF',
+ "homtht;": '\U0000223B',
+ "hookleftarrow;": '\U000021A9',
+ "hookrightarrow;": '\U000021AA',
+ "hopf;": '\U0001D559',
+ "horbar;": '\U00002015',
+ "hscr;": '\U0001D4BD',
+ "hslash;": '\U0000210F',
+ "hstrok;": '\U00000127',
+ "hybull;": '\U00002043',
+ "hyphen;": '\U00002010',
+ "iacute;": '\U000000ED',
+ "ic;": '\U00002063',
+ "icirc;": '\U000000EE',
+ "icy;": '\U00000438',
+ "iecy;": '\U00000435',
+ "iexcl;": '\U000000A1',
+ "iff;": '\U000021D4',
+ "ifr;": '\U0001D526',
+ "igrave;": '\U000000EC',
+ "ii;": '\U00002148',
+ "iiiint;": '\U00002A0C',
+ "iiint;": '\U0000222D',
+ "iinfin;": '\U000029DC',
+ "iiota;": '\U00002129',
+ "ijlig;": '\U00000133',
+ "imacr;": '\U0000012B',
+ "image;": '\U00002111',
+ "imagline;": '\U00002110',
+ "imagpart;": '\U00002111',
+ "imath;": '\U00000131',
+ "imof;": '\U000022B7',
+ "imped;": '\U000001B5',
+ "in;": '\U00002208',
+ "incare;": '\U00002105',
+ "infin;": '\U0000221E',
+ "infintie;": '\U000029DD',
+ "inodot;": '\U00000131',
+ "int;": '\U0000222B',
+ "intcal;": '\U000022BA',
+ "integers;": '\U00002124',
+ "intercal;": '\U000022BA',
+ "intlarhk;": '\U00002A17',
+ "intprod;": '\U00002A3C',
+ "iocy;": '\U00000451',
+ "iogon;": '\U0000012F',
+ "iopf;": '\U0001D55A',
+ "iota;": '\U000003B9',
+ "iprod;": '\U00002A3C',
+ "iquest;": '\U000000BF',
+ "iscr;": '\U0001D4BE',
+ "isin;": '\U00002208',
+ "isinE;": '\U000022F9',
+ "isindot;": '\U000022F5',
+ "isins;": '\U000022F4',
+ "isinsv;": '\U000022F3',
+ "isinv;": '\U00002208',
+ "it;": '\U00002062',
+ "itilde;": '\U00000129',
+ "iukcy;": '\U00000456',
+ "iuml;": '\U000000EF',
+ "jcirc;": '\U00000135',
+ "jcy;": '\U00000439',
+ "jfr;": '\U0001D527',
+ "jmath;": '\U00000237',
+ "jopf;": '\U0001D55B',
+ "jscr;": '\U0001D4BF',
+ "jsercy;": '\U00000458',
+ "jukcy;": '\U00000454',
+ "kappa;": '\U000003BA',
+ "kappav;": '\U000003F0',
+ "kcedil;": '\U00000137',
+ "kcy;": '\U0000043A',
+ "kfr;": '\U0001D528',
+ "kgreen;": '\U00000138',
+ "khcy;": '\U00000445',
+ "kjcy;": '\U0000045C',
+ "kopf;": '\U0001D55C',
+ "kscr;": '\U0001D4C0',
+ "lAarr;": '\U000021DA',
+ "lArr;": '\U000021D0',
+ "lAtail;": '\U0000291B',
+ "lBarr;": '\U0000290E',
+ "lE;": '\U00002266',
+ "lEg;": '\U00002A8B',
+ "lHar;": '\U00002962',
+ "lacute;": '\U0000013A',
+ "laemptyv;": '\U000029B4',
+ "lagran;": '\U00002112',
+ "lambda;": '\U000003BB',
+ "lang;": '\U000027E8',
+ "langd;": '\U00002991',
+ "langle;": '\U000027E8',
+ "lap;": '\U00002A85',
+ "laquo;": '\U000000AB',
+ "larr;": '\U00002190',
+ "larrb;": '\U000021E4',
+ "larrbfs;": '\U0000291F',
+ "larrfs;": '\U0000291D',
+ "larrhk;": '\U000021A9',
+ "larrlp;": '\U000021AB',
+ "larrpl;": '\U00002939',
+ "larrsim;": '\U00002973',
+ "larrtl;": '\U000021A2',
+ "lat;": '\U00002AAB',
+ "latail;": '\U00002919',
+ "late;": '\U00002AAD',
+ "lbarr;": '\U0000290C',
+ "lbbrk;": '\U00002772',
+ "lbrace;": '\U0000007B',
+ "lbrack;": '\U0000005B',
+ "lbrke;": '\U0000298B',
+ "lbrksld;": '\U0000298F',
+ "lbrkslu;": '\U0000298D',
+ "lcaron;": '\U0000013E',
+ "lcedil;": '\U0000013C',
+ "lceil;": '\U00002308',
+ "lcub;": '\U0000007B',
+ "lcy;": '\U0000043B',
+ "ldca;": '\U00002936',
+ "ldquo;": '\U0000201C',
+ "ldquor;": '\U0000201E',
+ "ldrdhar;": '\U00002967',
+ "ldrushar;": '\U0000294B',
+ "ldsh;": '\U000021B2',
+ "le;": '\U00002264',
+ "leftarrow;": '\U00002190',
+ "leftarrowtail;": '\U000021A2',
+ "leftharpoondown;": '\U000021BD',
+ "leftharpoonup;": '\U000021BC',
+ "leftleftarrows;": '\U000021C7',
+ "leftrightarrow;": '\U00002194',
+ "leftrightarrows;": '\U000021C6',
+ "leftrightharpoons;": '\U000021CB',
+ "leftrightsquigarrow;": '\U000021AD',
+ "leftthreetimes;": '\U000022CB',
+ "leg;": '\U000022DA',
+ "leq;": '\U00002264',
+ "leqq;": '\U00002266',
+ "leqslant;": '\U00002A7D',
+ "les;": '\U00002A7D',
+ "lescc;": '\U00002AA8',
+ "lesdot;": '\U00002A7F',
+ "lesdoto;": '\U00002A81',
+ "lesdotor;": '\U00002A83',
+ "lesges;": '\U00002A93',
+ "lessapprox;": '\U00002A85',
+ "lessdot;": '\U000022D6',
+ "lesseqgtr;": '\U000022DA',
+ "lesseqqgtr;": '\U00002A8B',
+ "lessgtr;": '\U00002276',
+ "lesssim;": '\U00002272',
+ "lfisht;": '\U0000297C',
+ "lfloor;": '\U0000230A',
+ "lfr;": '\U0001D529',
+ "lg;": '\U00002276',
+ "lgE;": '\U00002A91',
+ "lhard;": '\U000021BD',
+ "lharu;": '\U000021BC',
+ "lharul;": '\U0000296A',
+ "lhblk;": '\U00002584',
+ "ljcy;": '\U00000459',
+ "ll;": '\U0000226A',
+ "llarr;": '\U000021C7',
+ "llcorner;": '\U0000231E',
+ "llhard;": '\U0000296B',
+ "lltri;": '\U000025FA',
+ "lmidot;": '\U00000140',
+ "lmoust;": '\U000023B0',
+ "lmoustache;": '\U000023B0',
+ "lnE;": '\U00002268',
+ "lnap;": '\U00002A89',
+ "lnapprox;": '\U00002A89',
+ "lne;": '\U00002A87',
+ "lneq;": '\U00002A87',
+ "lneqq;": '\U00002268',
+ "lnsim;": '\U000022E6',
+ "loang;": '\U000027EC',
+ "loarr;": '\U000021FD',
+ "lobrk;": '\U000027E6',
+ "longleftarrow;": '\U000027F5',
+ "longleftrightarrow;": '\U000027F7',
+ "longmapsto;": '\U000027FC',
+ "longrightarrow;": '\U000027F6',
+ "looparrowleft;": '\U000021AB',
+ "looparrowright;": '\U000021AC',
+ "lopar;": '\U00002985',
+ "lopf;": '\U0001D55D',
+ "loplus;": '\U00002A2D',
+ "lotimes;": '\U00002A34',
+ "lowast;": '\U00002217',
+ "lowbar;": '\U0000005F',
+ "loz;": '\U000025CA',
+ "lozenge;": '\U000025CA',
+ "lozf;": '\U000029EB',
+ "lpar;": '\U00000028',
+ "lparlt;": '\U00002993',
+ "lrarr;": '\U000021C6',
+ "lrcorner;": '\U0000231F',
+ "lrhar;": '\U000021CB',
+ "lrhard;": '\U0000296D',
+ "lrm;": '\U0000200E',
+ "lrtri;": '\U000022BF',
+ "lsaquo;": '\U00002039',
+ "lscr;": '\U0001D4C1',
+ "lsh;": '\U000021B0',
+ "lsim;": '\U00002272',
+ "lsime;": '\U00002A8D',
+ "lsimg;": '\U00002A8F',
+ "lsqb;": '\U0000005B',
+ "lsquo;": '\U00002018',
+ "lsquor;": '\U0000201A',
+ "lstrok;": '\U00000142',
+ "lt;": '\U0000003C',
+ "ltcc;": '\U00002AA6',
+ "ltcir;": '\U00002A79',
+ "ltdot;": '\U000022D6',
+ "lthree;": '\U000022CB',
+ "ltimes;": '\U000022C9',
+ "ltlarr;": '\U00002976',
+ "ltquest;": '\U00002A7B',
+ "ltrPar;": '\U00002996',
+ "ltri;": '\U000025C3',
+ "ltrie;": '\U000022B4',
+ "ltrif;": '\U000025C2',
+ "lurdshar;": '\U0000294A',
+ "luruhar;": '\U00002966',
+ "mDDot;": '\U0000223A',
+ "macr;": '\U000000AF',
+ "male;": '\U00002642',
+ "malt;": '\U00002720',
+ "maltese;": '\U00002720',
+ "map;": '\U000021A6',
+ "mapsto;": '\U000021A6',
+ "mapstodown;": '\U000021A7',
+ "mapstoleft;": '\U000021A4',
+ "mapstoup;": '\U000021A5',
+ "marker;": '\U000025AE',
+ "mcomma;": '\U00002A29',
+ "mcy;": '\U0000043C',
+ "mdash;": '\U00002014',
+ "measuredangle;": '\U00002221',
+ "mfr;": '\U0001D52A',
+ "mho;": '\U00002127',
+ "micro;": '\U000000B5',
+ "mid;": '\U00002223',
+ "midast;": '\U0000002A',
+ "midcir;": '\U00002AF0',
+ "middot;": '\U000000B7',
+ "minus;": '\U00002212',
+ "minusb;": '\U0000229F',
+ "minusd;": '\U00002238',
+ "minusdu;": '\U00002A2A',
+ "mlcp;": '\U00002ADB',
+ "mldr;": '\U00002026',
+ "mnplus;": '\U00002213',
+ "models;": '\U000022A7',
+ "mopf;": '\U0001D55E',
+ "mp;": '\U00002213',
+ "mscr;": '\U0001D4C2',
+ "mstpos;": '\U0000223E',
+ "mu;": '\U000003BC',
+ "multimap;": '\U000022B8',
+ "mumap;": '\U000022B8',
+ "nLeftarrow;": '\U000021CD',
+ "nLeftrightarrow;": '\U000021CE',
+ "nRightarrow;": '\U000021CF',
+ "nVDash;": '\U000022AF',
+ "nVdash;": '\U000022AE',
+ "nabla;": '\U00002207',
+ "nacute;": '\U00000144',
+ "nap;": '\U00002249',
+ "napos;": '\U00000149',
+ "napprox;": '\U00002249',
+ "natur;": '\U0000266E',
+ "natural;": '\U0000266E',
+ "naturals;": '\U00002115',
+ "nbsp;": '\U000000A0',
+ "ncap;": '\U00002A43',
+ "ncaron;": '\U00000148',
+ "ncedil;": '\U00000146',
+ "ncong;": '\U00002247',
+ "ncup;": '\U00002A42',
+ "ncy;": '\U0000043D',
+ "ndash;": '\U00002013',
+ "ne;": '\U00002260',
+ "neArr;": '\U000021D7',
+ "nearhk;": '\U00002924',
+ "nearr;": '\U00002197',
+ "nearrow;": '\U00002197',
+ "nequiv;": '\U00002262',
+ "nesear;": '\U00002928',
+ "nexist;": '\U00002204',
+ "nexists;": '\U00002204',
+ "nfr;": '\U0001D52B',
+ "nge;": '\U00002271',
+ "ngeq;": '\U00002271',
+ "ngsim;": '\U00002275',
+ "ngt;": '\U0000226F',
+ "ngtr;": '\U0000226F',
+ "nhArr;": '\U000021CE',
+ "nharr;": '\U000021AE',
+ "nhpar;": '\U00002AF2',
+ "ni;": '\U0000220B',
+ "nis;": '\U000022FC',
+ "nisd;": '\U000022FA',
+ "niv;": '\U0000220B',
+ "njcy;": '\U0000045A',
+ "nlArr;": '\U000021CD',
+ "nlarr;": '\U0000219A',
+ "nldr;": '\U00002025',
+ "nle;": '\U00002270',
+ "nleftarrow;": '\U0000219A',
+ "nleftrightarrow;": '\U000021AE',
+ "nleq;": '\U00002270',
+ "nless;": '\U0000226E',
+ "nlsim;": '\U00002274',
+ "nlt;": '\U0000226E',
+ "nltri;": '\U000022EA',
+ "nltrie;": '\U000022EC',
+ "nmid;": '\U00002224',
+ "nopf;": '\U0001D55F',
+ "not;": '\U000000AC',
+ "notin;": '\U00002209',
+ "notinva;": '\U00002209',
+ "notinvb;": '\U000022F7',
+ "notinvc;": '\U000022F6',
+ "notni;": '\U0000220C',
+ "notniva;": '\U0000220C',
+ "notnivb;": '\U000022FE',
+ "notnivc;": '\U000022FD',
+ "npar;": '\U00002226',
+ "nparallel;": '\U00002226',
+ "npolint;": '\U00002A14',
+ "npr;": '\U00002280',
+ "nprcue;": '\U000022E0',
+ "nprec;": '\U00002280',
+ "nrArr;": '\U000021CF',
+ "nrarr;": '\U0000219B',
+ "nrightarrow;": '\U0000219B',
+ "nrtri;": '\U000022EB',
+ "nrtrie;": '\U000022ED',
+ "nsc;": '\U00002281',
+ "nsccue;": '\U000022E1',
+ "nscr;": '\U0001D4C3',
+ "nshortmid;": '\U00002224',
+ "nshortparallel;": '\U00002226',
+ "nsim;": '\U00002241',
+ "nsime;": '\U00002244',
+ "nsimeq;": '\U00002244',
+ "nsmid;": '\U00002224',
+ "nspar;": '\U00002226',
+ "nsqsube;": '\U000022E2',
+ "nsqsupe;": '\U000022E3',
+ "nsub;": '\U00002284',
+ "nsube;": '\U00002288',
+ "nsubseteq;": '\U00002288',
+ "nsucc;": '\U00002281',
+ "nsup;": '\U00002285',
+ "nsupe;": '\U00002289',
+ "nsupseteq;": '\U00002289',
+ "ntgl;": '\U00002279',
+ "ntilde;": '\U000000F1',
+ "ntlg;": '\U00002278',
+ "ntriangleleft;": '\U000022EA',
+ "ntrianglelefteq;": '\U000022EC',
+ "ntriangleright;": '\U000022EB',
+ "ntrianglerighteq;": '\U000022ED',
+ "nu;": '\U000003BD',
+ "num;": '\U00000023',
+ "numero;": '\U00002116',
+ "numsp;": '\U00002007',
+ "nvDash;": '\U000022AD',
+ "nvHarr;": '\U00002904',
+ "nvdash;": '\U000022AC',
+ "nvinfin;": '\U000029DE',
+ "nvlArr;": '\U00002902',
+ "nvrArr;": '\U00002903',
+ "nwArr;": '\U000021D6',
+ "nwarhk;": '\U00002923',
+ "nwarr;": '\U00002196',
+ "nwarrow;": '\U00002196',
+ "nwnear;": '\U00002927',
+ "oS;": '\U000024C8',
+ "oacute;": '\U000000F3',
+ "oast;": '\U0000229B',
+ "ocir;": '\U0000229A',
+ "ocirc;": '\U000000F4',
+ "ocy;": '\U0000043E',
+ "odash;": '\U0000229D',
+ "odblac;": '\U00000151',
+ "odiv;": '\U00002A38',
+ "odot;": '\U00002299',
+ "odsold;": '\U000029BC',
+ "oelig;": '\U00000153',
+ "ofcir;": '\U000029BF',
+ "ofr;": '\U0001D52C',
+ "ogon;": '\U000002DB',
+ "ograve;": '\U000000F2',
+ "ogt;": '\U000029C1',
+ "ohbar;": '\U000029B5',
+ "ohm;": '\U000003A9',
+ "oint;": '\U0000222E',
+ "olarr;": '\U000021BA',
+ "olcir;": '\U000029BE',
+ "olcross;": '\U000029BB',
+ "oline;": '\U0000203E',
+ "olt;": '\U000029C0',
+ "omacr;": '\U0000014D',
+ "omega;": '\U000003C9',
+ "omicron;": '\U000003BF',
+ "omid;": '\U000029B6',
+ "ominus;": '\U00002296',
+ "oopf;": '\U0001D560',
+ "opar;": '\U000029B7',
+ "operp;": '\U000029B9',
+ "oplus;": '\U00002295',
+ "or;": '\U00002228',
+ "orarr;": '\U000021BB',
+ "ord;": '\U00002A5D',
+ "order;": '\U00002134',
+ "orderof;": '\U00002134',
+ "ordf;": '\U000000AA',
+ "ordm;": '\U000000BA',
+ "origof;": '\U000022B6',
+ "oror;": '\U00002A56',
+ "orslope;": '\U00002A57',
+ "orv;": '\U00002A5B',
+ "oscr;": '\U00002134',
+ "oslash;": '\U000000F8',
+ "osol;": '\U00002298',
+ "otilde;": '\U000000F5',
+ "otimes;": '\U00002297',
+ "otimesas;": '\U00002A36',
+ "ouml;": '\U000000F6',
+ "ovbar;": '\U0000233D',
+ "par;": '\U00002225',
+ "para;": '\U000000B6',
+ "parallel;": '\U00002225',
+ "parsim;": '\U00002AF3',
+ "parsl;": '\U00002AFD',
+ "part;": '\U00002202',
+ "pcy;": '\U0000043F',
+ "percnt;": '\U00000025',
+ "period;": '\U0000002E',
+ "permil;": '\U00002030',
+ "perp;": '\U000022A5',
+ "pertenk;": '\U00002031',
+ "pfr;": '\U0001D52D',
+ "phi;": '\U000003C6',
+ "phiv;": '\U000003D5',
+ "phmmat;": '\U00002133',
+ "phone;": '\U0000260E',
+ "pi;": '\U000003C0',
+ "pitchfork;": '\U000022D4',
+ "piv;": '\U000003D6',
+ "planck;": '\U0000210F',
+ "planckh;": '\U0000210E',
+ "plankv;": '\U0000210F',
+ "plus;": '\U0000002B',
+ "plusacir;": '\U00002A23',
+ "plusb;": '\U0000229E',
+ "pluscir;": '\U00002A22',
+ "plusdo;": '\U00002214',
+ "plusdu;": '\U00002A25',
+ "pluse;": '\U00002A72',
+ "plusmn;": '\U000000B1',
+ "plussim;": '\U00002A26',
+ "plustwo;": '\U00002A27',
+ "pm;": '\U000000B1',
+ "pointint;": '\U00002A15',
+ "popf;": '\U0001D561',
+ "pound;": '\U000000A3',
+ "pr;": '\U0000227A',
+ "prE;": '\U00002AB3',
+ "prap;": '\U00002AB7',
+ "prcue;": '\U0000227C',
+ "pre;": '\U00002AAF',
+ "prec;": '\U0000227A',
+ "precapprox;": '\U00002AB7',
+ "preccurlyeq;": '\U0000227C',
+ "preceq;": '\U00002AAF',
+ "precnapprox;": '\U00002AB9',
+ "precneqq;": '\U00002AB5',
+ "precnsim;": '\U000022E8',
+ "precsim;": '\U0000227E',
+ "prime;": '\U00002032',
+ "primes;": '\U00002119',
+ "prnE;": '\U00002AB5',
+ "prnap;": '\U00002AB9',
+ "prnsim;": '\U000022E8',
+ "prod;": '\U0000220F',
+ "profalar;": '\U0000232E',
+ "profline;": '\U00002312',
+ "profsurf;": '\U00002313',
+ "prop;": '\U0000221D',
+ "propto;": '\U0000221D',
+ "prsim;": '\U0000227E',
+ "prurel;": '\U000022B0',
+ "pscr;": '\U0001D4C5',
+ "psi;": '\U000003C8',
+ "puncsp;": '\U00002008',
+ "qfr;": '\U0001D52E',
+ "qint;": '\U00002A0C',
+ "qopf;": '\U0001D562',
+ "qprime;": '\U00002057',
+ "qscr;": '\U0001D4C6',
+ "quaternions;": '\U0000210D',
+ "quatint;": '\U00002A16',
+ "quest;": '\U0000003F',
+ "questeq;": '\U0000225F',
+ "quot;": '\U00000022',
+ "rAarr;": '\U000021DB',
+ "rArr;": '\U000021D2',
+ "rAtail;": '\U0000291C',
+ "rBarr;": '\U0000290F',
+ "rHar;": '\U00002964',
+ "racute;": '\U00000155',
+ "radic;": '\U0000221A',
+ "raemptyv;": '\U000029B3',
+ "rang;": '\U000027E9',
+ "rangd;": '\U00002992',
+ "range;": '\U000029A5',
+ "rangle;": '\U000027E9',
+ "raquo;": '\U000000BB',
+ "rarr;": '\U00002192',
+ "rarrap;": '\U00002975',
+ "rarrb;": '\U000021E5',
+ "rarrbfs;": '\U00002920',
+ "rarrc;": '\U00002933',
+ "rarrfs;": '\U0000291E',
+ "rarrhk;": '\U000021AA',
+ "rarrlp;": '\U000021AC',
+ "rarrpl;": '\U00002945',
+ "rarrsim;": '\U00002974',
+ "rarrtl;": '\U000021A3',
+ "rarrw;": '\U0000219D',
+ "ratail;": '\U0000291A',
+ "ratio;": '\U00002236',
+ "rationals;": '\U0000211A',
+ "rbarr;": '\U0000290D',
+ "rbbrk;": '\U00002773',
+ "rbrace;": '\U0000007D',
+ "rbrack;": '\U0000005D',
+ "rbrke;": '\U0000298C',
+ "rbrksld;": '\U0000298E',
+ "rbrkslu;": '\U00002990',
+ "rcaron;": '\U00000159',
+ "rcedil;": '\U00000157',
+ "rceil;": '\U00002309',
+ "rcub;": '\U0000007D',
+ "rcy;": '\U00000440',
+ "rdca;": '\U00002937',
+ "rdldhar;": '\U00002969',
+ "rdquo;": '\U0000201D',
+ "rdquor;": '\U0000201D',
+ "rdsh;": '\U000021B3',
+ "real;": '\U0000211C',
+ "realine;": '\U0000211B',
+ "realpart;": '\U0000211C',
+ "reals;": '\U0000211D',
+ "rect;": '\U000025AD',
+ "reg;": '\U000000AE',
+ "rfisht;": '\U0000297D',
+ "rfloor;": '\U0000230B',
+ "rfr;": '\U0001D52F',
+ "rhard;": '\U000021C1',
+ "rharu;": '\U000021C0',
+ "rharul;": '\U0000296C',
+ "rho;": '\U000003C1',
+ "rhov;": '\U000003F1',
+ "rightarrow;": '\U00002192',
+ "rightarrowtail;": '\U000021A3',
+ "rightharpoondown;": '\U000021C1',
+ "rightharpoonup;": '\U000021C0',
+ "rightleftarrows;": '\U000021C4',
+ "rightleftharpoons;": '\U000021CC',
+ "rightrightarrows;": '\U000021C9',
+ "rightsquigarrow;": '\U0000219D',
+ "rightthreetimes;": '\U000022CC',
+ "ring;": '\U000002DA',
+ "risingdotseq;": '\U00002253',
+ "rlarr;": '\U000021C4',
+ "rlhar;": '\U000021CC',
+ "rlm;": '\U0000200F',
+ "rmoust;": '\U000023B1',
+ "rmoustache;": '\U000023B1',
+ "rnmid;": '\U00002AEE',
+ "roang;": '\U000027ED',
+ "roarr;": '\U000021FE',
+ "robrk;": '\U000027E7',
+ "ropar;": '\U00002986',
+ "ropf;": '\U0001D563',
+ "roplus;": '\U00002A2E',
+ "rotimes;": '\U00002A35',
+ "rpar;": '\U00000029',
+ "rpargt;": '\U00002994',
+ "rppolint;": '\U00002A12',
+ "rrarr;": '\U000021C9',
+ "rsaquo;": '\U0000203A',
+ "rscr;": '\U0001D4C7',
+ "rsh;": '\U000021B1',
+ "rsqb;": '\U0000005D',
+ "rsquo;": '\U00002019',
+ "rsquor;": '\U00002019',
+ "rthree;": '\U000022CC',
+ "rtimes;": '\U000022CA',
+ "rtri;": '\U000025B9',
+ "rtrie;": '\U000022B5',
+ "rtrif;": '\U000025B8',
+ "rtriltri;": '\U000029CE',
+ "ruluhar;": '\U00002968',
+ "rx;": '\U0000211E',
+ "sacute;": '\U0000015B',
+ "sbquo;": '\U0000201A',
+ "sc;": '\U0000227B',
+ "scE;": '\U00002AB4',
+ "scap;": '\U00002AB8',
+ "scaron;": '\U00000161',
+ "sccue;": '\U0000227D',
+ "sce;": '\U00002AB0',
+ "scedil;": '\U0000015F',
+ "scirc;": '\U0000015D',
+ "scnE;": '\U00002AB6',
+ "scnap;": '\U00002ABA',
+ "scnsim;": '\U000022E9',
+ "scpolint;": '\U00002A13',
+ "scsim;": '\U0000227F',
+ "scy;": '\U00000441',
+ "sdot;": '\U000022C5',
+ "sdotb;": '\U000022A1',
+ "sdote;": '\U00002A66',
+ "seArr;": '\U000021D8',
+ "searhk;": '\U00002925',
+ "searr;": '\U00002198',
+ "searrow;": '\U00002198',
+ "sect;": '\U000000A7',
+ "semi;": '\U0000003B',
+ "seswar;": '\U00002929',
+ "setminus;": '\U00002216',
+ "setmn;": '\U00002216',
+ "sext;": '\U00002736',
+ "sfr;": '\U0001D530',
+ "sfrown;": '\U00002322',
+ "sharp;": '\U0000266F',
+ "shchcy;": '\U00000449',
+ "shcy;": '\U00000448',
+ "shortmid;": '\U00002223',
+ "shortparallel;": '\U00002225',
+ "shy;": '\U000000AD',
+ "sigma;": '\U000003C3',
+ "sigmaf;": '\U000003C2',
+ "sigmav;": '\U000003C2',
+ "sim;": '\U0000223C',
+ "simdot;": '\U00002A6A',
+ "sime;": '\U00002243',
+ "simeq;": '\U00002243',
+ "simg;": '\U00002A9E',
+ "simgE;": '\U00002AA0',
+ "siml;": '\U00002A9D',
+ "simlE;": '\U00002A9F',
+ "simne;": '\U00002246',
+ "simplus;": '\U00002A24',
+ "simrarr;": '\U00002972',
+ "slarr;": '\U00002190',
+ "smallsetminus;": '\U00002216',
+ "smashp;": '\U00002A33',
+ "smeparsl;": '\U000029E4',
+ "smid;": '\U00002223',
+ "smile;": '\U00002323',
+ "smt;": '\U00002AAA',
+ "smte;": '\U00002AAC',
+ "softcy;": '\U0000044C',
+ "sol;": '\U0000002F',
+ "solb;": '\U000029C4',
+ "solbar;": '\U0000233F',
+ "sopf;": '\U0001D564',
+ "spades;": '\U00002660',
+ "spadesuit;": '\U00002660',
+ "spar;": '\U00002225',
+ "sqcap;": '\U00002293',
+ "sqcup;": '\U00002294',
+ "sqsub;": '\U0000228F',
+ "sqsube;": '\U00002291',
+ "sqsubset;": '\U0000228F',
+ "sqsubseteq;": '\U00002291',
+ "sqsup;": '\U00002290',
+ "sqsupe;": '\U00002292',
+ "sqsupset;": '\U00002290',
+ "sqsupseteq;": '\U00002292',
+ "squ;": '\U000025A1',
+ "square;": '\U000025A1',
+ "squarf;": '\U000025AA',
+ "squf;": '\U000025AA',
+ "srarr;": '\U00002192',
+ "sscr;": '\U0001D4C8',
+ "ssetmn;": '\U00002216',
+ "ssmile;": '\U00002323',
+ "sstarf;": '\U000022C6',
+ "star;": '\U00002606',
+ "starf;": '\U00002605',
+ "straightepsilon;": '\U000003F5',
+ "straightphi;": '\U000003D5',
+ "strns;": '\U000000AF',
+ "sub;": '\U00002282',
+ "subE;": '\U00002AC5',
+ "subdot;": '\U00002ABD',
+ "sube;": '\U00002286',
+ "subedot;": '\U00002AC3',
+ "submult;": '\U00002AC1',
+ "subnE;": '\U00002ACB',
+ "subne;": '\U0000228A',
+ "subplus;": '\U00002ABF',
+ "subrarr;": '\U00002979',
+ "subset;": '\U00002282',
+ "subseteq;": '\U00002286',
+ "subseteqq;": '\U00002AC5',
+ "subsetneq;": '\U0000228A',
+ "subsetneqq;": '\U00002ACB',
+ "subsim;": '\U00002AC7',
+ "subsub;": '\U00002AD5',
+ "subsup;": '\U00002AD3',
+ "succ;": '\U0000227B',
+ "succapprox;": '\U00002AB8',
+ "succcurlyeq;": '\U0000227D',
+ "succeq;": '\U00002AB0',
+ "succnapprox;": '\U00002ABA',
+ "succneqq;": '\U00002AB6',
+ "succnsim;": '\U000022E9',
+ "succsim;": '\U0000227F',
+ "sum;": '\U00002211',
+ "sung;": '\U0000266A',
+ "sup;": '\U00002283',
+ "sup1;": '\U000000B9',
+ "sup2;": '\U000000B2',
+ "sup3;": '\U000000B3',
+ "supE;": '\U00002AC6',
+ "supdot;": '\U00002ABE',
+ "supdsub;": '\U00002AD8',
+ "supe;": '\U00002287',
+ "supedot;": '\U00002AC4',
+ "suphsol;": '\U000027C9',
+ "suphsub;": '\U00002AD7',
+ "suplarr;": '\U0000297B',
+ "supmult;": '\U00002AC2',
+ "supnE;": '\U00002ACC',
+ "supne;": '\U0000228B',
+ "supplus;": '\U00002AC0',
+ "supset;": '\U00002283',
+ "supseteq;": '\U00002287',
+ "supseteqq;": '\U00002AC6',
+ "supsetneq;": '\U0000228B',
+ "supsetneqq;": '\U00002ACC',
+ "supsim;": '\U00002AC8',
+ "supsub;": '\U00002AD4',
+ "supsup;": '\U00002AD6',
+ "swArr;": '\U000021D9',
+ "swarhk;": '\U00002926',
+ "swarr;": '\U00002199',
+ "swarrow;": '\U00002199',
+ "swnwar;": '\U0000292A',
+ "szlig;": '\U000000DF',
+ "target;": '\U00002316',
+ "tau;": '\U000003C4',
+ "tbrk;": '\U000023B4',
+ "tcaron;": '\U00000165',
+ "tcedil;": '\U00000163',
+ "tcy;": '\U00000442',
+ "tdot;": '\U000020DB',
+ "telrec;": '\U00002315',
+ "tfr;": '\U0001D531',
+ "there4;": '\U00002234',
+ "therefore;": '\U00002234',
+ "theta;": '\U000003B8',
+ "thetasym;": '\U000003D1',
+ "thetav;": '\U000003D1',
+ "thickapprox;": '\U00002248',
+ "thicksim;": '\U0000223C',
+ "thinsp;": '\U00002009',
+ "thkap;": '\U00002248',
+ "thksim;": '\U0000223C',
+ "thorn;": '\U000000FE',
+ "tilde;": '\U000002DC',
+ "times;": '\U000000D7',
+ "timesb;": '\U000022A0',
+ "timesbar;": '\U00002A31',
+ "timesd;": '\U00002A30',
+ "tint;": '\U0000222D',
+ "toea;": '\U00002928',
+ "top;": '\U000022A4',
+ "topbot;": '\U00002336',
+ "topcir;": '\U00002AF1',
+ "topf;": '\U0001D565',
+ "topfork;": '\U00002ADA',
+ "tosa;": '\U00002929',
+ "tprime;": '\U00002034',
+ "trade;": '\U00002122',
+ "triangle;": '\U000025B5',
+ "triangledown;": '\U000025BF',
+ "triangleleft;": '\U000025C3',
+ "trianglelefteq;": '\U000022B4',
+ "triangleq;": '\U0000225C',
+ "triangleright;": '\U000025B9',
+ "trianglerighteq;": '\U000022B5',
+ "tridot;": '\U000025EC',
+ "trie;": '\U0000225C',
+ "triminus;": '\U00002A3A',
+ "triplus;": '\U00002A39',
+ "trisb;": '\U000029CD',
+ "tritime;": '\U00002A3B',
+ "trpezium;": '\U000023E2',
+ "tscr;": '\U0001D4C9',
+ "tscy;": '\U00000446',
+ "tshcy;": '\U0000045B',
+ "tstrok;": '\U00000167',
+ "twixt;": '\U0000226C',
+ "twoheadleftarrow;": '\U0000219E',
+ "twoheadrightarrow;": '\U000021A0',
+ "uArr;": '\U000021D1',
+ "uHar;": '\U00002963',
+ "uacute;": '\U000000FA',
+ "uarr;": '\U00002191',
+ "ubrcy;": '\U0000045E',
+ "ubreve;": '\U0000016D',
+ "ucirc;": '\U000000FB',
+ "ucy;": '\U00000443',
+ "udarr;": '\U000021C5',
+ "udblac;": '\U00000171',
+ "udhar;": '\U0000296E',
+ "ufisht;": '\U0000297E',
+ "ufr;": '\U0001D532',
+ "ugrave;": '\U000000F9',
+ "uharl;": '\U000021BF',
+ "uharr;": '\U000021BE',
+ "uhblk;": '\U00002580',
+ "ulcorn;": '\U0000231C',
+ "ulcorner;": '\U0000231C',
+ "ulcrop;": '\U0000230F',
+ "ultri;": '\U000025F8',
+ "umacr;": '\U0000016B',
+ "uml;": '\U000000A8',
+ "uogon;": '\U00000173',
+ "uopf;": '\U0001D566',
+ "uparrow;": '\U00002191',
+ "updownarrow;": '\U00002195',
+ "upharpoonleft;": '\U000021BF',
+ "upharpoonright;": '\U000021BE',
+ "uplus;": '\U0000228E',
+ "upsi;": '\U000003C5',
+ "upsih;": '\U000003D2',
+ "upsilon;": '\U000003C5',
+ "upuparrows;": '\U000021C8',
+ "urcorn;": '\U0000231D',
+ "urcorner;": '\U0000231D',
+ "urcrop;": '\U0000230E',
+ "uring;": '\U0000016F',
+ "urtri;": '\U000025F9',
+ "uscr;": '\U0001D4CA',
+ "utdot;": '\U000022F0',
+ "utilde;": '\U00000169',
+ "utri;": '\U000025B5',
+ "utrif;": '\U000025B4',
+ "uuarr;": '\U000021C8',
+ "uuml;": '\U000000FC',
+ "uwangle;": '\U000029A7',
+ "vArr;": '\U000021D5',
+ "vBar;": '\U00002AE8',
+ "vBarv;": '\U00002AE9',
+ "vDash;": '\U000022A8',
+ "vangrt;": '\U0000299C',
+ "varepsilon;": '\U000003F5',
+ "varkappa;": '\U000003F0',
+ "varnothing;": '\U00002205',
+ "varphi;": '\U000003D5',
+ "varpi;": '\U000003D6',
+ "varpropto;": '\U0000221D',
+ "varr;": '\U00002195',
+ "varrho;": '\U000003F1',
+ "varsigma;": '\U000003C2',
+ "vartheta;": '\U000003D1',
+ "vartriangleleft;": '\U000022B2',
+ "vartriangleright;": '\U000022B3',
+ "vcy;": '\U00000432',
+ "vdash;": '\U000022A2',
+ "vee;": '\U00002228',
+ "veebar;": '\U000022BB',
+ "veeeq;": '\U0000225A',
+ "vellip;": '\U000022EE',
+ "verbar;": '\U0000007C',
+ "vert;": '\U0000007C',
+ "vfr;": '\U0001D533',
+ "vltri;": '\U000022B2',
+ "vopf;": '\U0001D567',
+ "vprop;": '\U0000221D',
+ "vrtri;": '\U000022B3',
+ "vscr;": '\U0001D4CB',
+ "vzigzag;": '\U0000299A',
+ "wcirc;": '\U00000175',
+ "wedbar;": '\U00002A5F',
+ "wedge;": '\U00002227',
+ "wedgeq;": '\U00002259',
+ "weierp;": '\U00002118',
+ "wfr;": '\U0001D534',
+ "wopf;": '\U0001D568',
+ "wp;": '\U00002118',
+ "wr;": '\U00002240',
+ "wreath;": '\U00002240',
+ "wscr;": '\U0001D4CC',
+ "xcap;": '\U000022C2',
+ "xcirc;": '\U000025EF',
+ "xcup;": '\U000022C3',
+ "xdtri;": '\U000025BD',
+ "xfr;": '\U0001D535',
+ "xhArr;": '\U000027FA',
+ "xharr;": '\U000027F7',
+ "xi;": '\U000003BE',
+ "xlArr;": '\U000027F8',
+ "xlarr;": '\U000027F5',
+ "xmap;": '\U000027FC',
+ "xnis;": '\U000022FB',
+ "xodot;": '\U00002A00',
+ "xopf;": '\U0001D569',
+ "xoplus;": '\U00002A01',
+ "xotime;": '\U00002A02',
+ "xrArr;": '\U000027F9',
+ "xrarr;": '\U000027F6',
+ "xscr;": '\U0001D4CD',
+ "xsqcup;": '\U00002A06',
+ "xuplus;": '\U00002A04',
+ "xutri;": '\U000025B3',
+ "xvee;": '\U000022C1',
+ "xwedge;": '\U000022C0',
+ "yacute;": '\U000000FD',
+ "yacy;": '\U0000044F',
+ "ycirc;": '\U00000177',
+ "ycy;": '\U0000044B',
+ "yen;": '\U000000A5',
+ "yfr;": '\U0001D536',
+ "yicy;": '\U00000457',
+ "yopf;": '\U0001D56A',
+ "yscr;": '\U0001D4CE',
+ "yucy;": '\U0000044E',
+ "yuml;": '\U000000FF',
+ "zacute;": '\U0000017A',
+ "zcaron;": '\U0000017E',
+ "zcy;": '\U00000437',
+ "zdot;": '\U0000017C',
+ "zeetrf;": '\U00002128',
+ "zeta;": '\U000003B6',
+ "zfr;": '\U0001D537',
+ "zhcy;": '\U00000436',
+ "zigrarr;": '\U000021DD',
+ "zopf;": '\U0001D56B',
+ "zscr;": '\U0001D4CF',
+ "zwj;": '\U0000200D',
+ "zwnj;": '\U0000200C',
+ "AElig": '\U000000C6',
+ "AMP": '\U00000026',
+ "Aacute": '\U000000C1',
+ "Acirc": '\U000000C2',
+ "Agrave": '\U000000C0',
+ "Aring": '\U000000C5',
+ "Atilde": '\U000000C3',
+ "Auml": '\U000000C4',
+ "COPY": '\U000000A9',
+ "Ccedil": '\U000000C7',
+ "ETH": '\U000000D0',
+ "Eacute": '\U000000C9',
+ "Ecirc": '\U000000CA',
+ "Egrave": '\U000000C8',
+ "Euml": '\U000000CB',
+ "GT": '\U0000003E',
+ "Iacute": '\U000000CD',
+ "Icirc": '\U000000CE',
+ "Igrave": '\U000000CC',
+ "Iuml": '\U000000CF',
+ "LT": '\U0000003C',
+ "Ntilde": '\U000000D1',
+ "Oacute": '\U000000D3',
+ "Ocirc": '\U000000D4',
+ "Ograve": '\U000000D2',
+ "Oslash": '\U000000D8',
+ "Otilde": '\U000000D5',
+ "Ouml": '\U000000D6',
+ "QUOT": '\U00000022',
+ "REG": '\U000000AE',
+ "THORN": '\U000000DE',
+ "Uacute": '\U000000DA',
+ "Ucirc": '\U000000DB',
+ "Ugrave": '\U000000D9',
+ "Uuml": '\U000000DC',
+ "Yacute": '\U000000DD',
+ "aacute": '\U000000E1',
+ "acirc": '\U000000E2',
+ "acute": '\U000000B4',
+ "aelig": '\U000000E6',
+ "agrave": '\U000000E0',
+ "amp": '\U00000026',
+ "aring": '\U000000E5',
+ "atilde": '\U000000E3',
+ "auml": '\U000000E4',
+ "brvbar": '\U000000A6',
+ "ccedil": '\U000000E7',
+ "cedil": '\U000000B8',
+ "cent": '\U000000A2',
+ "copy": '\U000000A9',
+ "curren": '\U000000A4',
+ "deg": '\U000000B0',
+ "divide": '\U000000F7',
+ "eacute": '\U000000E9',
+ "ecirc": '\U000000EA',
+ "egrave": '\U000000E8',
+ "eth": '\U000000F0',
+ "euml": '\U000000EB',
+ "frac12": '\U000000BD',
+ "frac14": '\U000000BC',
+ "frac34": '\U000000BE',
+ "gt": '\U0000003E',
+ "iacute": '\U000000ED',
+ "icirc": '\U000000EE',
+ "iexcl": '\U000000A1',
+ "igrave": '\U000000EC',
+ "iquest": '\U000000BF',
+ "iuml": '\U000000EF',
+ "laquo": '\U000000AB',
+ "lt": '\U0000003C',
+ "macr": '\U000000AF',
+ "micro": '\U000000B5',
+ "middot": '\U000000B7',
+ "nbsp": '\U000000A0',
+ "not": '\U000000AC',
+ "ntilde": '\U000000F1',
+ "oacute": '\U000000F3',
+ "ocirc": '\U000000F4',
+ "ograve": '\U000000F2',
+ "ordf": '\U000000AA',
+ "ordm": '\U000000BA',
+ "oslash": '\U000000F8',
+ "otilde": '\U000000F5',
+ "ouml": '\U000000F6',
+ "para": '\U000000B6',
+ "plusmn": '\U000000B1',
+ "pound": '\U000000A3',
+ "quot": '\U00000022',
+ "raquo": '\U000000BB',
+ "reg": '\U000000AE',
+ "sect": '\U000000A7',
+ "shy": '\U000000AD',
+ "sup1": '\U000000B9',
+ "sup2": '\U000000B2',
+ "sup3": '\U000000B3',
+ "szlig": '\U000000DF',
+ "thorn": '\U000000FE',
+ "times": '\U000000D7',
+ "uacute": '\U000000FA',
+ "ucirc": '\U000000FB',
+ "ugrave": '\U000000F9',
+ "uml": '\U000000A8',
+ "uuml": '\U000000FC',
+ "yacute": '\U000000FD',
+ "yen": '\U000000A5',
+ "yuml": '\U000000FF',
+}
+
+// HTML entities that are two unicode codepoints.
+var entity2 = map[string][2]rune{
+ // TODO(nigeltao): Handle replacements that are wider than their names.
+ // "nLt;": {'\u226A', '\u20D2'},
+ // "nGt;": {'\u226B', '\u20D2'},
+ "NotEqualTilde;": {'\u2242', '\u0338'},
+ "NotGreaterFullEqual;": {'\u2267', '\u0338'},
+ "NotGreaterGreater;": {'\u226B', '\u0338'},
+ "NotGreaterSlantEqual;": {'\u2A7E', '\u0338'},
+ "NotHumpDownHump;": {'\u224E', '\u0338'},
+ "NotHumpEqual;": {'\u224F', '\u0338'},
+ "NotLeftTriangleBar;": {'\u29CF', '\u0338'},
+ "NotLessLess;": {'\u226A', '\u0338'},
+ "NotLessSlantEqual;": {'\u2A7D', '\u0338'},
+ "NotNestedGreaterGreater;": {'\u2AA2', '\u0338'},
+ "NotNestedLessLess;": {'\u2AA1', '\u0338'},
+ "NotPrecedesEqual;": {'\u2AAF', '\u0338'},
+ "NotRightTriangleBar;": {'\u29D0', '\u0338'},
+ "NotSquareSubset;": {'\u228F', '\u0338'},
+ "NotSquareSuperset;": {'\u2290', '\u0338'},
+ "NotSubset;": {'\u2282', '\u20D2'},
+ "NotSucceedsEqual;": {'\u2AB0', '\u0338'},
+ "NotSucceedsTilde;": {'\u227F', '\u0338'},
+ "NotSuperset;": {'\u2283', '\u20D2'},
+ "ThickSpace;": {'\u205F', '\u200A'},
+ "acE;": {'\u223E', '\u0333'},
+ "bne;": {'\u003D', '\u20E5'},
+ "bnequiv;": {'\u2261', '\u20E5'},
+ "caps;": {'\u2229', '\uFE00'},
+ "cups;": {'\u222A', '\uFE00'},
+ "fjlig;": {'\u0066', '\u006A'},
+ "gesl;": {'\u22DB', '\uFE00'},
+ "gvertneqq;": {'\u2269', '\uFE00'},
+ "gvnE;": {'\u2269', '\uFE00'},
+ "lates;": {'\u2AAD', '\uFE00'},
+ "lesg;": {'\u22DA', '\uFE00'},
+ "lvertneqq;": {'\u2268', '\uFE00'},
+ "lvnE;": {'\u2268', '\uFE00'},
+ "nGg;": {'\u22D9', '\u0338'},
+ "nGtv;": {'\u226B', '\u0338'},
+ "nLl;": {'\u22D8', '\u0338'},
+ "nLtv;": {'\u226A', '\u0338'},
+ "nang;": {'\u2220', '\u20D2'},
+ "napE;": {'\u2A70', '\u0338'},
+ "napid;": {'\u224B', '\u0338'},
+ "nbump;": {'\u224E', '\u0338'},
+ "nbumpe;": {'\u224F', '\u0338'},
+ "ncongdot;": {'\u2A6D', '\u0338'},
+ "nedot;": {'\u2250', '\u0338'},
+ "nesim;": {'\u2242', '\u0338'},
+ "ngE;": {'\u2267', '\u0338'},
+ "ngeqq;": {'\u2267', '\u0338'},
+ "ngeqslant;": {'\u2A7E', '\u0338'},
+ "nges;": {'\u2A7E', '\u0338'},
+ "nlE;": {'\u2266', '\u0338'},
+ "nleqq;": {'\u2266', '\u0338'},
+ "nleqslant;": {'\u2A7D', '\u0338'},
+ "nles;": {'\u2A7D', '\u0338'},
+ "notinE;": {'\u22F9', '\u0338'},
+ "notindot;": {'\u22F5', '\u0338'},
+ "nparsl;": {'\u2AFD', '\u20E5'},
+ "npart;": {'\u2202', '\u0338'},
+ "npre;": {'\u2AAF', '\u0338'},
+ "npreceq;": {'\u2AAF', '\u0338'},
+ "nrarrc;": {'\u2933', '\u0338'},
+ "nrarrw;": {'\u219D', '\u0338'},
+ "nsce;": {'\u2AB0', '\u0338'},
+ "nsubE;": {'\u2AC5', '\u0338'},
+ "nsubset;": {'\u2282', '\u20D2'},
+ "nsubseteqq;": {'\u2AC5', '\u0338'},
+ "nsucceq;": {'\u2AB0', '\u0338'},
+ "nsupE;": {'\u2AC6', '\u0338'},
+ "nsupset;": {'\u2283', '\u20D2'},
+ "nsupseteqq;": {'\u2AC6', '\u0338'},
+ "nvap;": {'\u224D', '\u20D2'},
+ "nvge;": {'\u2265', '\u20D2'},
+ "nvgt;": {'\u003E', '\u20D2'},
+ "nvle;": {'\u2264', '\u20D2'},
+ "nvlt;": {'\u003C', '\u20D2'},
+ "nvltrie;": {'\u22B4', '\u20D2'},
+ "nvrtrie;": {'\u22B5', '\u20D2'},
+ "nvsim;": {'\u223C', '\u20D2'},
+ "race;": {'\u223D', '\u0331'},
+ "smtes;": {'\u2AAC', '\uFE00'},
+ "sqcaps;": {'\u2293', '\uFE00'},
+ "sqcups;": {'\u2294', '\uFE00'},
+ "varsubsetneq;": {'\u228A', '\uFE00'},
+ "varsubsetneqq;": {'\u2ACB', '\uFE00'},
+ "varsupsetneq;": {'\u228B', '\uFE00'},
+ "varsupsetneqq;": {'\u2ACC', '\uFE00'},
+ "vnsub;": {'\u2282', '\u20D2'},
+ "vnsup;": {'\u2283', '\u20D2'},
+ "vsubnE;": {'\u2ACB', '\uFE00'},
+ "vsubne;": {'\u228A', '\uFE00'},
+ "vsupnE;": {'\u2ACC', '\uFE00'},
+ "vsupne;": {'\u228B', '\uFE00'},
+}
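
Aside, for orientation rather than as part of the patch: a minimal, self-contained sketch of how a decoder consults these two tables — one codepoint per name in entity, two per name in entity2. The map literals and the decode helper here are illustrative stand-ins of mine, not the vendored data or API.

    package main

    import "fmt"

    // Tiny stand-ins for the vendored tables above.
    var entity = map[string]rune{"lt;": '\u003C', "amp;": '\u0026'}
    var entity2 = map[string][2]rune{"nvlt;": {'\u003C', '\u20D2'}}

    // decode resolves one entity name (including its trailing semicolon).
    func decode(name string) string {
        if r, ok := entity[name]; ok {
            return string(r)
        }
        if rs, ok := entity2[name]; ok {
            return string(rs[0]) + string(rs[1])
        }
        return "&" + name // unknown: leave the reference untouched
    }

    func main() {
        fmt.Println(decode("lt;"), decode("nvlt;")) // "<" and "<" + combining long vertical overlay
    }
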
diff --git a/vendor/golang.org/x/net/html/escape.go b/vendor/golang.org/x/net/html/escape.go
new file mode 100644
index 0000000..04c6bec
--- /dev/null
+++ b/vendor/golang.org/x/net/html/escape.go
@@ -0,0 +1,339 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+ "bytes"
+ "strings"
+ "unicode/utf8"
+)
+
+// These replacements permit compatibility with old numeric entities that
+// assumed Windows-1252 encoding.
+// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference
+var replacementTable = [...]rune{
+ '\u20AC', // First entry is what 0x80 should be replaced with.
+ '\u0081',
+ '\u201A',
+ '\u0192',
+ '\u201E',
+ '\u2026',
+ '\u2020',
+ '\u2021',
+ '\u02C6',
+ '\u2030',
+ '\u0160',
+ '\u2039',
+ '\u0152',
+ '\u008D',
+ '\u017D',
+ '\u008F',
+ '\u0090',
+ '\u2018',
+ '\u2019',
+ '\u201C',
+ '\u201D',
+ '\u2022',
+ '\u2013',
+ '\u2014',
+ '\u02DC',
+ '\u2122',
+ '\u0161',
+ '\u203A',
+ '\u0153',
+ '\u009D',
+ '\u017E',
+ '\u0178', // Last entry is 0x9F.
+ // 0x00->'\uFFFD' is handled programmatically.
+ // 0x0D->'\u000D' is a no-op.
+}
+
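
A quick illustration (mine, not the patch's) of how a numeric reference in the 0x80–0x9F range gets remapped via this table; the three-entry array is a stand-in for the full table above, assuming index 0 corresponds to code point 0x80 as the first-entry comment states.

    package main

    import "fmt"

    // Stand-in for the first few replacementTable entries above.
    var replacementTable = [...]rune{'\u20AC', '\u0081', '\u201A'}

    func main() {
        x := rune(0x82) // as parsed from the numeric reference "&#x82;"
        if 0x80 <= x && x <= 0x9F && int(x-0x80) < len(replacementTable) {
            x = replacementTable[x-0x80]
        }
        fmt.Printf("U+%04X %c\n", x, x) // U+201A ‚
    }
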
+// unescapeEntity reads an entity like "&lt;" from b[src:] and writes the
+// corresponding "<" to b[dst:], returning the incremented dst and src cursors.
+// Precondition: b[src] == '&' && dst <= src.
+// attribute should be true if parsing an attribute value.
+func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) {
+ // https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference
+
+ // i starts at 1 because we already know that s[0] == '&'.
+ i, s := 1, b[src:]
+
+ if len(s) <= 1 {
+ b[dst] = b[src]
+ return dst + 1, src + 1
+ }
+
+ if s[i] == '#' {
+ if len(s) <= 3 { // We need to have at least "&#.".
+ b[dst] = b[src]
+ return dst + 1, src + 1
+ }
+ i++
+ c := s[i]
+ hex := false
+ if c == 'x' || c == 'X' {
+ hex = true
+ i++
+ }
+
+ x := '\x00'
+ for i < len(s) {
+ c = s[i]
+ i++
+ if hex {
+ if '0' <= c && c <= '9' {
+ x = 16*x + rune(c) - '0'
+ continue
+ } else if 'a' <= c && c <= 'f' {
+ x = 16*x + rune(c) - 'a' + 10
+ continue
+ } else if 'A' <= c && c <= 'F' {
+ x = 16*x + rune(c) - 'A' + 10
+ continue
+ }
+ } else if '0' <= c && c <= '9' {
+ x = 10*x + rune(c) - '0'
+ continue
+ }
+ if c != ';' {
+ i--
+ }
+ break
+ }
+
+ if i <= 3 { // No characters matched.
+ b[dst] = b[src]
+ return dst + 1, src + 1
+ }
+
+ if 0x80 <= x && x <= 0x9F {
+ // Replace characters from Windows-1252 with UTF-8 equivalents.
+ x = replacementTable[x-0x80]
+ } else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF {
+ // Replace invalid characters with the replacement character.
+ x = '\uFFFD'
+ }
+
+ return dst + utf8.EncodeRune(b[dst:], x), src + i
+ }
+
+ // Consume the maximum number of characters possible, with the
+ // consumed characters matching one of the named references.
+
+ for i < len(s) {
+ c := s[i]
+ i++
+ // Lower-cased characters are more common in entities, so we check for them first.
+ if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
+ continue
+ }
+ if c != ';' {
+ i--
+ }
+ break
+ }
+
+ entityName := string(s[1:i])
+ if entityName == "" {
+ // No-op.
+ } else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' {
+ // No-op.
+ } else if x := entity[entityName]; x != 0 {
+ return dst + utf8.EncodeRune(b[dst:], x), src + i
+ } else if x := entity2[entityName]; x[0] != 0 {
+ dst1 := dst + utf8.EncodeRune(b[dst:], x[0])
+ return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i
+ } else if !attribute {
+ maxLen := len(entityName) - 1
+ if maxLen > longestEntityWithoutSemicolon {
+ maxLen = longestEntityWithoutSemicolon
+ }
+ for j := maxLen; j > 1; j-- {
+ if x := entity[entityName[:j]]; x != 0 {
+ return dst + utf8.EncodeRune(b[dst:], x), src + j + 1
+ }
+ }
+ }
+
+ dst1, src1 = dst+i, src+i
+ copy(b[dst:dst1], b[src:src1])
+ return dst1, src1
+}
+
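
The exported wrappers around this routine are not visible in this hunk, but the package does export UnescapeString from golang.org/x/net/html; assuming that API, the behaviors above can be exercised like so:

    package main

    import (
        "fmt"

        "golang.org/x/net/html"
    )

    func main() {
        // Named, numeric (including the Windows-1252 remaps), and
        // two-codepoint entities all pass through unescapeEntity internally.
        fmt.Println(html.UnescapeString("a &lt; b, &#x80;, x &nvlt; y"))
        // a < b, €, x <⃒ y
    }
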
+// unescape unescapes b's entities in-place, so that "a&lt;b" becomes "a<b".
+// attribute should be true if parsing an attribute value.

[...]

+// ... the '>' byte that, per above, we'd like to avoid escaping unless we
+// have to.
+//
+// Studying the summary table (and T actions in its '>' column) closely, we
+// only need to escape in states 43, 44, 49, 51 and 52. State 43 is at the
+// start of the comment data. State 52 is after a '!'. The other three states
+// are after a '-'.
+//
+// Our algorithm is thus to escape every '&' and to escape '>' if and only if:
+// - The '>' is after a '!' or '-' (in the unescaped data) or
+//   - The '>' is at the start of the comment data (after the opening "<!--").

[...]

+ if _, err := w.WriteString("-->"); err != nil {
+ return err
+ }
+ return nil
+ case DoctypeNode:
+ if _, err := w.WriteString("<!DOCTYPE "); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(n.Data); err != nil {
+ return err
+ }
+ if n.Attr != nil {
+ var p, s string
+ for _, a := range n.Attr {
+ switch a.Key {
+ case "public":
+ p = a.Val
+ case "system":
+ s = a.Val
+ }
+ }
+ if p != "" {
+ if _, err := w.WriteString(" PUBLIC "); err != nil {
+ return err
+ }
+ if err := writeQuoted(w, p); err != nil {
+ return err
+ }
+ if s != "" {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ if err := writeQuoted(w, s); err != nil {
+ return err
+ }
+ }
+ } else if s != "" {
+ if _, err := w.WriteString(" SYSTEM "); err != nil {
+ return err
+ }
+ if err := writeQuoted(w, s); err != nil {
+ return err
+ }
+ }
+ }
+ return w.WriteByte('>')
+ case RawNode:
+ _, err := w.WriteString(n.Data)
+ return err
+ default:
+ return errors.New("html: unknown node type")
+ }
+
+ // Render the opening tag.
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(n.Data); err != nil {
+ return err
+ }
+ for _, a := range n.Attr {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ if a.Namespace != "" {
+ if _, err := w.WriteString(a.Namespace); err != nil {
+ return err
+ }
+ if err := w.WriteByte(':'); err != nil {
+ return err
+ }
+ }
+ if _, err := w.WriteString(a.Key); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(`="`); err != nil {
+ return err
+ }
+ if err := escape(w, a.Val); err != nil {
+ return err
+ }
+ if err := w.WriteByte('"'); err != nil {
+ return err
+ }
+ }
+ if voidElements[n.Data] {
+ if n.FirstChild != nil {
+ return fmt.Errorf("html: void element <%s> has child nodes", n.Data)
+ }
+ _, err := w.WriteString("/>")
+ return err
+ }
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+
+ // Add initial newline where there is danger of a newline being ignored.
+ if c := n.FirstChild; c != nil && c.Type == TextNode && strings.HasPrefix(c.Data, "\n") {
+ switch n.Data {
+ case "pre", "listing", "textarea":
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Render any child nodes
+ if childTextNodesAreLiteral(n) {
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ if c.Type == TextNode {
+ if _, err := w.WriteString(c.Data); err != nil {
+ return err
+ }
+ } else {
+ if err := render1(w, c); err != nil {
+ return err
+ }
+ }
+ }
+ if n.Data == "plaintext" {
+ // Don't render anything else. <plaintext> must be the
+ // last element in the file, with no closing tag.
+ return plaintextAbort
+ }
+ } else {
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ if err := render1(w, c); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Render the closing tag.
+ if _, err := w.WriteString("</"); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(n.Data); err != nil {
+ return err
+ }
+ return w.WriteByte('>')
+}
+
+func childTextNodesAreLiteral(n *Node) bool {
+ // Per WHATWG HTML 13.3, if the parent of the current node is a style,
+ // script, xmp, iframe, noembed, noframes, or plaintext element, and the
+ // current node is a text node, append the value of the node's data
+ // literally. The specification is not explicit about it, but we only
+ // enforce this if we are in the HTML namespace (i.e. when the namespace is
+ // "").
+ // NOTE: we also always include noscript elements, although the
+ // specification states that they should only be rendered as such if
+ // scripting is enabled for the node (which is not something we track).
+ if n.Namespace != "" {
+ return false
+ }
+ switch n.Data {
+ case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp":
+ return true
+ default:
+ return false
+ }
+}
+
+// writeQuoted writes s to w surrounded by quotes. Normally it will use double
+// quotes, but if s contains a double quote, it will use single quotes.
+// It is used for writing the identifiers in a doctype declaration.
+// In valid HTML, they can't contain both types of quotes.
+func writeQuoted(w writer, s string) error {
+ var q byte = '"'
+ if strings.Contains(s, `"`) {
+ q = '\''
+ }
+ if err := w.WriteByte(q); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(s); err != nil {
+ return err
+ }
+ if err := w.WriteByte(q); err != nil {
+ return err
+ }
+ return nil
+}
+
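
A small standalone sketch (my example, mirroring writeQuoted's rule rather than calling it, since it is unexported; quoteFor is a hypothetical name):

    package main

    import (
        "fmt"
        "strings"
    )

    // quoteFor mirrors writeQuoted's choice of quote character: double quotes
    // by default, single quotes when the identifier itself contains a '"'.
    func quoteFor(s string) byte {
        if strings.Contains(s, `"`) {
            return '\''
        }
        return '"'
    }

    func main() {
        for _, id := range []string{"-//W3C//DTD HTML 4.01//EN", `say "hi"`} {
            q := quoteFor(id)
            fmt.Printf("%c%s%c\n", q, id, q)
        }
    }
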
+// Section 12.1.2, "Elements", gives this list of void elements. Void elements
+// are those that can't have any contents.
+var voidElements = map[string]bool{
+ "area": true,
+ "base": true,
+ "br": true,
+ "col": true,
+ "embed": true,
+ "hr": true,
+ "img": true,
+ "input": true,
+ "keygen": true, // "keygen" has been removed from the spec, but are kept here for backwards compatibility.
+ "link": true,
+ "meta": true,
+ "param": true,
+ "source": true,
+ "track": true,
+ "wbr": true,
+}
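
End to end, the render path above emits self-closing markup for these elements. A minimal usage sketch against the package's exported entry points (html.Render, html.Node, html.Attribute), assuming a hand-built node rather than parser output:

    package main

    import (
        "bytes"
        "fmt"

        "golang.org/x/net/html"
    )

    func main() {
        n := &html.Node{
            Type: html.ElementNode,
            Data: "img",
            Attr: []html.Attribute{{Key: "src", Val: "a.png"}},
        }
        var buf bytes.Buffer
        if err := html.Render(&buf, n); err != nil {
            panic(err)
        }
        fmt.Println(buf.String()) // <img src="a.png"/>
    }
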
diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go
new file mode 100644
index 0000000..3c57880
--- /dev/null
+++ b/vendor/golang.org/x/net/html/token.go
@@ -0,0 +1,1272 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "strconv"
+ "strings"
+
+ "golang.org/x/net/html/atom"
+)
+
+// A TokenType is the type of a Token.
+type TokenType uint32
+
+const (
+ // ErrorToken means that an error occurred during tokenization.
+ ErrorToken TokenType = iota
+ // TextToken means a text node.
+ TextToken
+ // A StartTagToken looks like <a>.
+ StartTagToken
+ // An EndTagToken looks like </a>.
+ EndTagToken
+ // A SelfClosingTagToken tag looks like <br/>.
+ SelfClosingTagToken
+ // A CommentToken looks like <!--x-->.
+ CommentToken
+ // A DoctypeToken looks like <!DOCTYPE x>
+ DoctypeToken
+)
+
+// ErrBufferExceeded means that the buffering limit was exceeded.
+var ErrBufferExceeded = errors.New("max buffer exceeded")
+
+// String returns a string representation of the TokenType.
+func (t TokenType) String() string {
+ switch t {
+ case ErrorToken:
+ return "Error"
+ case TextToken:
+ return "Text"
+ case StartTagToken:
+ return "StartTag"
+ case EndTagToken:
+ return "EndTag"
+ case SelfClosingTagToken:
+ return "SelfClosingTag"
+ case CommentToken:
+ return "Comment"
+ case DoctypeToken:
+ return "Doctype"
+ }
+ return "Invalid(" + strconv.Itoa(int(t)) + ")"
+}
+
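
Since TokenType implements the Stringer method above, values print readably; a one-liner for reference, assuming the exported constants:

    package main

    import (
        "fmt"

        "golang.org/x/net/html"
    )

    func main() {
        fmt.Println(html.StartTagToken, html.DoctypeToken, html.TokenType(42))
        // StartTag Doctype Invalid(42)
    }
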
+// An Attribute is an attribute namespace-key-value triple. Namespace is
+// non-empty for foreign attributes like xlink, Key is alphabetic (and hence
+// does not contain escapable characters like '&', '<' or '>'), and Val is
+// unescaped (it looks like "a"
+ case EndTagToken:
+ return "" + t.tagString() + ">"
+ case SelfClosingTagToken:
+ return "<" + t.tagString() + "/>"
+ case CommentToken:
+ return ""
+ case DoctypeToken:
+ return ""
+ }
+ return "Invalid(" + strconv.Itoa(int(t.Type)) + ")"
+}
+
+// span is a range of bytes in a Tokenizer's buffer. The start is inclusive,
+// the end is exclusive.
+type span struct {
+ start, end int
+}
+
+// A Tokenizer returns a stream of HTML Tokens.
+type Tokenizer struct {
+ // r is the source of the HTML text.
+ r io.Reader
+ // tt is the TokenType of the current token.
+ tt TokenType
+ // err is the first error encountered during tokenization. It is possible
+ // for tt != Error && err != nil to hold: this means that Next returned a
+ // valid token but the subsequent Next call will return an error token.
+ // For example, if the HTML text input was just "plain", then the first
+ // Next call would set z.err to io.EOF but return a TextToken, and all
+ // subsequent Next calls would return an ErrorToken.
+ // err is never reset. Once it becomes non-nil, it stays non-nil.
+ err error
+ // readErr is the error returned by the io.Reader r. It is separate from
+ // err because it is valid for an io.Reader to return (n int, err1 error)
+ // such that n > 0 && err1 != nil, and callers should always process the
+ // n > 0 bytes before considering the error err1.
+ readErr error
+ // buf[raw.start:raw.end] holds the raw bytes of the current token.
+ // buf[raw.end:] is buffered input that will yield future tokens.
+ raw span
+ buf []byte
+ // maxBuf limits the data buffered in buf. A value of 0 means unlimited.
+ maxBuf int
+ // buf[data.start:data.end] holds the raw bytes of the current token's data:
+ // a text token's text, a tag token's tag name, etc.
+ data span
+ // pendingAttr is the attribute key and value currently being tokenized.
+ // When complete, pendingAttr is pushed onto attr. nAttrReturned is
+ // incremented on each call to TagAttr.
+ pendingAttr [2]span
+ attr [][2]span
+ nAttrReturned int
+ // rawTag is the "script" in "" that closes the next token. If
+ // non-empty, the subsequent call to Next will return a raw or RCDATA text
+ // token: one that treats "
" as text instead of an element.
+ // rawTag's contents are lower-cased.
+ rawTag string
+ // textIsRaw is whether the current text token's data is not escaped.
+ textIsRaw bool
+ // convertNUL is whether NUL bytes in the current token's data should
+ // be converted into \ufffd replacement characters.
+ convertNUL bool
+ // allowCDATA is whether CDATA sections are allowed in the current context.
+ allowCDATA bool
+}
+
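
In practice this struct is driven through the package's exported NewTokenizer, Next, Token, and Err (Next and Token are defined further down this file); a minimal read loop, for reference:

    package main

    import (
        "fmt"
        "io"
        "strings"

        "golang.org/x/net/html"
    )

    func main() {
        z := html.NewTokenizer(strings.NewReader(`<p class="x">hi</p>`))
        for {
            tt := z.Next()
            if tt == html.ErrorToken {
                if z.Err() != io.EOF {
                    fmt.Println("tokenize error:", z.Err())
                }
                return
            }
            fmt.Printf("%-14v %v\n", tt, z.Token())
        }
    }
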
+// AllowCDATA sets whether or not the tokenizer recognizes <![CDATA[foo]]> as
+// the text "foo". The default value is false, which means to recognize it as
+// a bogus comment "<!-- [CDATA[foo]] -->" instead.
+//
+// Strictly speaking, an HTML5 compliant tokenizer should allow CDATA if and
+// only if tokenizing foreign content, such as MathML and SVG. However,
+// tracking foreign-contentness is difficult to do purely in the tokenizer,
+// as opposed to the parser, due to HTML integration points: an