diff --git a/go.mod b/go.mod
index 73af87099..2af72915e 100644
--- a/go.mod
+++ b/go.mod
@@ -1,28 +1,28 @@
module carvel.dev/kapp-controller

-go 1.23.8
+go 1.24.0

require (
carvel.dev/vendir v0.44.0
github.com/fatih/color v1.18.0 // indirect
github.com/gogo/protobuf v1.3.2
- github.com/google/go-cmp v0.6.0 // indirect
- github.com/prometheus/client_golang v1.18.0
+ github.com/google/go-cmp v0.7.0 // indirect
+ github.com/prometheus/client_golang v1.22.0
github.com/stretchr/testify v1.10.0
golang.org/x/crypto v0.36.0
golang.org/x/text v0.23.0 // indirect
golang.org/x/tools v0.29.0
gopkg.in/yaml.v3 v3.0.1 // indirect
- k8s.io/api v0.30.1
- k8s.io/apimachinery v0.30.1
- k8s.io/apiserver v0.30.1
- k8s.io/client-go v0.30.1
- k8s.io/code-generator v0.30.1
- k8s.io/kube-aggregator v0.22.17
- k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340
+ k8s.io/api v0.34.0
+ k8s.io/apimachinery v0.34.0
+ k8s.io/apiserver v0.34.0
+ k8s.io/client-go v0.34.0
+ k8s.io/code-generator v0.34.0
+ k8s.io/kube-aggregator v0.34.0
+ k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b
sigs.k8s.io/controller-runtime v0.18.5
sigs.k8s.io/controller-tools v0.7.0
- sigs.k8s.io/yaml v1.4.0
+ sigs.k8s.io/yaml v1.6.0
)

require (
@@ -30,101 +30,105 @@ require (
github.com/cppforlife/go-cli-ui v0.0.0-20220425131040-94f26b16bc14
github.com/go-logr/logr v1.4.2
github.com/k14s/semver/v4 v4.0.1-0.20210701191048-266d47ac6115
- github.com/prometheus/client_model v0.5.0
+ github.com/prometheus/client_model v0.6.1
github.com/spf13/cobra v1.9.1
golang.org/x/sync v0.13.0
gopkg.in/yaml.v2 v2.4.0
- k8s.io/component-base v0.30.1
- k8s.io/klog/v2 v2.120.1
- k8s.io/utils v0.0.0-20230726121419-3b25d923346b
+ k8s.io/component-base v0.34.0
+ k8s.io/klog/v2 v2.130.1
+ k8s.io/utils v0.0.0-20250604170112-4c0f3b243397
)

require (
+ cel.dev/expr v0.24.0 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
- github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
- github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
+ github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/carvel-dev/semver/v4 v4.0.1-0.20240402203627-beb83fbf25e4 // indirect
- github.com/cenkalti/backoff/v4 v4.2.1 // indirect
+ github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cppforlife/color v1.9.1-0.20200716202919-6706ac40b835 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/emicklei/go-restful/v3 v3.11.0 // indirect
+ github.com/emicklei/go-restful/v3 v3.12.2 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
- github.com/felixge/httpsnoop v1.0.3 // indirect
- github.com/fsnotify/fsnotify v1.7.0 // indirect
+ github.com/felixge/httpsnoop v1.0.4 // indirect
+ github.com/fsnotify/fsnotify v1.9.0 // indirect
+ github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-logr/zapr v1.3.0 // indirect
- github.com/go-openapi/jsonpointer v0.19.6 // indirect
+ github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
- github.com/go-openapi/swag v0.22.3 // indirect
+ github.com/go-openapi/swag v0.23.0 // indirect
github.com/gobuffalo/flect v0.2.3 // indirect
- github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
- github.com/google/cel-go v0.17.8 // indirect
- github.com/google/gnostic-models v0.6.8 // indirect
- github.com/google/gofuzz v1.2.0 // indirect
+ github.com/google/btree v1.1.3 // indirect
+ github.com/google/cel-go v0.26.0 // indirect
+ github.com/google/gnostic-models v0.7.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
github.com/hashicorp/go-version v1.2.1 // indirect
- github.com/imdario/mergo v0.3.12 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
+ github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
- github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
- github.com/modern-go/reflect2 v1.0.2 // indirect
+ github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/prometheus/common v0.45.0 // indirect
- github.com/prometheus/procfs v0.12.0 // indirect
+ github.com/prometheus/common v0.62.0 // indirect
+ github.com/prometheus/procfs v0.15.1 // indirect
github.com/spf13/pflag v1.0.6 // indirect
- github.com/stoewer/go-strcase v1.2.0 // indirect
+ github.com/stoewer/go-strcase v1.3.0 // indirect
github.com/vito/go-interact v1.0.1 // indirect
- go.etcd.io/etcd/api/v3 v3.5.10 // indirect
- go.etcd.io/etcd/client/pkg/v3 v3.5.10 // indirect
- go.etcd.io/etcd/client/v3 v3.5.10 // indirect
+ github.com/x448/float16 v0.8.4 // indirect
+ go.etcd.io/etcd/api/v3 v3.6.4 // indirect
+ go.etcd.io/etcd/client/pkg/v3 v3.6.4 // indirect
+ go.etcd.io/etcd/client/v3 v3.6.4 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect
- go.opentelemetry.io/otel v1.34.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect
- go.opentelemetry.io/otel/metric v1.34.0 // indirect
- go.opentelemetry.io/otel/sdk v1.32.0 // indirect
- go.opentelemetry.io/otel/trace v1.34.0 // indirect
- go.opentelemetry.io/proto/otlp v1.0.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
+ go.opentelemetry.io/otel v1.35.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 // indirect
+ go.opentelemetry.io/otel/metric v1.35.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.34.0 // indirect
+ go.opentelemetry.io/otel/trace v1.35.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.5.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
- go.uber.org/zap v1.26.0 // indirect
- golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
+ go.uber.org/zap v1.27.0 // indirect
+ go.yaml.in/yaml/v2 v2.4.2 // indirect
+ go.yaml.in/yaml/v3 v3.0.4 // indirect
+ golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/mod v0.22.0 // indirect
golang.org/x/net v0.38.0 // indirect
- golang.org/x/oauth2 v0.25.0 // indirect
+ golang.org/x/oauth2 v0.27.0 // indirect
golang.org/x/sys v0.32.0 // indirect
golang.org/x/term v0.30.0 // indirect
- golang.org/x/time v0.3.0 // indirect
+ golang.org/x/time v0.9.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20241202173237-19429a94021a // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20250127172529-29210b9bc287 // indirect
- google.golang.org/grpc v1.70.0 // indirect
- google.golang.org/protobuf v1.36.4 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect
+ google.golang.org/grpc v1.72.1 // indirect
+ google.golang.org/protobuf v1.36.5 // indirect
+ gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
k8s.io/apiextensions-apiserver v0.30.1 // indirect
- k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect
- k8s.io/kms v0.30.1 // indirect
- sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 // indirect
- sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
- sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
+ k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f // indirect
+ k8s.io/kms v0.34.0 // indirect
+ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect
+ sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
+ sigs.k8s.io/randfill v1.0.0 // indirect
+ sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
)

replace cloud.google.com/go => cloud.google.com/go v0.60.0
diff --git a/go.sum b/go.sum
index 20db25c74..d2793a862 100644
--- a/go.sum
+++ b/go.sum
@@ -1,5 +1,7 @@
carvel.dev/vendir v0.44.0 h1:vfq5KgGbbLlxHrE0prY7gZgiEQpjwo4lS2akCaVkcxA=
carvel.dev/vendir v0.44.0/go.mod h1:gslrJ0HPiy8gtJYsQZHzIVuGfOG0nfDKDupEm7uBWVQ=
+cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=
+cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
cloud.google.com/go v0.60.0/go.mod h1:yw2G51M9IfRboUH61Us8GqCeF1PzPblB823Mn2q2eAU=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -38,12 +40,11 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
-github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
+github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
+github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -59,8 +60,8 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/carvel-dev/semver/v4 v4.0.1-0.20240402203627-beb83fbf25e4 h1:F4rZiMGZyC66j9VB7doVOE4tFHF1yNEihQlOuht4jmM=
github.com/carvel-dev/semver/v4 v4.0.1-0.20240402203627-beb83fbf25e4/go.mod h1:4cFTBLAr/U11ykiEEQMccu4uJ1i0GS+atJmeETHCFtI=
-github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
-github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
@@ -110,8 +111,8 @@ github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+m
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
-github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
+github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
@@ -128,14 +129,16 @@ github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGE
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
-github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
-github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
+github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
+github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
+github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
@@ -159,20 +162,23 @@ github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
+github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
+github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
-github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
+github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
+github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/gobuffalo/flect v0.2.3 h1:f/ZukRnSNA/DUpSNDadko7Qc0PhGvsew35p/2tu+CRY=
github.com/gobuffalo/flect v0.2.3/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
@@ -181,16 +187,13 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
-github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
+github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/glog v1.2.3 h1:oDTdz9f5VGVVNGu/Q7UXKWYsD0873HXLHdJUNBsSEKM=
-github.com/golang/glog v1.2.3/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
@@ -214,12 +217,13 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
-github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto=
-github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
-github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
-github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
+github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
+github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI=
+github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM=
+github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
+github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -229,17 +233,16 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
-github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
+github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -250,19 +253,22 @@ github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2c
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
-github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
+github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
+github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1 h1:qnpSQwGEnkcRpTqNOIR6bJbR0gAorgP9CSALpRcKoAA=
+github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1/go.mod h1:lXGCsh6c22WGtjr+qGHj1otzZpV/1kwTMAqkwZsnWRU=
+github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0 h1:FbSCl+KggFl+Ocym490i/EyXF4lPgLoUtcSWquBM0Rs=
+github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0/go.mod h1:qOchhhIlmRcqk/O9uCo/puJlyo07YINaIqdZfZG3Jkc=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -288,14 +294,13 @@ github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/J
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
-github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
+github.com/jonboulle/clockwork v0.5.0 h1:Hyh9A8u51kptdkR+cqRpT1EebBwTn1oK9YfGYbdFz6I=
+github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7XN3SzBPjZF60=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
@@ -314,6 +319,8 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
+github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
@@ -329,6 +336,8 @@ github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -349,8 +358,6 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
-github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
@@ -368,8 +375,9 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
@@ -388,14 +396,14 @@ github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1ls
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
-github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8=
-github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs=
+github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
+github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.14.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
-github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk=
-github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg=
+github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
+github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
@@ -415,28 +423,28 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
-github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
+github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
+github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
-github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM=
-github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
+github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
+github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
-github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
@@ -480,12 +488,15 @@ github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
-github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
+github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
+github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@@ -504,37 +515,38 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
github.com/vito/go-interact v1.0.1 h1:O8xi8c93bRUv2Tb/v6HdiuGc+WnWt+AQzF74MOOdlBs=
github.com/vito/go-interact v1.0.1/go.mod h1:HrdHSJXD2yn1MhlTwSIMeFgQ5WftiIorszVGd3S/DAA=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 h1:S2dVYn90KE98chqDkyE9Z4N61UnQd+KOfgp5Iu53llk=
+github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
-go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=
-go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
+go.etcd.io/bbolt v1.4.2 h1:IrUHp260R8c+zYx/Tm8QZr04CX+qWS5PGfPdevhdm1I=
+go.etcd.io/bbolt v1.4.2/go.mod h1:Is8rSHO/b4f3XigBC0lL0+4FwAQv3HXEEIgFMuKHceM=
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
-go.etcd.io/etcd/api/v3 v3.5.10 h1:szRajuUUbLyppkhs9K6BRtjY37l66XQQmw7oZRANE4k=
-go.etcd.io/etcd/api/v3 v3.5.10/go.mod h1:TidfmT4Uycad3NM/o25fG3J07odo4GBB9hoxaodFCtI=
+go.etcd.io/etcd/api/v3 v3.6.4 h1:7F6N7toCKcV72QmoUKa23yYLiiljMrT4xCeBL9BmXdo=
+go.etcd.io/etcd/api/v3 v3.6.4/go.mod h1:eFhhvfR8Px1P6SEuLT600v+vrhdDTdcfMzmnxVXXSbk=
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
-go.etcd.io/etcd/client/pkg/v3 v3.5.10 h1:kfYIdQftBnbAq8pUWFXfpuuxFSKzlmM5cSn76JByiT0=
-go.etcd.io/etcd/client/pkg/v3 v3.5.10/go.mod h1:DYivfIviIuQ8+/lCq4vcxuseg2P2XbHygkKwFo9fc8U=
+go.etcd.io/etcd/client/pkg/v3 v3.6.4 h1:9HBYrjppeOfFjBjaMTRxT3R7xT0GLK8EJMVC4xg6ok0=
+go.etcd.io/etcd/client/pkg/v3 v3.6.4/go.mod h1:sbdzr2cl3HzVmxNw//PH7aLGVtY4QySjQFuaCgcRFAI=
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
-go.etcd.io/etcd/client/v2 v2.305.10 h1:MrmRktzv/XF8CvtQt+P6wLUlURaNpSDJHFZhe//2QE4=
-go.etcd.io/etcd/client/v2 v2.305.10/go.mod h1:m3CKZi69HzilhVqtPDcjhSGp+kA1OmbNn0qamH80xjA=
go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
-go.etcd.io/etcd/client/v3 v3.5.10 h1:W9TXNZ+oB3MCd/8UjxHTWK5J9Nquw9fQBLJd5ne5/Ao=
-go.etcd.io/etcd/client/v3 v3.5.10/go.mod h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc=
+go.etcd.io/etcd/client/v3 v3.6.4 h1:YOMrCfMhRzY8NgtzUsHl8hC2EBSnuqbR3dh84Uryl7A=
+go.etcd.io/etcd/client/v3 v3.6.4/go.mod h1:jaNNHCyg2FdALyKWnd7hxZXZxZANb0+KGY+YQaEMISo=
go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE=
-go.etcd.io/etcd/pkg/v3 v3.5.10 h1:WPR8K0e9kWl1gAhB5A7gEa5ZBTNkT9NdNWrR8Qpo1CM=
-go.etcd.io/etcd/pkg/v3 v3.5.10/go.mod h1:TKTuCKKcF1zxmfKWDkfz5qqYaE3JncKKZPFf8c1nFUs=
+go.etcd.io/etcd/pkg/v3 v3.6.4 h1:fy8bmXIec1Q35/jRZ0KOes8vuFxbvdN0aAFqmEfJZWA=
+go.etcd.io/etcd/pkg/v3 v3.6.4/go.mod h1:kKcYWP8gHuBRcteyv6MXWSN0+bVMnfgqiHueIZnKMtE=
go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc=
-go.etcd.io/etcd/raft/v3 v3.5.10 h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA=
-go.etcd.io/etcd/raft/v3 v3.5.10/go.mod h1:odD6kr8XQXTy9oQnyMPBOr0TVe+gT0neQhElQ6jbGRc=
go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4=
-go.etcd.io/etcd/server/v3 v3.5.10 h1:4NOGyOwD5sUZ22PiWYKmfxqoeh72z6EhYjNosKGLmZg=
-go.etcd.io/etcd/server/v3 v3.5.10/go.mod h1:gBplPHfs6YI0L+RpGkTQO7buDbHv5HJGG/Bst0/zIPo=
+go.etcd.io/etcd/server/v3 v3.6.4 h1:LsCA7CzjVt+8WGrdsnh6RhC0XqCsLkBly3ve5rTxMAU=
+go.etcd.io/etcd/server/v3 v3.6.4/go.mod h1:aYCL/h43yiONOv0QIR82kH/2xZ7m+IWYjzRmyQfnCAg=
+go.etcd.io/raft/v3 v3.6.0 h1:5NtvbDVYpnfZWcIHgGRk9DyzkBIXOi8j+DDp1IcnUWQ=
+go.etcd.io/raft/v3 v3.6.0/go.mod h1:nLvLevg6+xrVtHUmVaTcTz603gQPHfh7kUAwV6YpfGo=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
@@ -544,36 +556,36 @@ go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJyS
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 h1:rgMkmiGfix9vFJDcDi1PK8WEQP4FLQwLDfhp5ZLpFeE=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0/go.mod h1:ijPqXp5P6IRRByFVVg9DY8P5HkxkHE5ARIa+86aXPf4=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
-go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY=
-go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI=
+go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
+go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE=
go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
-go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ=
-go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE=
+go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M=
+go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=
go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
-go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4=
-go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU=
+go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A=
+go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU=
go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
-go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU=
-go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ=
+go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk=
+go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w=
go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
-go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k=
-go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE=
+go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs=
+go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
-go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
+go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
+go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
@@ -585,8 +597,12 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
-go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
-go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
+go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
+go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
+go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -606,8 +622,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA=
-golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
+golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
+golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -664,7 +680,6 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -673,8 +688,8 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
-golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
+golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -759,8 +774,8 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
-golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
+golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -850,12 +865,10 @@ google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g=
-google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8=
-google.golang.org/genproto/googleapis/api v0.0.0-20241202173237-19429a94021a h1:OAiGFfOiA0v9MRYsSidp3ubZaBnteRUyn3xB2ZQ5G/E=
-google.golang.org/genproto/googleapis/api v0.0.0-20241202173237-19429a94021a/go.mod h1:jehYqy3+AhJU9ve55aNOaSml7wUXjF9x6z2LcCfpAhY=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250127172529-29210b9bc287 h1:J1H9f+LEdWAfHcez/4cvaVBox7cOYT+IU6rgqj5x++8=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250127172529-29210b9bc287/go.mod h1:8BS3B93F/U1juMFq9+EDk+qOT5CO1R9IzXxG3PTqiRk=
+google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950=
+google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -872,8 +885,8 @@ google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ=
-google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw=
+google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA=
+google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -886,8 +899,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.36.4 h1:6A3ZDJHn/eNqc1i+IdefRzy/9PokBTPvcqMySR7NNIM=
-google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
+google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -896,6 +909,8 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
+gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
@@ -930,70 +945,62 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.22.2/go.mod h1:y3ydYpLJAaDI+BbSe2xmGcqxiWHmWjkEeIbiwHvnPR8=
-k8s.io/api v0.22.17/go.mod h1:6qVojJ3y+qIq7JSMwTH0BcPHl3dch4HefIC+4nguZhs=
-k8s.io/api v0.30.1 h1:kCm/6mADMdbAxmIh0LBjS54nQBE+U4KmbCfIkF5CpJY=
-k8s.io/api v0.30.1/go.mod h1:ddbN2C0+0DIiPntan/bye3SW3PdwLa11/0yqwvuRrJM=
+k8s.io/api v0.34.0 h1:L+JtP2wDbEYPUeNGbeSa/5GwFtIA662EmT2YSLOkAVE=
+k8s.io/api v0.34.0/go.mod h1:YzgkIzOOlhl9uwWCZNqpw6RJy9L2FK4dlJeayUoydug=
k8s.io/apiextensions-apiserver v0.22.2/go.mod h1:2E0Ve/isxNl7tWLSUDgi6+cmwHi5fQRdwGVCxbC+KFA=
k8s.io/apiextensions-apiserver v0.30.1 h1:4fAJZ9985BmpJG6PkoxVRpXv9vmPUOVzl614xarePws=
k8s.io/apiextensions-apiserver v0.30.1/go.mod h1:R4GuSrlhgq43oRY9sF2IToFh7PVlF1JjfWdoG3pixk4=
k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
-k8s.io/apimachinery v0.22.17/go.mod h1:ZvVLP5iLhwVFg2Yx9Gh5W0um0DUauExbRhe+2Z8I1EU=
-k8s.io/apimachinery v0.30.1 h1:ZQStsEfo4n65yAdlGTfP/uSHMQSoYzU/oeEbkmF7P2U=
-k8s.io/apimachinery v0.30.1/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
+k8s.io/apimachinery v0.34.0 h1:eR1WO5fo0HyoQZt1wdISpFDffnWOvFLOOeJ7MgIv4z0=
+k8s.io/apimachinery v0.34.0/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
k8s.io/apiserver v0.22.2/go.mod h1:vrpMmbyjWrgdyOvZTSpsusQq5iigKNWv9o9KlDAbBHI=
-k8s.io/apiserver v0.22.17/go.mod h1:zNXYCtXZ91AkmIUZgQ8lT9vdlDqgSkokJpds/F6DdGU=
-k8s.io/apiserver v0.30.1 h1:BEWEe8bzS12nMtDKXzCF5Q5ovp6LjjYkSp8qOPk8LZ8=
-k8s.io/apiserver v0.30.1/go.mod h1:i87ZnQ+/PGAmSbD/iEKM68bm1D5reX8fO4Ito4B01mo=
+k8s.io/apiserver v0.34.0 h1:Z51fw1iGMqN7uJ1kEaynf2Aec1Y774PqU+FVWCFV3Jg=
+k8s.io/apiserver v0.34.0/go.mod h1:52ti5YhxAvewmmpVRqlASvaqxt0gKJxvCeW7ZrwgazQ=
k8s.io/client-go v0.22.2/go.mod h1:sAlhrkVDf50ZHx6z4K0S40wISNTarf1r800F+RlCF6U=
-k8s.io/client-go v0.22.17/go.mod h1:SQPVpN+E/5Q/aSV7fYDT8VKVdaljhxI/t/84ADVJoC4=
-k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q=
-k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc=
+k8s.io/client-go v0.34.0 h1:YoWv5r7bsBfb0Hs2jh8SOvFbKzzxyNo0nSb0zC19KZo=
+k8s.io/client-go v0.34.0/go.mod h1:ozgMnEKXkRjeMvBZdV1AijMHLTh3pbACPvK7zFR+QQY=
k8s.io/code-generator v0.22.2/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o=
-k8s.io/code-generator v0.22.17/go.mod h1:iOZwYADSgFPNGWfqHFfg1V0TNJnl1t0WyZluQp4baqU=
-k8s.io/code-generator v0.30.1 h1:ZsG++q5Vt0ScmKCeLhynUuWgcwFGg1Hl1AGfatqPJBI=
-k8s.io/code-generator v0.30.1/go.mod h1:hFgxRsvOUg79mbpbVKfjJvRhVz1qLoe40yZDJ/hwRH4=
+k8s.io/code-generator v0.34.0 h1:Ze2i1QsvUprIlX3oHiGv09BFQRLCz+StA8qKwwFzees=
+k8s.io/code-generator v0.34.0/go.mod h1:Py2+4w2HXItL8CGhks8uI/wS3Y93wPKO/9mBQUYNua0=
k8s.io/component-base v0.22.2/go.mod h1:5Br2QhI9OTe79p+TzPe9JKNQYvEKbq9rTJDWllunGug=
-k8s.io/component-base v0.22.17/go.mod h1:Mrcvmxs+Ctx/xCYGWoFAvfZO9DC4gDgLtUbPJ4PjjUE=
-k8s.io/component-base v0.30.1 h1:bvAtlPh1UrdaZL20D9+sWxsJljMi0QZ3Lmw+kmZAaxQ=
-k8s.io/component-base v0.30.1/go.mod h1:e/X9kDiOebwlI41AvBHuWdqFriSRrX50CdwA9TFaHLI=
+k8s.io/component-base v0.34.0 h1:bS8Ua3zlJzapklsB1dZgjEJuJEeHjj8yTu1gxE2zQX8=
+k8s.io/component-base v0.34.0/go.mod h1:RSCqUdvIjjrEm81epPcjQ/DS+49fADvGSCkIP3IC6vg=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
-k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo=
-k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8=
+k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f h1:SLb+kxmzfA87x4E4brQzB33VBbT2+x7Zq9ROIHmGn9Q=
+k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
-k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
-k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kms v0.30.1 h1:gEIbEeCbFiaN2tNfp/EUhFdGr5/CSj8Eyq6Mkr7cCiY=
-k8s.io/kms v0.30.1/go.mod h1:GrMurD0qk3G4yNgGcsCEmepqf9KyyIrTXYR2lyUOJC4=
-k8s.io/kube-aggregator v0.22.17 h1:E8T1McXbB31Z7L71nR8/lMtWS01bhOE7AF8TF/sqDHw=
-k8s.io/kube-aggregator v0.22.17/go.mod h1:J557nueFVurHA1JiDrxT1HlgygNQ+2exsTVUXiz2T7k=
+k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
+k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
+k8s.io/kms v0.34.0 h1:u+/rcxQ3Jr7gC9AY5nXuEnBcGEB7ZOIJ9cdLdyHyEjQ=
+k8s.io/kms v0.34.0/go.mod h1:s1CFkLG7w9eaTYvctOxosx88fl4spqmixnNpys0JAtM=
+k8s.io/kube-aggregator v0.34.0 h1:XE4u+HOYkj0g44sblhTtPv+QyIIK7sJxrIlia0731kE=
+k8s.io/kube-aggregator v0.34.0/go.mod h1:GIUqdChXVC448Vp2Wgxf0m6fir7Xt3A2TAZcs2JNG1Y=
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
-k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
-k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
-k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
+k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA=
+k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
-k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
+k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 h1:/U5vjBbQn3RChhv7P11uhYvCSm5G2GaIi5AIGBS6r4c=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0/go.mod h1:z7+wmGM2dfIiLRfrC6jb5kV2Mq/sK1ZP303cxzkV5Y4=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
sigs.k8s.io/controller-runtime v0.18.5 h1:nTHio/W+Q4aBlQMgbnC5hZb4IjIidyrizMai9P6n4Rk=
sigs.k8s.io/controller-runtime v0.18.5/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg=
sigs.k8s.io/controller-tools v0.7.0 h1:iZIz1vEcavyEfxjcTLs1WH/MPf4vhPCtTKhoHqV8/G0=
sigs.k8s.io/controller-tools v0.7.0/go.mod h1:bpBAo0VcSDDLuWt47evLhMLPxRPxMDInTEH/YbdeMK0=
-sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
-sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
+sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
+sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
+sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
+sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
-sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
-sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
-sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
+sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
+sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
diff --git a/vendor/cel.dev/expr/.bazelversion b/vendor/cel.dev/expr/.bazelversion
new file mode 100644
index 000000000..13c50892b
--- /dev/null
+++ b/vendor/cel.dev/expr/.bazelversion
@@ -0,0 +1,2 @@
+7.3.2
+# Keep this pinned version in parity with cel-go
diff --git a/vendor/cel.dev/expr/.gitattributes b/vendor/cel.dev/expr/.gitattributes
new file mode 100644
index 000000000..3de1ec213
--- /dev/null
+++ b/vendor/cel.dev/expr/.gitattributes
@@ -0,0 +1,2 @@
+*.pb.go linguist-generated=true
+*.pb.go -diff -merge
diff --git a/vendor/cel.dev/expr/.gitignore b/vendor/cel.dev/expr/.gitignore
new file mode 100644
index 000000000..0d4fed27c
--- /dev/null
+++ b/vendor/cel.dev/expr/.gitignore
@@ -0,0 +1,2 @@
+bazel-*
+MODULE.bazel.lock
diff --git a/vendor/cel.dev/expr/BUILD.bazel b/vendor/cel.dev/expr/BUILD.bazel
new file mode 100644
index 000000000..37d8adc95
--- /dev/null
+++ b/vendor/cel.dev/expr/BUILD.bazel
@@ -0,0 +1,34 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"]) # Apache 2.0
+
+go_library(
+ name = "expr",
+ srcs = [
+ "checked.pb.go",
+ "eval.pb.go",
+ "explain.pb.go",
+ "syntax.pb.go",
+ "value.pb.go",
+ ],
+ importpath = "cel.dev/expr",
+ visibility = ["//visibility:public"],
+ deps = [
+ "@org_golang_google_genproto_googleapis_rpc//status:go_default_library",
+ "@org_golang_google_protobuf//reflect/protoreflect",
+ "@org_golang_google_protobuf//runtime/protoimpl",
+ "@org_golang_google_protobuf//types/known/anypb",
+ "@org_golang_google_protobuf//types/known/durationpb",
+ "@org_golang_google_protobuf//types/known/emptypb",
+ "@org_golang_google_protobuf//types/known/structpb",
+ "@org_golang_google_protobuf//types/known/timestamppb",
+ ],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":expr",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/cel.dev/expr/CODE_OF_CONDUCT.md b/vendor/cel.dev/expr/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..59908e2d8
--- /dev/null
+++ b/vendor/cel.dev/expr/CODE_OF_CONDUCT.md
@@ -0,0 +1,25 @@
+# Contributor Code of Conduct
+## Version 0.1.1 (adapted from 0.3b-angular)
+
+As contributors and maintainers of the Common Expression Language
+(CEL) project, we pledge to respect everyone who contributes by
+posting issues, updating documentation, submitting pull requests,
+providing feedback in comments, and any other activities.
+
+Communication through any of CEL's channels (GitHub, Gitter, IRC,
+mailing lists, Google+, Twitter, etc.) must be constructive and never
+resort to personal attacks, trolling, public or private harassment,
+insults, or other unprofessional conduct.
+
+We promise to extend courtesy and respect to everyone involved in this
+project regardless of gender, gender identity, sexual orientation,
+disability, age, race, ethnicity, religion, or level of experience. We
+expect anyone contributing to the project to do the same.
+
+If any member of the community violates this code of conduct, the
+maintainers of the CEL project may take action, removing issues,
+comments, and PRs or blocking accounts as deemed appropriate.
+
+If you are subject to or witness unacceptable behavior, or have any
+other concerns, please email us at
+[cel-conduct@google.com](mailto:cel-conduct@google.com).
diff --git a/vendor/cel.dev/expr/CONTRIBUTING.md b/vendor/cel.dev/expr/CONTRIBUTING.md
new file mode 100644
index 000000000..8f5fd5c31
--- /dev/null
+++ b/vendor/cel.dev/expr/CONTRIBUTING.md
@@ -0,0 +1,32 @@
+# How to Contribute
+
+We'd love to accept your patches and contributions to this project. There are a
+few guidelines you need to follow.
+
+## Contributor License Agreement
+
+Contributions to this project must be accompanied by a Contributor License
+Agreement. You (or your employer) retain the copyright to your contribution;
+this simply gives us permission to use and redistribute your contributions as
+part of the project. Head over to <https://cla.developers.google.com/> to see
+your current agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one
+(even if it was for a different project), you probably don't need to do it
+again.
+
+## Code reviews
+
+All submissions, including submissions by project members, require review. We
+use GitHub pull requests for this purpose. Consult
+[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
+information on using pull requests.
+
+## What to expect from maintainers
+
+Expect maintainers to respond to new issues or pull requests within a week.
+For outstanding and ongoing issues and particularly for long-running
+pull requests, expect the maintainers to review within a week of a
+contributor asking for a new review. There is no commitment to resolution --
+merging or closing a pull request, or fixing or closing an issue -- because some
+issues will require more discussion than others.
diff --git a/vendor/cel.dev/expr/GOVERNANCE.md b/vendor/cel.dev/expr/GOVERNANCE.md
new file mode 100644
index 000000000..0a525bc17
--- /dev/null
+++ b/vendor/cel.dev/expr/GOVERNANCE.md
@@ -0,0 +1,43 @@
+# Project Governance
+
+This document defines the governance process for the CEL language. CEL is
+Google-developed, but openly governed. Major contributors to the CEL
+specification and its corresponding implementations constitute the CEL
+Language Council. New members may be added by a unanimous vote of the
+Council.
+
+The MAINTAINERS.md file lists the members of the CEL Language Council, and
+unofficially indicates the "areas of expertise" of each member with respect
+to the publicly available CEL repos.
+
+## Code Changes
+
+Code changes must follow the standard pull request (PR) model documented in the
+CONTRIBUTING.md for each CEL repo. All fixes and features must be reviewed by a
+maintainer. The maintainer reserves the right to request that any feature
+request (FR) or PR be reviewed by the language council.
+
+## Syntax and Semantic Changes
+
+Syntactic and semantic changes must be reviewed by the CEL Language Council.
+Maintainers may also request language council review at their discretion.
+
+The review process is as follows:
+
+- Create a Feature Request in the CEL-Spec repo. The feature description will
+ serve as an abstract for the detailed design document.
+- Co-develop a design document with the Language Council.
+- Once the proposer approves the design document, the document will be
+  linked to the FR in the CEL-Spec repo and opened for comments from members
+  of cel-lang-discuss@googlegroups.com.
+- The Language Council will review the design doc at the next council meeting
+  (held once every three weeks), and the council's decision will be recorded
+  in the document.
+
+If the proposal is approved, the spec will be updated by a maintainer (if
+applicable) and a rationale will be included in the CEL-Spec wiki to ensure
+future developers may follow CEL's growth and direction over time.
+
+Approved proposals may be implemented by the proposer or by the maintainers as
+the parties see fit. At the discretion of the maintainer, changes from the
+approved design are permitted during implementation if they improve the user
+experience and clarity of the feature.
diff --git a/vendor/github.com/google/gofuzz/LICENSE b/vendor/cel.dev/expr/LICENSE
similarity index 100%
rename from vendor/github.com/google/gofuzz/LICENSE
rename to vendor/cel.dev/expr/LICENSE
diff --git a/vendor/cel.dev/expr/MAINTAINERS.md b/vendor/cel.dev/expr/MAINTAINERS.md
new file mode 100644
index 000000000..1ed2eb8ab
--- /dev/null
+++ b/vendor/cel.dev/expr/MAINTAINERS.md
@@ -0,0 +1,13 @@
+# CEL Language Council
+
+| Name | Company | Area of Expertise |
+|-----------------|--------------|-------------------|
+| Alfred Fuller | Facebook | cel-cpp, cel-spec |
+| Jim Larson | Google | cel-go, cel-spec |
+| Matthias Blume  | Google       | cel-spec          |
+| Tristan Swadell | Google | cel-go, cel-spec |
+
+## Emeritus
+
+* Sanjay Ghemawat (Google)
+* Wolfgang Grieskamp (Facebook)
diff --git a/vendor/cel.dev/expr/MODULE.bazel b/vendor/cel.dev/expr/MODULE.bazel
new file mode 100644
index 000000000..85ac9ff61
--- /dev/null
+++ b/vendor/cel.dev/expr/MODULE.bazel
@@ -0,0 +1,74 @@
+module(
+ name = "cel-spec",
+)
+
+bazel_dep(
+ name = "bazel_skylib",
+ version = "1.7.1",
+)
+bazel_dep(
+ name = "gazelle",
+ version = "0.39.1",
+ repo_name = "bazel_gazelle",
+)
+bazel_dep(
+ name = "googleapis",
+ version = "0.0.0-20241220-5e258e33.bcr.1",
+ repo_name = "com_google_googleapis",
+)
+bazel_dep(
+ name = "googleapis-cc",
+ version = "1.0.0",
+)
+bazel_dep(
+ name = "googleapis-java",
+ version = "1.0.0",
+)
+bazel_dep(
+ name = "googleapis-go",
+ version = "1.0.0",
+)
+bazel_dep(
+ name = "protobuf",
+ version = "27.0",
+ repo_name = "com_google_protobuf",
+)
+bazel_dep(
+ name = "rules_cc",
+ version = "0.0.17",
+)
+bazel_dep(
+ name = "rules_go",
+ version = "0.53.0",
+ repo_name = "io_bazel_rules_go",
+)
+bazel_dep(
+ name = "rules_java",
+ version = "7.6.5",
+)
+bazel_dep(
+ name = "rules_proto",
+ version = "7.0.2",
+)
+bazel_dep(
+ name = "rules_python",
+ version = "0.35.0",
+)
+
+### PYTHON ###
+python = use_extension("@rules_python//python/extensions:python.bzl", "python")
+python.toolchain(
+ ignore_root_user_error = True,
+ python_version = "3.11",
+)
+
+go_sdk = use_extension("@io_bazel_rules_go//go:extensions.bzl", "go_sdk")
+go_sdk.download(version = "1.22.0")
+
+go_deps = use_extension("@bazel_gazelle//:extensions.bzl", "go_deps")
+go_deps.from_file(go_mod = "//:go.mod")
+use_repo(
+ go_deps,
+ "org_golang_google_genproto_googleapis_rpc",
+ "org_golang_google_protobuf",
+)
diff --git a/vendor/cel.dev/expr/README.md b/vendor/cel.dev/expr/README.md
new file mode 100644
index 000000000..42d67f87c
--- /dev/null
+++ b/vendor/cel.dev/expr/README.md
@@ -0,0 +1,71 @@
+# Common Expression Language
+
+The Common Expression Language (CEL) implements common semantics for expression
+evaluation, enabling different applications to more easily interoperate.
+
+Key Applications
+
+* Security policy: organizations have complex infrastructure and need common
+  tooling to reason about the system as a whole.
+* Protocols: expressions are a useful data type and require interoperability
+ across programming languages and platforms.
+
+
+Guiding philosophy:
+
+1. Keep it small & fast.
+  * CEL evaluates in linear time, is mutation-free, and is not Turing-complete.
+ This limitation is a feature of the language design, which allows the
+ implementation to evaluate orders of magnitude faster than equivalently
+ sandboxed JavaScript.
+2. Make it extensible.
+  * CEL is designed to be embedded in applications, and is extensible via
+    its context, which lets the embedding software provide its own functions
+    and data.
+3. Developer-friendly.
+ * The language is approachable to developers. The initial spec was based
+ on the experience of developing Firebase Rules and usability testing
+ many prior iterations.
+  * The library itself and the accompanying tooling should be easy to adopt
+    for teams that seek to integrate CEL into their platforms.
+
+The required components of a system that supports CEL are:
+
+* The textual representation of an expression as written by a developer. Its
+  syntax is similar to expressions in C/C++/Java/JavaScript.
+* A representation of the program's abstract syntax tree (AST).
+* A compiler library that converts the textual representation to the binary
+ representation. This can be done ahead of time (in the control plane) or
+ just before evaluation (in the data plane).
+* A context containing one or more typed variables, often protobuf messages.
+  Most use cases will use `attribute_context.proto`.
+* An evaluator library that takes the binary representation and the context
+  and produces a result, usually a Boolean.
+
+For use cases which require persistence or cross-process communication, it is
+highly recommended to serialize the type-checked expression as a protocol
+buffer. The CEL team maintains canonical protocol buffers for ASTs and will
+keep these versions identical and wire-compatible in perpetuity:
+
+* [CEL canonical](https://github.com/google/cel-spec/tree/master/proto/cel/expr)
+* [CEL v1alpha1](https://github.com/googleapis/googleapis/tree/master/google/api/expr/v1alpha1)
+
+
+Example of boolean conditions and object construction:
+
+``` c
+// Condition
+account.balance >= transaction.withdrawal
+ || (account.overdraftProtection
+ && account.overdraftLimit >= transaction.withdrawal - account.balance)
+
+// Object construction
+common.GeoPoint{ latitude: 10.0, longitude: -5.5 }
+```
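+
+As a rough sketch of how these components fit together -- assuming the
+separate cel-go implementation (github.com/google/cel-go), which is not part
+of this repository -- compiling and evaluating an expression looks roughly
+like this:
+
+``` go
+package main
+
+import (
+	"fmt"
+
+	"github.com/google/cel-go/cel"
+)
+
+func main() {
+	// The context declares the typed variables available to expressions.
+	env, err := cel.NewEnv(cel.Variable("balance", cel.DoubleType))
+	if err != nil {
+		panic(err)
+	}
+	// Compile: textual representation -> type-checked AST (control plane).
+	ast, iss := env.Compile(`balance >= 100.0`)
+	if iss.Err() != nil {
+		panic(iss.Err())
+	}
+	// Evaluate: AST + context -> result (data plane).
+	prg, err := env.Program(ast)
+	if err != nil {
+		panic(err)
+	}
+	out, _, err := prg.Eval(map[string]any{"balance": 250.0})
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(out) // true
+}
+```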
+
+For more detail, see:
+
+* [Introduction](doc/intro.md)
+* [Language Definition](doc/langdef.md)
+
+Released under the [Apache License](LICENSE).
diff --git a/vendor/cel.dev/expr/WORKSPACE b/vendor/cel.dev/expr/WORKSPACE
new file mode 100644
index 000000000..b6dc9ed67
--- /dev/null
+++ b/vendor/cel.dev/expr/WORKSPACE
@@ -0,0 +1,145 @@
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+
+http_archive(
+ name = "io_bazel_rules_go",
+ sha256 = "099a9fb96a376ccbbb7d291ed4ecbdfd42f6bc822ab77ae6f1b5cb9e914e94fa",
+ urls = [
+ "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip",
+ "https://github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip",
+ ],
+)
+
+http_archive(
+ name = "bazel_gazelle",
+ sha256 = "ecba0f04f96b4960a5b250c8e8eeec42281035970aa8852dda73098274d14a1d",
+ urls = [
+ "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz",
+ "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz",
+ ],
+)
+
+http_archive(
+ name = "rules_proto",
+ sha256 = "e017528fd1c91c5a33f15493e3a398181a9e821a804eb7ff5acdd1d2d6c2b18d",
+ strip_prefix = "rules_proto-4.0.0-3.20.0",
+ urls = [
+ "https://github.com/bazelbuild/rules_proto/archive/refs/tags/4.0.0-3.20.0.tar.gz",
+ ],
+)
+
+# googleapis as of 09/16/2024
+http_archive(
+ name = "com_google_googleapis",
+ strip_prefix = "googleapis-4082d5e51e8481f6ccc384cacd896f4e78f19dee",
+ sha256 = "57319889d47578b3c89bf1b3f34888d796a8913d63b32d750a4cd12ed303c4e8",
+ urls = [
+ "https://github.com/googleapis/googleapis/archive/4082d5e51e8481f6ccc384cacd896f4e78f19dee.tar.gz",
+ ],
+)
+
+# protobuf
+http_archive(
+ name = "com_google_protobuf",
+ sha256 = "8242327e5df8c80ba49e4165250b8f79a76bd11765facefaaecfca7747dc8da2",
+ strip_prefix = "protobuf-3.21.5",
+ urls = ["https://github.com/protocolbuffers/protobuf/archive/v3.21.5.zip"],
+)
+
+# googletest
+http_archive(
+ name = "com_google_googletest",
+ urls = ["https://github.com/google/googletest/archive/master.zip"],
+ strip_prefix = "googletest-master",
+)
+
+# gflags
+http_archive(
+ name = "com_github_gflags_gflags",
+ sha256 = "6e16c8bc91b1310a44f3965e616383dbda48f83e8c1eaa2370a215057b00cabe",
+ strip_prefix = "gflags-77592648e3f3be87d6c7123eb81cbad75f9aef5a",
+ urls = [
+ "https://mirror.bazel.build/github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz",
+ "https://github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz",
+ ],
+)
+
+# glog
+http_archive(
+ name = "com_google_glog",
+ sha256 = "1ee310e5d0a19b9d584a855000434bb724aa744745d5b8ab1855c85bff8a8e21",
+ strip_prefix = "glog-028d37889a1e80e8a07da1b8945ac706259e5fd8",
+ urls = [
+ "https://mirror.bazel.build/github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz",
+ "https://github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz",
+ ],
+)
+
+# absl
+http_archive(
+ name = "com_google_absl",
+ strip_prefix = "abseil-cpp-master",
+ urls = ["https://github.com/abseil/abseil-cpp/archive/master.zip"],
+)
+
+load("@io_bazel_rules_go//go:deps.bzl", "go_rules_dependencies", "go_register_toolchains")
+load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository")
+load("@com_google_googleapis//:repository_rules.bzl", "switched_rules_by_language")
+load("@rules_proto//proto:repositories.bzl", "rules_proto_dependencies", "rules_proto_toolchains")
+load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")
+
+switched_rules_by_language(
+ name = "com_google_googleapis_imports",
+ cc = True,
+)
+
+# Do *not* call *_dependencies(), etc, yet. See comment at the end.
+
+# Generated Google APIs protos for Golang 08/26/2024
+go_repository(
+ name = "org_golang_google_genproto_googleapis_api",
+ build_file_proto_mode = "disable_global",
+ importpath = "google.golang.org/genproto/googleapis/api",
+ sum = "h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw=",
+ version = "v0.0.0-20240826202546-f6391c0de4c7",
+)
+
+# Generated Google APIs protos for Golang 08/26/2024
+go_repository(
+ name = "org_golang_google_genproto_googleapis_rpc",
+ build_file_proto_mode = "disable_global",
+ importpath = "google.golang.org/genproto/googleapis/rpc",
+ sum = "h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs=",
+ version = "v0.0.0-20240826202546-f6391c0de4c7",
+)
+
+# gRPC deps
+go_repository(
+ name = "org_golang_google_grpc",
+ build_file_proto_mode = "disable_global",
+ importpath = "google.golang.org/grpc",
+ tag = "v1.49.0",
+)
+
+go_repository(
+ name = "org_golang_x_net",
+ importpath = "golang.org/x/net",
+ sum = "h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=",
+ version = "v0.0.0-20190311183353-d8887717615a",
+)
+
+go_repository(
+ name = "org_golang_x_text",
+ importpath = "golang.org/x/text",
+ sum = "h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=",
+ version = "v0.3.2",
+)
+
+# Run the dependencies at the end. These will silently try to import some
+# of the above repositories but at different versions, so ours must come first.
+go_rules_dependencies()
+go_register_toolchains(version = "1.19.1")
+gazelle_dependencies()
+rules_proto_dependencies()
+rules_proto_toolchains()
+protobuf_deps()
diff --git a/vendor/cel.dev/expr/WORKSPACE.bzlmod b/vendor/cel.dev/expr/WORKSPACE.bzlmod
new file mode 100644
index 000000000..e69de29bb
diff --git a/vendor/cel.dev/expr/checked.pb.go b/vendor/cel.dev/expr/checked.pb.go
new file mode 100644
index 000000000..bb225c8ab
--- /dev/null
+++ b/vendor/cel.dev/expr/checked.pb.go
@@ -0,0 +1,1432 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.5
+// source: cel/expr/checked.proto
+
+package expr
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Type_PrimitiveType int32
+
+const (
+ Type_PRIMITIVE_TYPE_UNSPECIFIED Type_PrimitiveType = 0
+ Type_BOOL Type_PrimitiveType = 1
+ Type_INT64 Type_PrimitiveType = 2
+ Type_UINT64 Type_PrimitiveType = 3
+ Type_DOUBLE Type_PrimitiveType = 4
+ Type_STRING Type_PrimitiveType = 5
+ Type_BYTES Type_PrimitiveType = 6
+)
+
+// Enum value maps for Type_PrimitiveType.
+var (
+ Type_PrimitiveType_name = map[int32]string{
+ 0: "PRIMITIVE_TYPE_UNSPECIFIED",
+ 1: "BOOL",
+ 2: "INT64",
+ 3: "UINT64",
+ 4: "DOUBLE",
+ 5: "STRING",
+ 6: "BYTES",
+ }
+ Type_PrimitiveType_value = map[string]int32{
+ "PRIMITIVE_TYPE_UNSPECIFIED": 0,
+ "BOOL": 1,
+ "INT64": 2,
+ "UINT64": 3,
+ "DOUBLE": 4,
+ "STRING": 5,
+ "BYTES": 6,
+ }
+)
+
+func (x Type_PrimitiveType) Enum() *Type_PrimitiveType {
+ p := new(Type_PrimitiveType)
+ *p = x
+ return p
+}
+
+func (x Type_PrimitiveType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Type_PrimitiveType) Descriptor() protoreflect.EnumDescriptor {
+ return file_cel_expr_checked_proto_enumTypes[0].Descriptor()
+}
+
+func (Type_PrimitiveType) Type() protoreflect.EnumType {
+ return &file_cel_expr_checked_proto_enumTypes[0]
+}
+
+func (x Type_PrimitiveType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Type_PrimitiveType.Descriptor instead.
+func (Type_PrimitiveType) EnumDescriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 0}
+}
+
+type Type_WellKnownType int32
+
+const (
+ Type_WELL_KNOWN_TYPE_UNSPECIFIED Type_WellKnownType = 0
+ Type_ANY Type_WellKnownType = 1
+ Type_TIMESTAMP Type_WellKnownType = 2
+ Type_DURATION Type_WellKnownType = 3
+)
+
+// Enum value maps for Type_WellKnownType.
+var (
+ Type_WellKnownType_name = map[int32]string{
+ 0: "WELL_KNOWN_TYPE_UNSPECIFIED",
+ 1: "ANY",
+ 2: "TIMESTAMP",
+ 3: "DURATION",
+ }
+ Type_WellKnownType_value = map[string]int32{
+ "WELL_KNOWN_TYPE_UNSPECIFIED": 0,
+ "ANY": 1,
+ "TIMESTAMP": 2,
+ "DURATION": 3,
+ }
+)
+
+func (x Type_WellKnownType) Enum() *Type_WellKnownType {
+ p := new(Type_WellKnownType)
+ *p = x
+ return p
+}
+
+func (x Type_WellKnownType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Type_WellKnownType) Descriptor() protoreflect.EnumDescriptor {
+ return file_cel_expr_checked_proto_enumTypes[1].Descriptor()
+}
+
+func (Type_WellKnownType) Type() protoreflect.EnumType {
+ return &file_cel_expr_checked_proto_enumTypes[1]
+}
+
+func (x Type_WellKnownType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Type_WellKnownType.Descriptor instead.
+func (Type_WellKnownType) EnumDescriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 1}
+}
+
+type CheckedExpr struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ReferenceMap map[int64]*Reference `protobuf:"bytes,2,rep,name=reference_map,json=referenceMap,proto3" json:"reference_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ TypeMap map[int64]*Type `protobuf:"bytes,3,rep,name=type_map,json=typeMap,proto3" json:"type_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ SourceInfo *SourceInfo `protobuf:"bytes,5,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"`
+ ExprVersion string `protobuf:"bytes,6,opt,name=expr_version,json=exprVersion,proto3" json:"expr_version,omitempty"`
+ Expr *Expr `protobuf:"bytes,4,opt,name=expr,proto3" json:"expr,omitempty"`
+}
+
+func (x *CheckedExpr) Reset() {
+ *x = CheckedExpr{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CheckedExpr) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CheckedExpr) ProtoMessage() {}
+
+func (x *CheckedExpr) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CheckedExpr.ProtoReflect.Descriptor instead.
+func (*CheckedExpr) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *CheckedExpr) GetReferenceMap() map[int64]*Reference {
+ if x != nil {
+ return x.ReferenceMap
+ }
+ return nil
+}
+
+func (x *CheckedExpr) GetTypeMap() map[int64]*Type {
+ if x != nil {
+ return x.TypeMap
+ }
+ return nil
+}
+
+func (x *CheckedExpr) GetSourceInfo() *SourceInfo {
+ if x != nil {
+ return x.SourceInfo
+ }
+ return nil
+}
+
+func (x *CheckedExpr) GetExprVersion() string {
+ if x != nil {
+ return x.ExprVersion
+ }
+ return ""
+}
+
+func (x *CheckedExpr) GetExpr() *Expr {
+ if x != nil {
+ return x.Expr
+ }
+ return nil
+}
+
+type Type struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to TypeKind:
+ //
+ // *Type_Dyn
+ // *Type_Null
+ // *Type_Primitive
+ // *Type_Wrapper
+ // *Type_WellKnown
+ // *Type_ListType_
+ // *Type_MapType_
+ // *Type_Function
+ // *Type_MessageType
+ // *Type_TypeParam
+ // *Type_Type
+ // *Type_Error
+ // *Type_AbstractType_
+ TypeKind isType_TypeKind `protobuf_oneof:"type_kind"`
+}
+
+func (x *Type) Reset() {
+ *x = Type{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type) ProtoMessage() {}
+
+func (x *Type) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type.ProtoReflect.Descriptor instead.
+func (*Type) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1}
+}
+
+func (m *Type) GetTypeKind() isType_TypeKind {
+ if m != nil {
+ return m.TypeKind
+ }
+ return nil
+}
+
+func (x *Type) GetDyn() *emptypb.Empty {
+ if x, ok := x.GetTypeKind().(*Type_Dyn); ok {
+ return x.Dyn
+ }
+ return nil
+}
+
+func (x *Type) GetNull() structpb.NullValue {
+ if x, ok := x.GetTypeKind().(*Type_Null); ok {
+ return x.Null
+ }
+ return structpb.NullValue(0)
+}
+
+func (x *Type) GetPrimitive() Type_PrimitiveType {
+ if x, ok := x.GetTypeKind().(*Type_Primitive); ok {
+ return x.Primitive
+ }
+ return Type_PRIMITIVE_TYPE_UNSPECIFIED
+}
+
+func (x *Type) GetWrapper() Type_PrimitiveType {
+ if x, ok := x.GetTypeKind().(*Type_Wrapper); ok {
+ return x.Wrapper
+ }
+ return Type_PRIMITIVE_TYPE_UNSPECIFIED
+}
+
+func (x *Type) GetWellKnown() Type_WellKnownType {
+ if x, ok := x.GetTypeKind().(*Type_WellKnown); ok {
+ return x.WellKnown
+ }
+ return Type_WELL_KNOWN_TYPE_UNSPECIFIED
+}
+
+func (x *Type) GetListType() *Type_ListType {
+ if x, ok := x.GetTypeKind().(*Type_ListType_); ok {
+ return x.ListType
+ }
+ return nil
+}
+
+func (x *Type) GetMapType() *Type_MapType {
+ if x, ok := x.GetTypeKind().(*Type_MapType_); ok {
+ return x.MapType
+ }
+ return nil
+}
+
+func (x *Type) GetFunction() *Type_FunctionType {
+ if x, ok := x.GetTypeKind().(*Type_Function); ok {
+ return x.Function
+ }
+ return nil
+}
+
+func (x *Type) GetMessageType() string {
+ if x, ok := x.GetTypeKind().(*Type_MessageType); ok {
+ return x.MessageType
+ }
+ return ""
+}
+
+func (x *Type) GetTypeParam() string {
+ if x, ok := x.GetTypeKind().(*Type_TypeParam); ok {
+ return x.TypeParam
+ }
+ return ""
+}
+
+func (x *Type) GetType() *Type {
+ if x, ok := x.GetTypeKind().(*Type_Type); ok {
+ return x.Type
+ }
+ return nil
+}
+
+func (x *Type) GetError() *emptypb.Empty {
+ if x, ok := x.GetTypeKind().(*Type_Error); ok {
+ return x.Error
+ }
+ return nil
+}
+
+func (x *Type) GetAbstractType() *Type_AbstractType {
+ if x, ok := x.GetTypeKind().(*Type_AbstractType_); ok {
+ return x.AbstractType
+ }
+ return nil
+}
+
+type isType_TypeKind interface {
+ isType_TypeKind()
+}
+
+type Type_Dyn struct {
+ Dyn *emptypb.Empty `protobuf:"bytes,1,opt,name=dyn,proto3,oneof"`
+}
+
+type Type_Null struct {
+ Null structpb.NullValue `protobuf:"varint,2,opt,name=null,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Type_Primitive struct {
+ Primitive Type_PrimitiveType `protobuf:"varint,3,opt,name=primitive,proto3,enum=cel.expr.Type_PrimitiveType,oneof"`
+}
+
+type Type_Wrapper struct {
+ Wrapper Type_PrimitiveType `protobuf:"varint,4,opt,name=wrapper,proto3,enum=cel.expr.Type_PrimitiveType,oneof"`
+}
+
+type Type_WellKnown struct {
+ WellKnown Type_WellKnownType `protobuf:"varint,5,opt,name=well_known,json=wellKnown,proto3,enum=cel.expr.Type_WellKnownType,oneof"`
+}
+
+type Type_ListType_ struct {
+ ListType *Type_ListType `protobuf:"bytes,6,opt,name=list_type,json=listType,proto3,oneof"`
+}
+
+type Type_MapType_ struct {
+ MapType *Type_MapType `protobuf:"bytes,7,opt,name=map_type,json=mapType,proto3,oneof"`
+}
+
+type Type_Function struct {
+ Function *Type_FunctionType `protobuf:"bytes,8,opt,name=function,proto3,oneof"`
+}
+
+type Type_MessageType struct {
+ MessageType string `protobuf:"bytes,9,opt,name=message_type,json=messageType,proto3,oneof"`
+}
+
+type Type_TypeParam struct {
+ TypeParam string `protobuf:"bytes,10,opt,name=type_param,json=typeParam,proto3,oneof"`
+}
+
+type Type_Type struct {
+ Type *Type `protobuf:"bytes,11,opt,name=type,proto3,oneof"`
+}
+
+type Type_Error struct {
+ Error *emptypb.Empty `protobuf:"bytes,12,opt,name=error,proto3,oneof"`
+}
+
+type Type_AbstractType_ struct {
+ AbstractType *Type_AbstractType `protobuf:"bytes,14,opt,name=abstract_type,json=abstractType,proto3,oneof"`
+}
+
+func (*Type_Dyn) isType_TypeKind() {}
+
+func (*Type_Null) isType_TypeKind() {}
+
+func (*Type_Primitive) isType_TypeKind() {}
+
+func (*Type_Wrapper) isType_TypeKind() {}
+
+func (*Type_WellKnown) isType_TypeKind() {}
+
+func (*Type_ListType_) isType_TypeKind() {}
+
+func (*Type_MapType_) isType_TypeKind() {}
+
+func (*Type_Function) isType_TypeKind() {}
+
+func (*Type_MessageType) isType_TypeKind() {}
+
+func (*Type_TypeParam) isType_TypeKind() {}
+
+func (*Type_Type) isType_TypeKind() {}
+
+func (*Type_Error) isType_TypeKind() {}
+
+func (*Type_AbstractType_) isType_TypeKind() {}
+
+type Decl struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Types that are assignable to DeclKind:
+ //
+ // *Decl_Ident
+ // *Decl_Function
+ DeclKind isDecl_DeclKind `protobuf_oneof:"decl_kind"`
+}
+
+func (x *Decl) Reset() {
+ *x = Decl{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Decl) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl) ProtoMessage() {}
+
+func (x *Decl) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl.ProtoReflect.Descriptor instead.
+func (*Decl) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Decl) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (m *Decl) GetDeclKind() isDecl_DeclKind {
+ if m != nil {
+ return m.DeclKind
+ }
+ return nil
+}
+
+func (x *Decl) GetIdent() *Decl_IdentDecl {
+ if x, ok := x.GetDeclKind().(*Decl_Ident); ok {
+ return x.Ident
+ }
+ return nil
+}
+
+func (x *Decl) GetFunction() *Decl_FunctionDecl {
+ if x, ok := x.GetDeclKind().(*Decl_Function); ok {
+ return x.Function
+ }
+ return nil
+}
+
+type isDecl_DeclKind interface {
+ isDecl_DeclKind()
+}
+
+type Decl_Ident struct {
+ Ident *Decl_IdentDecl `protobuf:"bytes,2,opt,name=ident,proto3,oneof"`
+}
+
+type Decl_Function struct {
+ Function *Decl_FunctionDecl `protobuf:"bytes,3,opt,name=function,proto3,oneof"`
+}
+
+func (*Decl_Ident) isDecl_DeclKind() {}
+
+func (*Decl_Function) isDecl_DeclKind() {}
+
+type Reference struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ OverloadId []string `protobuf:"bytes,3,rep,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"`
+ Value *Constant `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *Reference) Reset() {
+ *x = Reference{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Reference) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Reference) ProtoMessage() {}
+
+func (x *Reference) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Reference.ProtoReflect.Descriptor instead.
+func (*Reference) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Reference) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Reference) GetOverloadId() []string {
+ if x != nil {
+ return x.OverloadId
+ }
+ return nil
+}
+
+func (x *Reference) GetValue() *Constant {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+type Type_ListType struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ElemType *Type `protobuf:"bytes,1,opt,name=elem_type,json=elemType,proto3" json:"elem_type,omitempty"`
+}
+
+func (x *Type_ListType) Reset() {
+ *x = Type_ListType{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type_ListType) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_ListType) ProtoMessage() {}
+
+func (x *Type_ListType) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_ListType.ProtoReflect.Descriptor instead.
+func (*Type_ListType) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 0}
+}
+
+func (x *Type_ListType) GetElemType() *Type {
+ if x != nil {
+ return x.ElemType
+ }
+ return nil
+}
+
+type Type_MapType struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ KeyType *Type `protobuf:"bytes,1,opt,name=key_type,json=keyType,proto3" json:"key_type,omitempty"`
+ ValueType *Type `protobuf:"bytes,2,opt,name=value_type,json=valueType,proto3" json:"value_type,omitempty"`
+}
+
+func (x *Type_MapType) Reset() {
+ *x = Type_MapType{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type_MapType) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_MapType) ProtoMessage() {}
+
+func (x *Type_MapType) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_MapType.ProtoReflect.Descriptor instead.
+func (*Type_MapType) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 1}
+}
+
+func (x *Type_MapType) GetKeyType() *Type {
+ if x != nil {
+ return x.KeyType
+ }
+ return nil
+}
+
+func (x *Type_MapType) GetValueType() *Type {
+ if x != nil {
+ return x.ValueType
+ }
+ return nil
+}
+
+type Type_FunctionType struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ResultType *Type `protobuf:"bytes,1,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"`
+ ArgTypes []*Type `protobuf:"bytes,2,rep,name=arg_types,json=argTypes,proto3" json:"arg_types,omitempty"`
+}
+
+func (x *Type_FunctionType) Reset() {
+ *x = Type_FunctionType{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type_FunctionType) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_FunctionType) ProtoMessage() {}
+
+func (x *Type_FunctionType) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_FunctionType.ProtoReflect.Descriptor instead.
+func (*Type_FunctionType) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 2}
+}
+
+func (x *Type_FunctionType) GetResultType() *Type {
+ if x != nil {
+ return x.ResultType
+ }
+ return nil
+}
+
+func (x *Type_FunctionType) GetArgTypes() []*Type {
+ if x != nil {
+ return x.ArgTypes
+ }
+ return nil
+}
+
+type Type_AbstractType struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ ParameterTypes []*Type `protobuf:"bytes,2,rep,name=parameter_types,json=parameterTypes,proto3" json:"parameter_types,omitempty"`
+}
+
+func (x *Type_AbstractType) Reset() {
+ *x = Type_AbstractType{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type_AbstractType) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_AbstractType) ProtoMessage() {}
+
+func (x *Type_AbstractType) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_AbstractType.ProtoReflect.Descriptor instead.
+func (*Type_AbstractType) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 3}
+}
+
+func (x *Type_AbstractType) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Type_AbstractType) GetParameterTypes() []*Type {
+ if x != nil {
+ return x.ParameterTypes
+ }
+ return nil
+}
+
+type Decl_IdentDecl struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Type *Type `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ Value *Constant `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ Doc string `protobuf:"bytes,3,opt,name=doc,proto3" json:"doc,omitempty"`
+}
+
+func (x *Decl_IdentDecl) Reset() {
+ *x = Decl_IdentDecl{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Decl_IdentDecl) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl_IdentDecl) ProtoMessage() {}
+
+func (x *Decl_IdentDecl) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl_IdentDecl.ProtoReflect.Descriptor instead.
+func (*Decl_IdentDecl) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 0}
+}
+
+func (x *Decl_IdentDecl) GetType() *Type {
+ if x != nil {
+ return x.Type
+ }
+ return nil
+}
+
+func (x *Decl_IdentDecl) GetValue() *Constant {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+func (x *Decl_IdentDecl) GetDoc() string {
+ if x != nil {
+ return x.Doc
+ }
+ return ""
+}
+
+type Decl_FunctionDecl struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Overloads []*Decl_FunctionDecl_Overload `protobuf:"bytes,1,rep,name=overloads,proto3" json:"overloads,omitempty"`
+}
+
+func (x *Decl_FunctionDecl) Reset() {
+ *x = Decl_FunctionDecl{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Decl_FunctionDecl) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl_FunctionDecl) ProtoMessage() {}
+
+func (x *Decl_FunctionDecl) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl_FunctionDecl.ProtoReflect.Descriptor instead.
+func (*Decl_FunctionDecl) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 1}
+}
+
+func (x *Decl_FunctionDecl) GetOverloads() []*Decl_FunctionDecl_Overload {
+ if x != nil {
+ return x.Overloads
+ }
+ return nil
+}
+
+type Decl_FunctionDecl_Overload struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ OverloadId string `protobuf:"bytes,1,opt,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"`
+ Params []*Type `protobuf:"bytes,2,rep,name=params,proto3" json:"params,omitempty"`
+ TypeParams []string `protobuf:"bytes,3,rep,name=type_params,json=typeParams,proto3" json:"type_params,omitempty"`
+ ResultType *Type `protobuf:"bytes,4,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"`
+ IsInstanceFunction bool `protobuf:"varint,5,opt,name=is_instance_function,json=isInstanceFunction,proto3" json:"is_instance_function,omitempty"`
+ Doc string `protobuf:"bytes,6,opt,name=doc,proto3" json:"doc,omitempty"`
+}
+
+func (x *Decl_FunctionDecl_Overload) Reset() {
+ *x = Decl_FunctionDecl_Overload{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Decl_FunctionDecl_Overload) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl_FunctionDecl_Overload) ProtoMessage() {}
+
+func (x *Decl_FunctionDecl_Overload) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl_FunctionDecl_Overload.ProtoReflect.Descriptor instead.
+func (*Decl_FunctionDecl_Overload) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 1, 0}
+}
+
+func (x *Decl_FunctionDecl_Overload) GetOverloadId() string {
+ if x != nil {
+ return x.OverloadId
+ }
+ return ""
+}
+
+func (x *Decl_FunctionDecl_Overload) GetParams() []*Type {
+ if x != nil {
+ return x.Params
+ }
+ return nil
+}
+
+func (x *Decl_FunctionDecl_Overload) GetTypeParams() []string {
+ if x != nil {
+ return x.TypeParams
+ }
+ return nil
+}
+
+func (x *Decl_FunctionDecl_Overload) GetResultType() *Type {
+ if x != nil {
+ return x.ResultType
+ }
+ return nil
+}
+
+func (x *Decl_FunctionDecl_Overload) GetIsInstanceFunction() bool {
+ if x != nil {
+ return x.IsInstanceFunction
+ }
+ return false
+}
+
+func (x *Decl_FunctionDecl_Overload) GetDoc() string {
+ if x != nil {
+ return x.Doc
+ }
+ return ""
+}
+
+var File_cel_expr_checked_proto protoreflect.FileDescriptor
+
+var file_cel_expr_checked_proto_rawDesc = []byte{
+ 0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x63, 0x68, 0x65, 0x63, 0x6b,
+ 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x1a, 0x15, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e,
+ 0x74, 0x61, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xba, 0x03, 0x0a, 0x0b, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64,
+ 0x45, 0x78, 0x70, 0x72, 0x12, 0x4c, 0x0a, 0x0d, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63,
+ 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78,
+ 0x70, 0x72, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d,
+ 0x61, 0x70, 0x12, 0x3d, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x03,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65,
+ 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x4d, 0x61,
+ 0x70, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x72,
+ 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
+ 0x65, 0x78, 0x70, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x65,
+ 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x1a,
+ 0x54, 0x0a, 0x11, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72,
+ 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4a, 0x0a, 0x0c, 0x54, 0x79, 0x70, 0x65, 0x4d, 0x61, 0x70,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x22, 0xe6, 0x09, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x03, 0x64, 0x79,
+ 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48,
+ 0x00, 0x52, 0x03, 0x64, 0x79, 0x6e, 0x12, 0x30, 0x0a, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x48, 0x00, 0x52, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x12, 0x3c, 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6d,
+ 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d,
+ 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x70, 0x72, 0x69,
+ 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65,
+ 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76,
+ 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72,
+ 0x12, 0x3d, 0x0a, 0x0a, 0x77, 0x65, 0x6c, 0x6c, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x54, 0x79, 0x70, 0x65, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79,
+ 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x77, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x12,
+ 0x36, 0x0a, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79,
+ 0x70, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x6c,
+ 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x61, 0x70, 0x5f, 0x74,
+ 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70,
+ 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x08,
+ 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x46,
+ 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x66,
+ 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52,
+ 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0a,
+ 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09,
+ 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x12, 0x24, 0x0a,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x04, 0x74,
+ 0x79, 0x70, 0x65, 0x12, 0x2e, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0c, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72,
+ 0x72, 0x6f, 0x72, 0x12, 0x42, 0x0a, 0x0d, 0x61, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x41, 0x62, 0x73, 0x74, 0x72,
+ 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x61, 0x62, 0x73, 0x74, 0x72,
+ 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x37, 0x0a, 0x08, 0x4c, 0x69, 0x73, 0x74, 0x54,
+ 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x65, 0x6c, 0x65, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x54, 0x79, 0x70, 0x65,
+ 0x1a, 0x63, 0x0a, 0x07, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x08, 0x6b,
+ 0x65, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x6b,
+ 0x65, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x6c, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75,
+ 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x61, 0x72, 0x67, 0x5f, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x61, 0x72, 0x67, 0x54, 0x79,
+ 0x70, 0x65, 0x73, 0x1a, 0x5b, 0x0a, 0x0c, 0x41, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x54,
+ 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x61, 0x6d,
+ 0x65, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65,
+ 0x52, 0x0e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x73,
+ 0x22, 0x73, 0x0a, 0x0d, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70,
+ 0x65, 0x12, 0x1e, 0x0a, 0x1a, 0x50, 0x52, 0x49, 0x4d, 0x49, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x54,
+ 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
+ 0x00, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x49,
+ 0x4e, 0x54, 0x36, 0x34, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34,
+ 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x04, 0x12, 0x0a,
+ 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x59,
+ 0x54, 0x45, 0x53, 0x10, 0x06, 0x22, 0x56, 0x0a, 0x0d, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f,
+ 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x57, 0x45, 0x4c, 0x4c, 0x5f, 0x4b,
+ 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43,
+ 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x59, 0x10, 0x01,
+ 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x10, 0x02, 0x12,
+ 0x0c, 0x0a, 0x08, 0x44, 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x42, 0x0b, 0x0a,
+ 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc2, 0x04, 0x0a, 0x04, 0x44,
+ 0x65, 0x63, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x63, 0x6c,
+ 0x48, 0x00, 0x52, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x08, 0x66, 0x75, 0x6e,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x63, 0x6c, 0x48, 0x00, 0x52, 0x08, 0x66, 0x75, 0x6e, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x6b, 0x0a, 0x09, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x63,
+ 0x6c, 0x12, 0x22, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12,
+ 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x6f,
+ 0x63, 0x1a, 0xbe, 0x02, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65,
+ 0x63, 0x6c, 0x12, 0x42, 0x0a, 0x09, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72,
+ 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65,
+ 0x63, 0x6c, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x09, 0x6f, 0x76, 0x65,
+ 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x1a, 0xe9, 0x01, 0x0a, 0x08, 0x4f, 0x76, 0x65, 0x72, 0x6c,
+ 0x6f, 0x61, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f,
+ 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f,
+ 0x61, 0x64, 0x49, 0x64, 0x12, 0x26, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x1f, 0x0a, 0x0b,
+ 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28,
+ 0x09, 0x52, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x2f, 0x0a,
+ 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79,
+ 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x30,
+ 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x66, 0x75,
+ 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73,
+ 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64,
+ 0x6f, 0x63, 0x42, 0x0b, 0x0a, 0x09, 0x64, 0x65, 0x63, 0x6c, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22,
+ 0x6a, 0x0a, 0x09, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18,
+ 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x49,
+ 0x64, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73,
+ 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2c, 0x0a, 0x0c, 0x64,
+ 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x44, 0x65, 0x63,
+ 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65,
+ 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var (
+ file_cel_expr_checked_proto_rawDescOnce sync.Once
+ file_cel_expr_checked_proto_rawDescData = file_cel_expr_checked_proto_rawDesc
+)
+
+func file_cel_expr_checked_proto_rawDescGZIP() []byte {
+ file_cel_expr_checked_proto_rawDescOnce.Do(func() {
+ file_cel_expr_checked_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_checked_proto_rawDescData)
+ })
+ return file_cel_expr_checked_proto_rawDescData
+}
+
+var file_cel_expr_checked_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_cel_expr_checked_proto_msgTypes = make([]protoimpl.MessageInfo, 13)
+var file_cel_expr_checked_proto_goTypes = []interface{}{
+ (Type_PrimitiveType)(0), // 0: cel.expr.Type.PrimitiveType
+ (Type_WellKnownType)(0), // 1: cel.expr.Type.WellKnownType
+ (*CheckedExpr)(nil), // 2: cel.expr.CheckedExpr
+ (*Type)(nil), // 3: cel.expr.Type
+ (*Decl)(nil), // 4: cel.expr.Decl
+ (*Reference)(nil), // 5: cel.expr.Reference
+ nil, // 6: cel.expr.CheckedExpr.ReferenceMapEntry
+ nil, // 7: cel.expr.CheckedExpr.TypeMapEntry
+ (*Type_ListType)(nil), // 8: cel.expr.Type.ListType
+ (*Type_MapType)(nil), // 9: cel.expr.Type.MapType
+ (*Type_FunctionType)(nil), // 10: cel.expr.Type.FunctionType
+ (*Type_AbstractType)(nil), // 11: cel.expr.Type.AbstractType
+ (*Decl_IdentDecl)(nil), // 12: cel.expr.Decl.IdentDecl
+ (*Decl_FunctionDecl)(nil), // 13: cel.expr.Decl.FunctionDecl
+ (*Decl_FunctionDecl_Overload)(nil), // 14: cel.expr.Decl.FunctionDecl.Overload
+ (*SourceInfo)(nil), // 15: cel.expr.SourceInfo
+ (*Expr)(nil), // 16: cel.expr.Expr
+ (*emptypb.Empty)(nil), // 17: google.protobuf.Empty
+ (structpb.NullValue)(0), // 18: google.protobuf.NullValue
+ (*Constant)(nil), // 19: cel.expr.Constant
+}
+var file_cel_expr_checked_proto_depIdxs = []int32{
+ 6, // 0: cel.expr.CheckedExpr.reference_map:type_name -> cel.expr.CheckedExpr.ReferenceMapEntry
+ 7, // 1: cel.expr.CheckedExpr.type_map:type_name -> cel.expr.CheckedExpr.TypeMapEntry
+ 15, // 2: cel.expr.CheckedExpr.source_info:type_name -> cel.expr.SourceInfo
+ 16, // 3: cel.expr.CheckedExpr.expr:type_name -> cel.expr.Expr
+ 17, // 4: cel.expr.Type.dyn:type_name -> google.protobuf.Empty
+ 18, // 5: cel.expr.Type.null:type_name -> google.protobuf.NullValue
+ 0, // 6: cel.expr.Type.primitive:type_name -> cel.expr.Type.PrimitiveType
+ 0, // 7: cel.expr.Type.wrapper:type_name -> cel.expr.Type.PrimitiveType
+ 1, // 8: cel.expr.Type.well_known:type_name -> cel.expr.Type.WellKnownType
+ 8, // 9: cel.expr.Type.list_type:type_name -> cel.expr.Type.ListType
+ 9, // 10: cel.expr.Type.map_type:type_name -> cel.expr.Type.MapType
+ 10, // 11: cel.expr.Type.function:type_name -> cel.expr.Type.FunctionType
+ 3, // 12: cel.expr.Type.type:type_name -> cel.expr.Type
+ 17, // 13: cel.expr.Type.error:type_name -> google.protobuf.Empty
+ 11, // 14: cel.expr.Type.abstract_type:type_name -> cel.expr.Type.AbstractType
+ 12, // 15: cel.expr.Decl.ident:type_name -> cel.expr.Decl.IdentDecl
+ 13, // 16: cel.expr.Decl.function:type_name -> cel.expr.Decl.FunctionDecl
+ 19, // 17: cel.expr.Reference.value:type_name -> cel.expr.Constant
+ 5, // 18: cel.expr.CheckedExpr.ReferenceMapEntry.value:type_name -> cel.expr.Reference
+ 3, // 19: cel.expr.CheckedExpr.TypeMapEntry.value:type_name -> cel.expr.Type
+ 3, // 20: cel.expr.Type.ListType.elem_type:type_name -> cel.expr.Type
+ 3, // 21: cel.expr.Type.MapType.key_type:type_name -> cel.expr.Type
+ 3, // 22: cel.expr.Type.MapType.value_type:type_name -> cel.expr.Type
+ 3, // 23: cel.expr.Type.FunctionType.result_type:type_name -> cel.expr.Type
+ 3, // 24: cel.expr.Type.FunctionType.arg_types:type_name -> cel.expr.Type
+ 3, // 25: cel.expr.Type.AbstractType.parameter_types:type_name -> cel.expr.Type
+ 3, // 26: cel.expr.Decl.IdentDecl.type:type_name -> cel.expr.Type
+ 19, // 27: cel.expr.Decl.IdentDecl.value:type_name -> cel.expr.Constant
+ 14, // 28: cel.expr.Decl.FunctionDecl.overloads:type_name -> cel.expr.Decl.FunctionDecl.Overload
+ 3, // 29: cel.expr.Decl.FunctionDecl.Overload.params:type_name -> cel.expr.Type
+ 3, // 30: cel.expr.Decl.FunctionDecl.Overload.result_type:type_name -> cel.expr.Type
+ 31, // [31:31] is the sub-list for method output_type
+ 31, // [31:31] is the sub-list for method input_type
+ 31, // [31:31] is the sub-list for extension type_name
+ 31, // [31:31] is the sub-list for extension extendee
+ 0, // [0:31] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_checked_proto_init() }
+func file_cel_expr_checked_proto_init() {
+ if File_cel_expr_checked_proto != nil {
+ return
+ }
+ file_cel_expr_syntax_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_cel_expr_checked_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CheckedExpr); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Decl); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Reference); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type_ListType); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type_MapType); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type_FunctionType); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type_AbstractType); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Decl_IdentDecl); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Decl_FunctionDecl); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Decl_FunctionDecl_Overload); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*Type_Dyn)(nil),
+ (*Type_Null)(nil),
+ (*Type_Primitive)(nil),
+ (*Type_Wrapper)(nil),
+ (*Type_WellKnown)(nil),
+ (*Type_ListType_)(nil),
+ (*Type_MapType_)(nil),
+ (*Type_Function)(nil),
+ (*Type_MessageType)(nil),
+ (*Type_TypeParam)(nil),
+ (*Type_Type)(nil),
+ (*Type_Error)(nil),
+ (*Type_AbstractType_)(nil),
+ }
+ file_cel_expr_checked_proto_msgTypes[2].OneofWrappers = []interface{}{
+ (*Decl_Ident)(nil),
+ (*Decl_Function)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_checked_proto_rawDesc,
+ NumEnums: 2,
+ NumMessages: 13,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_checked_proto_goTypes,
+ DependencyIndexes: file_cel_expr_checked_proto_depIdxs,
+ EnumInfos: file_cel_expr_checked_proto_enumTypes,
+ MessageInfos: file_cel_expr_checked_proto_msgTypes,
+ }.Build()
+ File_cel_expr_checked_proto = out.File
+ file_cel_expr_checked_proto_rawDesc = nil
+ file_cel_expr_checked_proto_goTypes = nil
+ file_cel_expr_checked_proto_depIdxs = nil
+}
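
For reviewers unfamiliar with these generated types, a minimal sketch of how a function declaration composes out of the structs above. It assumes the Decl, Decl_Function, and Type_Primitive wrappers and the Type_STRING/Type_INT64 enum constants generated earlier in this same file; the size declaration itself is hypothetical.

    package main

    import (
    	"fmt"

    	expr "cel.dev/expr"
    )

    func main() {
    	// Hypothetical declaration of size(string) -> int, mirroring the
    	// Decl.FunctionDecl.Overload shape generated above.
    	decl := &expr.Decl{
    		Name: "size",
    		DeclKind: &expr.Decl_Function{
    			Function: &expr.Decl_FunctionDecl{
    				Overloads: []*expr.Decl_FunctionDecl_Overload{{
    					OverloadId: "size_string",
    					Params: []*expr.Type{
    						{TypeKind: &expr.Type_Primitive{Primitive: expr.Type_STRING}},
    					},
    					ResultType: &expr.Type{
    						TypeKind: &expr.Type_Primitive{Primitive: expr.Type_INT64},
    					},
    					Doc: "number of characters in the string",
    				}},
    			},
    		},
    	}
    	// Oneof getters return nil for the kinds that are not set.
    	fmt.Println(decl.GetFunction().GetOverloads()[0].GetOverloadId())
    }
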
diff --git a/vendor/cel.dev/expr/cloudbuild.yaml b/vendor/cel.dev/expr/cloudbuild.yaml
new file mode 100644
index 000000000..e3e533a04
--- /dev/null
+++ b/vendor/cel.dev/expr/cloudbuild.yaml
@@ -0,0 +1,9 @@
+steps:
+- name: 'gcr.io/cloud-builders/bazel:7.3.2'
+ entrypoint: bazel
+ args: ['build', '...']
+ id: bazel-build
+ waitFor: ['-']
+timeout: 15m
+options:
+ machineType: 'N1_HIGHCPU_32'
diff --git a/vendor/cel.dev/expr/eval.pb.go b/vendor/cel.dev/expr/eval.pb.go
new file mode 100644
index 000000000..a7aae0900
--- /dev/null
+++ b/vendor/cel.dev/expr/eval.pb.go
@@ -0,0 +1,487 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.3
+// protoc v5.27.1
+// source: cel/expr/eval.proto
+
+package expr
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type EvalState struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Values []*ExprValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+ Results []*EvalState_Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *EvalState) Reset() {
+ *x = EvalState{}
+ mi := &file_cel_expr_eval_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *EvalState) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EvalState) ProtoMessage() {}
+
+func (x *EvalState) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EvalState.ProtoReflect.Descriptor instead.
+func (*EvalState) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *EvalState) GetValues() []*ExprValue {
+ if x != nil {
+ return x.Values
+ }
+ return nil
+}
+
+func (x *EvalState) GetResults() []*EvalState_Result {
+ if x != nil {
+ return x.Results
+ }
+ return nil
+}
+
+type ExprValue struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Types that are valid to be assigned to Kind:
+ //
+ // *ExprValue_Value
+ // *ExprValue_Error
+ // *ExprValue_Unknown
+ Kind isExprValue_Kind `protobuf_oneof:"kind"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ExprValue) Reset() {
+ *x = ExprValue{}
+ mi := &file_cel_expr_eval_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ExprValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExprValue) ProtoMessage() {}
+
+func (x *ExprValue) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExprValue.ProtoReflect.Descriptor instead.
+func (*ExprValue) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ExprValue) GetKind() isExprValue_Kind {
+ if x != nil {
+ return x.Kind
+ }
+ return nil
+}
+
+func (x *ExprValue) GetValue() *Value {
+ if x != nil {
+ if x, ok := x.Kind.(*ExprValue_Value); ok {
+ return x.Value
+ }
+ }
+ return nil
+}
+
+func (x *ExprValue) GetError() *ErrorSet {
+ if x != nil {
+ if x, ok := x.Kind.(*ExprValue_Error); ok {
+ return x.Error
+ }
+ }
+ return nil
+}
+
+func (x *ExprValue) GetUnknown() *UnknownSet {
+ if x != nil {
+ if x, ok := x.Kind.(*ExprValue_Unknown); ok {
+ return x.Unknown
+ }
+ }
+ return nil
+}
+
+type isExprValue_Kind interface {
+ isExprValue_Kind()
+}
+
+type ExprValue_Value struct {
+ Value *Value `protobuf:"bytes,1,opt,name=value,proto3,oneof"`
+}
+
+type ExprValue_Error struct {
+ Error *ErrorSet `protobuf:"bytes,2,opt,name=error,proto3,oneof"`
+}
+
+type ExprValue_Unknown struct {
+ Unknown *UnknownSet `protobuf:"bytes,3,opt,name=unknown,proto3,oneof"`
+}
+
+func (*ExprValue_Value) isExprValue_Kind() {}
+
+func (*ExprValue_Error) isExprValue_Kind() {}
+
+func (*ExprValue_Unknown) isExprValue_Kind() {}
+
+type ErrorSet struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Errors []*Status `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ErrorSet) Reset() {
+ *x = ErrorSet{}
+ mi := &file_cel_expr_eval_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ErrorSet) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ErrorSet) ProtoMessage() {}
+
+func (x *ErrorSet) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ErrorSet.ProtoReflect.Descriptor instead.
+func (*ErrorSet) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ErrorSet) GetErrors() []*Status {
+ if x != nil {
+ return x.Errors
+ }
+ return nil
+}
+
+type Status struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
+ Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+ Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Status) Reset() {
+ *x = Status{}
+ mi := &file_cel_expr_eval_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Status) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Status) ProtoMessage() {}
+
+func (x *Status) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Status.ProtoReflect.Descriptor instead.
+func (*Status) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Status) GetCode() int32 {
+ if x != nil {
+ return x.Code
+ }
+ return 0
+}
+
+func (x *Status) GetMessage() string {
+ if x != nil {
+ return x.Message
+ }
+ return ""
+}
+
+func (x *Status) GetDetails() []*anypb.Any {
+ if x != nil {
+ return x.Details
+ }
+ return nil
+}
+
+type UnknownSet struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Exprs []int64 `protobuf:"varint,1,rep,packed,name=exprs,proto3" json:"exprs,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *UnknownSet) Reset() {
+ *x = UnknownSet{}
+ mi := &file_cel_expr_eval_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UnknownSet) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UnknownSet) ProtoMessage() {}
+
+func (x *UnknownSet) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UnknownSet.ProtoReflect.Descriptor instead.
+func (*UnknownSet) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *UnknownSet) GetExprs() []int64 {
+ if x != nil {
+ return x.Exprs
+ }
+ return nil
+}
+
+type EvalState_Result struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Expr int64 `protobuf:"varint,1,opt,name=expr,proto3" json:"expr,omitempty"`
+ Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *EvalState_Result) Reset() {
+ *x = EvalState_Result{}
+ mi := &file_cel_expr_eval_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *EvalState_Result) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EvalState_Result) ProtoMessage() {}
+
+func (x *EvalState_Result) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EvalState_Result.ProtoReflect.Descriptor instead.
+func (*EvalState_Result) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *EvalState_Result) GetExpr() int64 {
+ if x != nil {
+ return x.Expr
+ }
+ return 0
+}
+
+func (x *EvalState_Result) GetValue() int64 {
+ if x != nil {
+ return x.Value
+ }
+ return 0
+}
+
+var File_cel_expr_eval_proto protoreflect.FileDescriptor
+
+var file_cel_expr_eval_proto_rawDesc = []byte{
+ 0x0a, 0x13, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x76, 0x61, 0x6c, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x1a,
+ 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x14, 0x63, 0x65, 0x6c, 0x2f,
+ 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x22, 0xa2, 0x01, 0x0a, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2b,
+ 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x72,
+ 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63,
+ 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74,
+ 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
+ 0x73, 0x1a, 0x32, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65,
+ 0x78, 0x70, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x12,
+ 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2a, 0x0a, 0x05,
+ 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x48,
+ 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e,
+ 0x6f, 0x77, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x48,
+ 0x00, 0x52, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69,
+ 0x6e, 0x64, 0x22, 0x34, 0x0a, 0x08, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x28,
+ 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
+ 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+ 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73,
+ 0x22, 0x22, 0x0a, 0x0a, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x12, 0x14,
+ 0x0a, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x65,
+ 0x78, 0x70, 0x72, 0x73, 0x42, 0x2c, 0x0a, 0x0c, 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+ 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8,
+ 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_cel_expr_eval_proto_rawDescOnce sync.Once
+ file_cel_expr_eval_proto_rawDescData = file_cel_expr_eval_proto_rawDesc
+)
+
+func file_cel_expr_eval_proto_rawDescGZIP() []byte {
+ file_cel_expr_eval_proto_rawDescOnce.Do(func() {
+ file_cel_expr_eval_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_eval_proto_rawDescData)
+ })
+ return file_cel_expr_eval_proto_rawDescData
+}
+
+var file_cel_expr_eval_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
+var file_cel_expr_eval_proto_goTypes = []any{
+ (*EvalState)(nil), // 0: cel.expr.EvalState
+ (*ExprValue)(nil), // 1: cel.expr.ExprValue
+ (*ErrorSet)(nil), // 2: cel.expr.ErrorSet
+ (*Status)(nil), // 3: cel.expr.Status
+ (*UnknownSet)(nil), // 4: cel.expr.UnknownSet
+ (*EvalState_Result)(nil), // 5: cel.expr.EvalState.Result
+ (*Value)(nil), // 6: cel.expr.Value
+ (*anypb.Any)(nil), // 7: google.protobuf.Any
+}
+var file_cel_expr_eval_proto_depIdxs = []int32{
+ 1, // 0: cel.expr.EvalState.values:type_name -> cel.expr.ExprValue
+ 5, // 1: cel.expr.EvalState.results:type_name -> cel.expr.EvalState.Result
+ 6, // 2: cel.expr.ExprValue.value:type_name -> cel.expr.Value
+ 2, // 3: cel.expr.ExprValue.error:type_name -> cel.expr.ErrorSet
+ 4, // 4: cel.expr.ExprValue.unknown:type_name -> cel.expr.UnknownSet
+ 3, // 5: cel.expr.ErrorSet.errors:type_name -> cel.expr.Status
+ 7, // 6: cel.expr.Status.details:type_name -> google.protobuf.Any
+ 7, // [7:7] is the sub-list for method output_type
+ 7, // [7:7] is the sub-list for method input_type
+ 7, // [7:7] is the sub-list for extension type_name
+ 7, // [7:7] is the sub-list for extension extendee
+ 0, // [0:7] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_eval_proto_init() }
+func file_cel_expr_eval_proto_init() {
+ if File_cel_expr_eval_proto != nil {
+ return
+ }
+ file_cel_expr_value_proto_init()
+ file_cel_expr_eval_proto_msgTypes[1].OneofWrappers = []any{
+ (*ExprValue_Value)(nil),
+ (*ExprValue_Error)(nil),
+ (*ExprValue_Unknown)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_eval_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 6,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_eval_proto_goTypes,
+ DependencyIndexes: file_cel_expr_eval_proto_depIdxs,
+ MessageInfos: file_cel_expr_eval_proto_msgTypes,
+ }.Build()
+ File_cel_expr_eval_proto = out.File
+ file_cel_expr_eval_proto_rawDesc = nil
+ file_cel_expr_eval_proto_goTypes = nil
+ file_cel_expr_eval_proto_depIdxs = nil
+}
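
A minimal sketch of how the kind oneof above is populated and read back, using only messages defined in this file; the error code and message are illustrative.

    package main

    import (
    	"fmt"

    	expr "cel.dev/expr"
    )

    func main() {
    	// A failed evaluation: the kind oneof carries an ErrorSet rather than
    	// a Value or UnknownSet. Code 3 is INVALID_ARGUMENT in google.rpc
    	// numbering.
    	ev := &expr.ExprValue{
    		Kind: &expr.ExprValue_Error{
    			Error: &expr.ErrorSet{
    				Errors: []*expr.Status{{
    					Code:    3,
    					Message: "no such overload",
    				}},
    			},
    		},
    	}
    	// GetError returns nil when the oneof holds a different kind.
    	if errs := ev.GetError(); errs != nil {
    		fmt.Println(errs.GetErrors()[0].GetMessage())
    	}
    }
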
diff --git a/vendor/cel.dev/expr/explain.pb.go b/vendor/cel.dev/expr/explain.pb.go
new file mode 100644
index 000000000..79fd5443b
--- /dev/null
+++ b/vendor/cel.dev/expr/explain.pb.go
@@ -0,0 +1,236 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.5
+// source: cel/expr/explain.proto
+
+package expr
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Deprecated: Do not use.
+type Explain struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+ ExprSteps []*Explain_ExprStep `protobuf:"bytes,2,rep,name=expr_steps,json=exprSteps,proto3" json:"expr_steps,omitempty"`
+}
+
+func (x *Explain) Reset() {
+ *x = Explain{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_explain_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Explain) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Explain) ProtoMessage() {}
+
+func (x *Explain) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_explain_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Explain.ProtoReflect.Descriptor instead.
+func (*Explain) Descriptor() ([]byte, []int) {
+ return file_cel_expr_explain_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Explain) GetValues() []*Value {
+ if x != nil {
+ return x.Values
+ }
+ return nil
+}
+
+func (x *Explain) GetExprSteps() []*Explain_ExprStep {
+ if x != nil {
+ return x.ExprSteps
+ }
+ return nil
+}
+
+type Explain_ExprStep struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ ValueIndex int32 `protobuf:"varint,2,opt,name=value_index,json=valueIndex,proto3" json:"value_index,omitempty"`
+}
+
+func (x *Explain_ExprStep) Reset() {
+ *x = Explain_ExprStep{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_explain_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Explain_ExprStep) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Explain_ExprStep) ProtoMessage() {}
+
+func (x *Explain_ExprStep) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_explain_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Explain_ExprStep.ProtoReflect.Descriptor instead.
+func (*Explain_ExprStep) Descriptor() ([]byte, []int) {
+ return file_cel_expr_explain_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *Explain_ExprStep) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (x *Explain_ExprStep) GetValueIndex() int32 {
+ if x != nil {
+ return x.ValueIndex
+ }
+ return 0
+}
+
+var File_cel_expr_explain_proto protoreflect.FileDescriptor
+
+var file_cel_expr_explain_proto_rawDesc = []byte{
+ 0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x78, 0x70, 0x6c, 0x61,
+ 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x1a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xae, 0x01, 0x0a, 0x07, 0x45, 0x78, 0x70,
+ 0x6c, 0x61, 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x39, 0x0a,
+ 0x0a, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70,
+ 0x6c, 0x61, 0x69, 0x6e, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x52, 0x09, 0x65,
+ 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x73, 0x1a, 0x3b, 0x0a, 0x08, 0x45, 0x78, 0x70, 0x72,
+ 0x53, 0x74, 0x65, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x02, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x69, 0x6e,
+ 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x49, 0x6e, 0x64, 0x65, 0x78, 0x3a, 0x02, 0x18, 0x01, 0x42, 0x2f, 0x0a, 0x0c, 0x64, 0x65, 0x76,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0c, 0x45, 0x78, 0x70, 0x6c, 0x61,
+ 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64,
+ 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
+}
+
+var (
+ file_cel_expr_explain_proto_rawDescOnce sync.Once
+ file_cel_expr_explain_proto_rawDescData = file_cel_expr_explain_proto_rawDesc
+)
+
+func file_cel_expr_explain_proto_rawDescGZIP() []byte {
+ file_cel_expr_explain_proto_rawDescOnce.Do(func() {
+ file_cel_expr_explain_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_explain_proto_rawDescData)
+ })
+ return file_cel_expr_explain_proto_rawDescData
+}
+
+var file_cel_expr_explain_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_cel_expr_explain_proto_goTypes = []interface{}{
+ (*Explain)(nil), // 0: cel.expr.Explain
+ (*Explain_ExprStep)(nil), // 1: cel.expr.Explain.ExprStep
+ (*Value)(nil), // 2: cel.expr.Value
+}
+var file_cel_expr_explain_proto_depIdxs = []int32{
+ 2, // 0: cel.expr.Explain.values:type_name -> cel.expr.Value
+ 1, // 1: cel.expr.Explain.expr_steps:type_name -> cel.expr.Explain.ExprStep
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_explain_proto_init() }
+func file_cel_expr_explain_proto_init() {
+ if File_cel_expr_explain_proto != nil {
+ return
+ }
+ file_cel_expr_value_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_cel_expr_explain_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Explain); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_explain_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Explain_ExprStep); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_explain_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_explain_proto_goTypes,
+ DependencyIndexes: file_cel_expr_explain_proto_depIdxs,
+ MessageInfos: file_cel_expr_explain_proto_msgTypes,
+ }.Build()
+ File_cel_expr_explain_proto = out.File
+ file_cel_expr_explain_proto_rawDesc = nil
+ file_cel_expr_explain_proto_goTypes = nil
+ file_cel_expr_explain_proto_depIdxs = nil
+}
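
Although the Explain message is marked deprecated, it still round-trips like any generated message. A minimal sketch, assuming the standard google.golang.org/protobuf/proto package:

    package main

    import (
    	"fmt"

    	expr "cel.dev/expr"
    	"google.golang.org/protobuf/proto"
    )

    func main() {
    	in := &expr.Explain{
    		ExprSteps: []*expr.Explain_ExprStep{{Id: 1, ValueIndex: 0}},
    	}
    	// Wire-format round trip through the generated descriptor.
    	raw, err := proto.Marshal(in)
    	if err != nil {
    		panic(err)
    	}
    	out := &expr.Explain{}
    	if err := proto.Unmarshal(raw, out); err != nil {
    		panic(err)
    	}
    	fmt.Println(out.GetExprSteps()[0].GetId())
    }
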
diff --git a/vendor/cel.dev/expr/regen_go_proto.sh b/vendor/cel.dev/expr/regen_go_proto.sh
new file mode 100644
index 000000000..fdcbb3ce2
--- /dev/null
+++ b/vendor/cel.dev/expr/regen_go_proto.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+bazel build //proto/cel/expr/conformance/...
+files=($(bazel aquery 'kind(proto, //proto/cel/expr/conformance/...)' | grep Outputs | grep "[.]pb[.]go" | sed 's/Outputs: \[//' | sed 's/\]//' | tr "," "\n"))
+for src in "${files[@]}";
+do
+  dst=$(echo "$src" | sed 's/\(.*\/cel.dev\/expr\/\(.*\)\)/\2/')
+  echo "copying $dst"
+  cp "$src" "$dst"
+done
diff --git a/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh b/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh
new file mode 100644
index 000000000..9a13479e4
--- /dev/null
+++ b/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+bazel build //proto/cel/expr:all
+
+rm -vf ./*.pb.go
+
+files=( $(bazel cquery //proto/cel/expr:expr_go_proto --output=starlark --starlark:expr="'\n'.join([f.path for f in target.output_groups.go_generated_srcs.to_list()])") )
+for src in "${files[@]}";
+do
+ cp -v "${src}" ./
+done
diff --git a/vendor/cel.dev/expr/syntax.pb.go b/vendor/cel.dev/expr/syntax.pb.go
new file mode 100644
index 000000000..48a952872
--- /dev/null
+++ b/vendor/cel.dev/expr/syntax.pb.go
@@ -0,0 +1,1633 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.5
+// source: cel/expr/syntax.proto
+
+package expr
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type SourceInfo_Extension_Component int32
+
+const (
+ SourceInfo_Extension_COMPONENT_UNSPECIFIED SourceInfo_Extension_Component = 0
+ SourceInfo_Extension_COMPONENT_PARSER SourceInfo_Extension_Component = 1
+ SourceInfo_Extension_COMPONENT_TYPE_CHECKER SourceInfo_Extension_Component = 2
+ SourceInfo_Extension_COMPONENT_RUNTIME SourceInfo_Extension_Component = 3
+)
+
+// Enum value maps for SourceInfo_Extension_Component.
+var (
+ SourceInfo_Extension_Component_name = map[int32]string{
+ 0: "COMPONENT_UNSPECIFIED",
+ 1: "COMPONENT_PARSER",
+ 2: "COMPONENT_TYPE_CHECKER",
+ 3: "COMPONENT_RUNTIME",
+ }
+ SourceInfo_Extension_Component_value = map[string]int32{
+ "COMPONENT_UNSPECIFIED": 0,
+ "COMPONENT_PARSER": 1,
+ "COMPONENT_TYPE_CHECKER": 2,
+ "COMPONENT_RUNTIME": 3,
+ }
+)
+
+func (x SourceInfo_Extension_Component) Enum() *SourceInfo_Extension_Component {
+ p := new(SourceInfo_Extension_Component)
+ *p = x
+ return p
+}
+
+func (x SourceInfo_Extension_Component) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SourceInfo_Extension_Component) Descriptor() protoreflect.EnumDescriptor {
+ return file_cel_expr_syntax_proto_enumTypes[0].Descriptor()
+}
+
+func (SourceInfo_Extension_Component) Type() protoreflect.EnumType {
+ return &file_cel_expr_syntax_proto_enumTypes[0]
+}
+
+func (x SourceInfo_Extension_Component) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use SourceInfo_Extension_Component.Descriptor instead.
+func (SourceInfo_Extension_Component) EnumDescriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2, 0}
+}
+
+type ParsedExpr struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Expr *Expr `protobuf:"bytes,2,opt,name=expr,proto3" json:"expr,omitempty"`
+ SourceInfo *SourceInfo `protobuf:"bytes,3,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"`
+}
+
+func (x *ParsedExpr) Reset() {
+ *x = ParsedExpr{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ParsedExpr) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ParsedExpr) ProtoMessage() {}
+
+func (x *ParsedExpr) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ParsedExpr.ProtoReflect.Descriptor instead.
+func (*ParsedExpr) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ParsedExpr) GetExpr() *Expr {
+ if x != nil {
+ return x.Expr
+ }
+ return nil
+}
+
+func (x *ParsedExpr) GetSourceInfo() *SourceInfo {
+ if x != nil {
+ return x.SourceInfo
+ }
+ return nil
+}
+
+type Expr struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"`
+ // Types that are assignable to ExprKind:
+ //
+ // *Expr_ConstExpr
+ // *Expr_IdentExpr
+ // *Expr_SelectExpr
+ // *Expr_CallExpr
+ // *Expr_ListExpr
+ // *Expr_StructExpr
+ // *Expr_ComprehensionExpr
+ ExprKind isExpr_ExprKind `protobuf_oneof:"expr_kind"`
+}
+
+func (x *Expr) Reset() {
+ *x = Expr{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr) ProtoMessage() {}
+
+func (x *Expr) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr.ProtoReflect.Descriptor instead.
+func (*Expr) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Expr) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (m *Expr) GetExprKind() isExpr_ExprKind {
+ if m != nil {
+ return m.ExprKind
+ }
+ return nil
+}
+
+func (x *Expr) GetConstExpr() *Constant {
+ if x, ok := x.GetExprKind().(*Expr_ConstExpr); ok {
+ return x.ConstExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetIdentExpr() *Expr_Ident {
+ if x, ok := x.GetExprKind().(*Expr_IdentExpr); ok {
+ return x.IdentExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetSelectExpr() *Expr_Select {
+ if x, ok := x.GetExprKind().(*Expr_SelectExpr); ok {
+ return x.SelectExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetCallExpr() *Expr_Call {
+ if x, ok := x.GetExprKind().(*Expr_CallExpr); ok {
+ return x.CallExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetListExpr() *Expr_CreateList {
+ if x, ok := x.GetExprKind().(*Expr_ListExpr); ok {
+ return x.ListExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetStructExpr() *Expr_CreateStruct {
+ if x, ok := x.GetExprKind().(*Expr_StructExpr); ok {
+ return x.StructExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetComprehensionExpr() *Expr_Comprehension {
+ if x, ok := x.GetExprKind().(*Expr_ComprehensionExpr); ok {
+ return x.ComprehensionExpr
+ }
+ return nil
+}
+
+type isExpr_ExprKind interface {
+ isExpr_ExprKind()
+}
+
+type Expr_ConstExpr struct {
+ ConstExpr *Constant `protobuf:"bytes,3,opt,name=const_expr,json=constExpr,proto3,oneof"`
+}
+
+type Expr_IdentExpr struct {
+ IdentExpr *Expr_Ident `protobuf:"bytes,4,opt,name=ident_expr,json=identExpr,proto3,oneof"`
+}
+
+type Expr_SelectExpr struct {
+ SelectExpr *Expr_Select `protobuf:"bytes,5,opt,name=select_expr,json=selectExpr,proto3,oneof"`
+}
+
+type Expr_CallExpr struct {
+ CallExpr *Expr_Call `protobuf:"bytes,6,opt,name=call_expr,json=callExpr,proto3,oneof"`
+}
+
+type Expr_ListExpr struct {
+ ListExpr *Expr_CreateList `protobuf:"bytes,7,opt,name=list_expr,json=listExpr,proto3,oneof"`
+}
+
+type Expr_StructExpr struct {
+ StructExpr *Expr_CreateStruct `protobuf:"bytes,8,opt,name=struct_expr,json=structExpr,proto3,oneof"`
+}
+
+type Expr_ComprehensionExpr struct {
+ ComprehensionExpr *Expr_Comprehension `protobuf:"bytes,9,opt,name=comprehension_expr,json=comprehensionExpr,proto3,oneof"`
+}
+
+func (*Expr_ConstExpr) isExpr_ExprKind() {}
+
+func (*Expr_IdentExpr) isExpr_ExprKind() {}
+
+func (*Expr_SelectExpr) isExpr_ExprKind() {}
+
+func (*Expr_CallExpr) isExpr_ExprKind() {}
+
+func (*Expr_ListExpr) isExpr_ExprKind() {}
+
+func (*Expr_StructExpr) isExpr_ExprKind() {}
+
+func (*Expr_ComprehensionExpr) isExpr_ExprKind() {}
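
The expr_kind oneof is modeled as the unexported isExpr_ExprKind interface plus one wrapper struct per arm; exactly one wrapper may be assigned at a time. Reading back goes through either the arm-specific getter or a type switch on GetExprKind(). Continuing the sketch above:

    e := &expr.Expr{
        Id: 7,
        ExprKind: &expr.Expr_ConstExpr{
            ConstExpr: &expr.Constant{
                ConstantKind: &expr.Constant_StringValue{StringValue: "hi"},
            },
        },
    }

    fmt.Println(e.GetConstExpr().GetStringValue()) // "hi"

    switch k := e.GetExprKind().(type) {
    case *expr.Expr_ConstExpr:
        fmt.Println("constant:", k.ConstExpr.GetStringValue())
    case *expr.Expr_CallExpr:
        fmt.Println("call:", k.CallExpr.GetFunction())
    }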
+
+type Constant struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to ConstantKind:
+ //
+ // *Constant_NullValue
+ // *Constant_BoolValue
+ // *Constant_Int64Value
+ // *Constant_Uint64Value
+ // *Constant_DoubleValue
+ // *Constant_StringValue
+ // *Constant_BytesValue
+ // *Constant_DurationValue
+ // *Constant_TimestampValue
+ ConstantKind isConstant_ConstantKind `protobuf_oneof:"constant_kind"`
+}
+
+func (x *Constant) Reset() {
+ *x = Constant{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Constant) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Constant) ProtoMessage() {}
+
+func (x *Constant) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Constant.ProtoReflect.Descriptor instead.
+func (*Constant) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{2}
+}
+
+func (m *Constant) GetConstantKind() isConstant_ConstantKind {
+ if m != nil {
+ return m.ConstantKind
+ }
+ return nil
+}
+
+func (x *Constant) GetNullValue() structpb.NullValue {
+ if x, ok := x.GetConstantKind().(*Constant_NullValue); ok {
+ return x.NullValue
+ }
+ return structpb.NullValue(0)
+}
+
+func (x *Constant) GetBoolValue() bool {
+ if x, ok := x.GetConstantKind().(*Constant_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (x *Constant) GetInt64Value() int64 {
+ if x, ok := x.GetConstantKind().(*Constant_Int64Value); ok {
+ return x.Int64Value
+ }
+ return 0
+}
+
+func (x *Constant) GetUint64Value() uint64 {
+ if x, ok := x.GetConstantKind().(*Constant_Uint64Value); ok {
+ return x.Uint64Value
+ }
+ return 0
+}
+
+func (x *Constant) GetDoubleValue() float64 {
+ if x, ok := x.GetConstantKind().(*Constant_DoubleValue); ok {
+ return x.DoubleValue
+ }
+ return 0
+}
+
+func (x *Constant) GetStringValue() string {
+ if x, ok := x.GetConstantKind().(*Constant_StringValue); ok {
+ return x.StringValue
+ }
+ return ""
+}
+
+func (x *Constant) GetBytesValue() []byte {
+ if x, ok := x.GetConstantKind().(*Constant_BytesValue); ok {
+ return x.BytesValue
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (x *Constant) GetDurationValue() *durationpb.Duration {
+ if x, ok := x.GetConstantKind().(*Constant_DurationValue); ok {
+ return x.DurationValue
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (x *Constant) GetTimestampValue() *timestamppb.Timestamp {
+ if x, ok := x.GetConstantKind().(*Constant_TimestampValue); ok {
+ return x.TimestampValue
+ }
+ return nil
+}
+
+type isConstant_ConstantKind interface {
+ isConstant_ConstantKind()
+}
+
+type Constant_NullValue struct {
+ NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Constant_BoolValue struct {
+ BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type Constant_Int64Value struct {
+ Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"`
+}
+
+type Constant_Uint64Value struct {
+ Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"`
+}
+
+type Constant_DoubleValue struct {
+ DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+type Constant_StringValue struct {
+ StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type Constant_BytesValue struct {
+ BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"`
+}
+
+type Constant_DurationValue struct {
+ // Deprecated: Do not use.
+ DurationValue *durationpb.Duration `protobuf:"bytes,8,opt,name=duration_value,json=durationValue,proto3,oneof"`
+}
+
+type Constant_TimestampValue struct {
+ // Deprecated: Do not use.
+ TimestampValue *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=timestamp_value,json=timestampValue,proto3,oneof"`
+}
+
+func (*Constant_NullValue) isConstant_ConstantKind() {}
+
+func (*Constant_BoolValue) isConstant_ConstantKind() {}
+
+func (*Constant_Int64Value) isConstant_ConstantKind() {}
+
+func (*Constant_Uint64Value) isConstant_ConstantKind() {}
+
+func (*Constant_DoubleValue) isConstant_ConstantKind() {}
+
+func (*Constant_StringValue) isConstant_ConstantKind() {}
+
+func (*Constant_BytesValue) isConstant_ConstantKind() {}
+
+func (*Constant_DurationValue) isConstant_ConstantKind() {}
+
+func (*Constant_TimestampValue) isConstant_ConstantKind() {}
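
Two easy-to-miss behaviors of the Constant accessors: a getter for an arm that is not currently set returns that type's zero value rather than an error, and the duration_value/timestamp_value arms carry the schema's deprecated option (hence the Deprecated comments above). Continuing the sketch:

    c := &expr.Constant{ConstantKind: &expr.Constant_Int64Value{Int64Value: 42}}
    fmt.Println(c.GetInt64Value())  // 42
    fmt.Println(c.GetStringValue()) // ""; string_value is not the set arm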
+
+type SourceInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ SyntaxVersion string `protobuf:"bytes,1,opt,name=syntax_version,json=syntaxVersion,proto3" json:"syntax_version,omitempty"`
+ Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"`
+ LineOffsets []int32 `protobuf:"varint,3,rep,packed,name=line_offsets,json=lineOffsets,proto3" json:"line_offsets,omitempty"`
+ Positions map[int64]int32 `protobuf:"bytes,4,rep,name=positions,proto3" json:"positions,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
+ MacroCalls map[int64]*Expr `protobuf:"bytes,5,rep,name=macro_calls,json=macroCalls,proto3" json:"macro_calls,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ Extensions []*SourceInfo_Extension `protobuf:"bytes,6,rep,name=extensions,proto3" json:"extensions,omitempty"`
+}
+
+func (x *SourceInfo) Reset() {
+ *x = SourceInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SourceInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SourceInfo) ProtoMessage() {}
+
+func (x *SourceInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SourceInfo.ProtoReflect.Descriptor instead.
+func (*SourceInfo) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *SourceInfo) GetSyntaxVersion() string {
+ if x != nil {
+ return x.SyntaxVersion
+ }
+ return ""
+}
+
+func (x *SourceInfo) GetLocation() string {
+ if x != nil {
+ return x.Location
+ }
+ return ""
+}
+
+func (x *SourceInfo) GetLineOffsets() []int32 {
+ if x != nil {
+ return x.LineOffsets
+ }
+ return nil
+}
+
+func (x *SourceInfo) GetPositions() map[int64]int32 {
+ if x != nil {
+ return x.Positions
+ }
+ return nil
+}
+
+func (x *SourceInfo) GetMacroCalls() map[int64]*Expr {
+ if x != nil {
+ return x.MacroCalls
+ }
+ return nil
+}
+
+func (x *SourceInfo) GetExtensions() []*SourceInfo_Extension {
+ if x != nil {
+ return x.Extensions
+ }
+ return nil
+}
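
SourceInfo is plain data: positions and macro_calls come out as ordinary Go maps (the PositionsEntry/MacroCallsEntry types in the descriptor below are protobuf's standard map encoding, not messages to construct by hand). By CEL convention the maps are keyed by expression id; a sketch under that assumption:

    si := &expr.SourceInfo{
        SyntaxVersion: "v1",
        Location:      "<input>",
        Positions:     map[int64]int32{7: 3}, // expr id 7 at source offset 3 (assumed semantics)
    }
    fmt.Println(si.GetPositions()[7]) // 3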
+
+type Expr_Ident struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *Expr_Ident) Reset() {
+ *x = Expr_Ident{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Ident) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Ident) ProtoMessage() {}
+
+func (x *Expr_Ident) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Ident.ProtoReflect.Descriptor instead.
+func (*Expr_Ident) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 0}
+}
+
+func (x *Expr_Ident) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+type Expr_Select struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Operand *Expr `protobuf:"bytes,1,opt,name=operand,proto3" json:"operand,omitempty"`
+ Field string `protobuf:"bytes,2,opt,name=field,proto3" json:"field,omitempty"`
+ TestOnly bool `protobuf:"varint,3,opt,name=test_only,json=testOnly,proto3" json:"test_only,omitempty"`
+}
+
+func (x *Expr_Select) Reset() {
+ *x = Expr_Select{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Select) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Select) ProtoMessage() {}
+
+func (x *Expr_Select) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Select.ProtoReflect.Descriptor instead.
+func (*Expr_Select) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 1}
+}
+
+func (x *Expr_Select) GetOperand() *Expr {
+ if x != nil {
+ return x.Operand
+ }
+ return nil
+}
+
+func (x *Expr_Select) GetField() string {
+ if x != nil {
+ return x.Field
+ }
+ return ""
+}
+
+func (x *Expr_Select) GetTestOnly() bool {
+ if x != nil {
+ return x.TestOnly
+ }
+ return false
+}
+
+type Expr_Call struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Target *Expr `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"`
+ Function string `protobuf:"bytes,2,opt,name=function,proto3" json:"function,omitempty"`
+ Args []*Expr `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"`
+}
+
+func (x *Expr_Call) Reset() {
+ *x = Expr_Call{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Call) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Call) ProtoMessage() {}
+
+func (x *Expr_Call) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Call.ProtoReflect.Descriptor instead.
+func (*Expr_Call) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 2}
+}
+
+func (x *Expr_Call) GetTarget() *Expr {
+ if x != nil {
+ return x.Target
+ }
+ return nil
+}
+
+func (x *Expr_Call) GetFunction() string {
+ if x != nil {
+ return x.Function
+ }
+ return ""
+}
+
+func (x *Expr_Call) GetArgs() []*Expr {
+ if x != nil {
+ return x.Args
+ }
+ return nil
+}
+
+type Expr_CreateList struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Elements []*Expr `protobuf:"bytes,1,rep,name=elements,proto3" json:"elements,omitempty"`
+ OptionalIndices []int32 `protobuf:"varint,2,rep,packed,name=optional_indices,json=optionalIndices,proto3" json:"optional_indices,omitempty"`
+}
+
+func (x *Expr_CreateList) Reset() {
+ *x = Expr_CreateList{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_CreateList) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_CreateList) ProtoMessage() {}
+
+func (x *Expr_CreateList) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_CreateList.ProtoReflect.Descriptor instead.
+func (*Expr_CreateList) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 3}
+}
+
+func (x *Expr_CreateList) GetElements() []*Expr {
+ if x != nil {
+ return x.Elements
+ }
+ return nil
+}
+
+func (x *Expr_CreateList) GetOptionalIndices() []int32 {
+ if x != nil {
+ return x.OptionalIndices
+ }
+ return nil
+}
+
+type Expr_CreateStruct struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ MessageName string `protobuf:"bytes,1,opt,name=message_name,json=messageName,proto3" json:"message_name,omitempty"`
+ Entries []*Expr_CreateStruct_Entry `protobuf:"bytes,2,rep,name=entries,proto3" json:"entries,omitempty"`
+}
+
+func (x *Expr_CreateStruct) Reset() {
+ *x = Expr_CreateStruct{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_CreateStruct) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_CreateStruct) ProtoMessage() {}
+
+func (x *Expr_CreateStruct) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_CreateStruct.ProtoReflect.Descriptor instead.
+func (*Expr_CreateStruct) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 4}
+}
+
+func (x *Expr_CreateStruct) GetMessageName() string {
+ if x != nil {
+ return x.MessageName
+ }
+ return ""
+}
+
+func (x *Expr_CreateStruct) GetEntries() []*Expr_CreateStruct_Entry {
+ if x != nil {
+ return x.Entries
+ }
+ return nil
+}
+
+type Expr_Comprehension struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ IterVar string `protobuf:"bytes,1,opt,name=iter_var,json=iterVar,proto3" json:"iter_var,omitempty"`
+ IterRange *Expr `protobuf:"bytes,2,opt,name=iter_range,json=iterRange,proto3" json:"iter_range,omitempty"`
+ AccuVar string `protobuf:"bytes,3,opt,name=accu_var,json=accuVar,proto3" json:"accu_var,omitempty"`
+ AccuInit *Expr `protobuf:"bytes,4,opt,name=accu_init,json=accuInit,proto3" json:"accu_init,omitempty"`
+ LoopCondition *Expr `protobuf:"bytes,5,opt,name=loop_condition,json=loopCondition,proto3" json:"loop_condition,omitempty"`
+ LoopStep *Expr `protobuf:"bytes,6,opt,name=loop_step,json=loopStep,proto3" json:"loop_step,omitempty"`
+ Result *Expr `protobuf:"bytes,7,opt,name=result,proto3" json:"result,omitempty"`
+}
+
+func (x *Expr_Comprehension) Reset() {
+ *x = Expr_Comprehension{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Comprehension) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Comprehension) ProtoMessage() {}
+
+func (x *Expr_Comprehension) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Comprehension.ProtoReflect.Descriptor instead.
+func (*Expr_Comprehension) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 5}
+}
+
+func (x *Expr_Comprehension) GetIterVar() string {
+ if x != nil {
+ return x.IterVar
+ }
+ return ""
+}
+
+func (x *Expr_Comprehension) GetIterRange() *Expr {
+ if x != nil {
+ return x.IterRange
+ }
+ return nil
+}
+
+func (x *Expr_Comprehension) GetAccuVar() string {
+ if x != nil {
+ return x.AccuVar
+ }
+ return ""
+}
+
+func (x *Expr_Comprehension) GetAccuInit() *Expr {
+ if x != nil {
+ return x.AccuInit
+ }
+ return nil
+}
+
+func (x *Expr_Comprehension) GetLoopCondition() *Expr {
+ if x != nil {
+ return x.LoopCondition
+ }
+ return nil
+}
+
+func (x *Expr_Comprehension) GetLoopStep() *Expr {
+ if x != nil {
+ return x.LoopStep
+ }
+ return nil
+}
+
+func (x *Expr_Comprehension) GetResult() *Expr {
+ if x != nil {
+ return x.Result
+ }
+ return nil
+}
+
+type Expr_CreateStruct_Entry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ // Types that are assignable to KeyKind:
+ //
+ // *Expr_CreateStruct_Entry_FieldKey
+ // *Expr_CreateStruct_Entry_MapKey
+ KeyKind isExpr_CreateStruct_Entry_KeyKind `protobuf_oneof:"key_kind"`
+ Value *Expr `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"`
+ OptionalEntry bool `protobuf:"varint,5,opt,name=optional_entry,json=optionalEntry,proto3" json:"optional_entry,omitempty"`
+}
+
+func (x *Expr_CreateStruct_Entry) Reset() {
+ *x = Expr_CreateStruct_Entry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_CreateStruct_Entry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_CreateStruct_Entry) ProtoMessage() {}
+
+func (x *Expr_CreateStruct_Entry) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_CreateStruct_Entry.ProtoReflect.Descriptor instead.
+func (*Expr_CreateStruct_Entry) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 4, 0}
+}
+
+func (x *Expr_CreateStruct_Entry) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (m *Expr_CreateStruct_Entry) GetKeyKind() isExpr_CreateStruct_Entry_KeyKind {
+ if m != nil {
+ return m.KeyKind
+ }
+ return nil
+}
+
+func (x *Expr_CreateStruct_Entry) GetFieldKey() string {
+ if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_FieldKey); ok {
+ return x.FieldKey
+ }
+ return ""
+}
+
+func (x *Expr_CreateStruct_Entry) GetMapKey() *Expr {
+ if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_MapKey); ok {
+ return x.MapKey
+ }
+ return nil
+}
+
+func (x *Expr_CreateStruct_Entry) GetValue() *Expr {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+func (x *Expr_CreateStruct_Entry) GetOptionalEntry() bool {
+ if x != nil {
+ return x.OptionalEntry
+ }
+ return false
+}
+
+type isExpr_CreateStruct_Entry_KeyKind interface {
+ isExpr_CreateStruct_Entry_KeyKind()
+}
+
+type Expr_CreateStruct_Entry_FieldKey struct {
+ FieldKey string `protobuf:"bytes,2,opt,name=field_key,json=fieldKey,proto3,oneof"`
+}
+
+type Expr_CreateStruct_Entry_MapKey struct {
+ MapKey *Expr `protobuf:"bytes,3,opt,name=map_key,json=mapKey,proto3,oneof"`
+}
+
+func (*Expr_CreateStruct_Entry_FieldKey) isExpr_CreateStruct_Entry_KeyKind() {}
+
+func (*Expr_CreateStruct_Entry_MapKey) isExpr_CreateStruct_Entry_KeyKind() {}
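
Entry applies the same oneof pattern to its key: field_key for message construction, map_key for map literals, while value and optional_entry sit outside the oneof as ordinary fields. Continuing the sketch:

    entry := &expr.Expr_CreateStruct_Entry{
        Id:      9,
        KeyKind: &expr.Expr_CreateStruct_Entry_FieldKey{FieldKey: "name"},
        Value:   &expr.Expr{Id: 10},
    }
    fmt.Println(entry.GetFieldKey()) // "name"
    fmt.Println(entry.GetMapKey())   // <nil>; the map_key arm is unset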
+
+type SourceInfo_Extension struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ AffectedComponents []SourceInfo_Extension_Component `protobuf:"varint,2,rep,packed,name=affected_components,json=affectedComponents,proto3,enum=cel.expr.SourceInfo_Extension_Component" json:"affected_components,omitempty"`
+ Version *SourceInfo_Extension_Version `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"`
+}
+
+func (x *SourceInfo_Extension) Reset() {
+ *x = SourceInfo_Extension{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SourceInfo_Extension) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SourceInfo_Extension) ProtoMessage() {}
+
+func (x *SourceInfo_Extension) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SourceInfo_Extension.ProtoReflect.Descriptor instead.
+func (*SourceInfo_Extension) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2}
+}
+
+func (x *SourceInfo_Extension) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *SourceInfo_Extension) GetAffectedComponents() []SourceInfo_Extension_Component {
+ if x != nil {
+ return x.AffectedComponents
+ }
+ return nil
+}
+
+func (x *SourceInfo_Extension) GetVersion() *SourceInfo_Extension_Version {
+ if x != nil {
+ return x.Version
+ }
+ return nil
+}
+
+type SourceInfo_Extension_Version struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Major int64 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"`
+ Minor int64 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"`
+}
+
+func (x *SourceInfo_Extension_Version) Reset() {
+ *x = SourceInfo_Extension_Version{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SourceInfo_Extension_Version) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SourceInfo_Extension_Version) ProtoMessage() {}
+
+func (x *SourceInfo_Extension_Version) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SourceInfo_Extension_Version.ProtoReflect.Descriptor instead.
+func (*SourceInfo_Extension_Version) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2, 0}
+}
+
+func (x *SourceInfo_Extension_Version) GetMajor() int64 {
+ if x != nil {
+ return x.Major
+ }
+ return 0
+}
+
+func (x *SourceInfo_Extension_Version) GetMinor() int64 {
+ if x != nil {
+ return x.Minor
+ }
+ return 0
+}
+
+var File_cel_expr_syntax_proto protoreflect.FileDescriptor
+
+var file_cel_expr_syntax_proto_rawDesc = []byte{
+ 0x0a, 0x15, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e, 0x74, 0x61,
+ 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x22, 0x67, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x12, 0x22,
+ 0x0a, 0x04, 0x65, 0x78, 0x70, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63,
+ 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78,
+ 0x70, 0x72, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66,
+ 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xfd, 0x0a, 0x0a, 0x04, 0x45, 0x78,
+ 0x70, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02,
+ 0x69, 0x64, 0x12, 0x33, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x09, 0x63, 0x6f,
+ 0x6e, 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x35, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74,
+ 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x49, 0x64, 0x65, 0x6e,
+ 0x74, 0x48, 0x00, 0x52, 0x09, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x38,
+ 0x0a, 0x0b, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45,
+ 0x78, 0x70, 0x72, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x65,
+ 0x6c, 0x65, 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x32, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c,
+ 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x61, 0x6c, 0x6c,
+ 0x48, 0x00, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x12, 0x38, 0x0a, 0x09,
+ 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x19, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e,
+ 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x6c, 0x69,
+ 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x3e, 0x0a, 0x0b, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74,
+ 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x74, 0x72, 0x75,
+ 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x4d, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65,
+ 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x09, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78,
+ 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x48, 0x00, 0x52, 0x11, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f,
+ 0x6e, 0x45, 0x78, 0x70, 0x72, 0x1a, 0x1b, 0x0a, 0x05, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x12,
+ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x1a, 0x65, 0x0a, 0x06, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x28, 0x0a, 0x07,
+ 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x07, 0x6f,
+ 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1b, 0x0a, 0x09,
+ 0x74, 0x65, 0x73, 0x74, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x08, 0x74, 0x65, 0x73, 0x74, 0x4f, 0x6e, 0x6c, 0x79, 0x1a, 0x6e, 0x0a, 0x04, 0x43, 0x61, 0x6c,
+ 0x6c, 0x12, 0x26, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70,
+ 0x72, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x75, 0x6e,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6e,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x03, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45,
+ 0x78, 0x70, 0x72, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x1a, 0x63, 0x0a, 0x0a, 0x43, 0x72, 0x65,
+ 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f,
+ 0x69, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x1a, 0xab,
+ 0x02, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12,
+ 0x21, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4e, 0x61,
+ 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45,
+ 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74,
+ 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a,
+ 0xba, 0x01, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x09, 0x66, 0x69, 0x65,
+ 0x6c, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08,
+ 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x07, 0x6d, 0x61, 0x70, 0x5f,
+ 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x48, 0x00, 0x52, 0x06, 0x6d, 0x61, 0x70,
+ 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78,
+ 0x70, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x0d, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x42, 0x0a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x1a, 0xad, 0x02, 0x0a,
+ 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19,
+ 0x0a, 0x08, 0x69, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x07, 0x69, 0x74, 0x65, 0x72, 0x56, 0x61, 0x72, 0x12, 0x2d, 0x0a, 0x0a, 0x69, 0x74, 0x65,
+ 0x72, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x69,
+ 0x74, 0x65, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x75,
+ 0x5f, 0x76, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x63, 0x75,
+ 0x56, 0x61, 0x72, 0x12, 0x2b, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x75, 0x5f, 0x69, 0x6e, 0x69, 0x74,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x61, 0x63, 0x63, 0x75, 0x49, 0x6e, 0x69, 0x74,
+ 0x12, 0x35, 0x0a, 0x0e, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65,
+ 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x70, 0x43, 0x6f,
+ 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x09, 0x6c, 0x6f, 0x6f, 0x70, 0x5f,
+ 0x73, 0x74, 0x65, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x6c, 0x6f, 0x6f, 0x70,
+ 0x53, 0x74, 0x65, 0x70, 0x12, 0x26, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x07,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x45, 0x78, 0x70, 0x72, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x0b, 0x0a, 0x09,
+ 0x65, 0x78, 0x70, 0x72, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc1, 0x03, 0x0a, 0x08, 0x43, 0x6f,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c,
+ 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74,
+ 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36,
+ 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52,
+ 0x0b, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c,
+ 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e,
+ 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62,
+ 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x46, 0x0a, 0x0e, 0x64, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x18, 0x01,
+ 0x48, 0x00, 0x52, 0x0d, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x49, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
+ 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0x0a, 0x0d,
+ 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xac, 0x06,
+ 0x0a, 0x0a, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0e,
+ 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x56, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x21, 0x0a, 0x0c, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x73, 0x18,
+ 0x03, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0b, 0x6c, 0x69, 0x6e, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65,
+ 0x74, 0x73, 0x12, 0x41, 0x0a, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
+ 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72,
+ 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x6f, 0x73, 0x69,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x70, 0x6f, 0x73, 0x69,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x0b, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x5f, 0x63,
+ 0x61, 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f,
+ 0x2e, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x52, 0x0a, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x12, 0x3e, 0x0a, 0x0a,
+ 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x1e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3c, 0x0a, 0x0e,
+ 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
+ 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79,
+ 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4d, 0x0a, 0x0f, 0x4d, 0x61,
+ 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xe0, 0x02, 0x0a, 0x09, 0x45, 0x78,
+ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x59, 0x0a, 0x13, 0x61, 0x66, 0x66, 0x65, 0x63,
+ 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02,
+ 0x20, 0x03, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e,
+ 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x52, 0x12,
+ 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e,
+ 0x74, 0x73, 0x12, 0x40, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73,
+ 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x35, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12,
+ 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05,
+ 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x22, 0x6f, 0x0a, 0x09, 0x43,
+ 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x15, 0x43, 0x4f, 0x4d, 0x50,
+ 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
+ 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54,
+ 0x5f, 0x50, 0x41, 0x52, 0x53, 0x45, 0x52, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4d,
+ 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x48, 0x45, 0x43,
+ 0x4b, 0x45, 0x52, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45,
+ 0x4e, 0x54, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x42, 0x2e, 0x0a, 0x0c,
+ 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0b, 0x53, 0x79,
+ 0x6e, 0x74, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c,
+ 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_cel_expr_syntax_proto_rawDescOnce sync.Once
+ file_cel_expr_syntax_proto_rawDescData = file_cel_expr_syntax_proto_rawDesc
+)
+
+func file_cel_expr_syntax_proto_rawDescGZIP() []byte {
+ file_cel_expr_syntax_proto_rawDescOnce.Do(func() {
+ file_cel_expr_syntax_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_syntax_proto_rawDescData)
+ })
+ return file_cel_expr_syntax_proto_rawDescData
+}
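
The raw descriptor is gzip-compressed lazily, once, under a sync.Once; within this file it is consumed only by the deprecated Descriptor() methods. Code that needs descriptor metadata should go through protoreflect instead, e.g.:

    fd := (&expr.ParsedExpr{}).ProtoReflect().Descriptor()
    fmt.Println(fd.FullName()) // cel.expr.ParsedExpr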
+
+var file_cel_expr_syntax_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_cel_expr_syntax_proto_msgTypes = make([]protoimpl.MessageInfo, 15)
+var file_cel_expr_syntax_proto_goTypes = []interface{}{
+ (SourceInfo_Extension_Component)(0), // 0: cel.expr.SourceInfo.Extension.Component
+ (*ParsedExpr)(nil), // 1: cel.expr.ParsedExpr
+ (*Expr)(nil), // 2: cel.expr.Expr
+ (*Constant)(nil), // 3: cel.expr.Constant
+ (*SourceInfo)(nil), // 4: cel.expr.SourceInfo
+ (*Expr_Ident)(nil), // 5: cel.expr.Expr.Ident
+ (*Expr_Select)(nil), // 6: cel.expr.Expr.Select
+ (*Expr_Call)(nil), // 7: cel.expr.Expr.Call
+ (*Expr_CreateList)(nil), // 8: cel.expr.Expr.CreateList
+ (*Expr_CreateStruct)(nil), // 9: cel.expr.Expr.CreateStruct
+ (*Expr_Comprehension)(nil), // 10: cel.expr.Expr.Comprehension
+ (*Expr_CreateStruct_Entry)(nil), // 11: cel.expr.Expr.CreateStruct.Entry
+ nil, // 12: cel.expr.SourceInfo.PositionsEntry
+ nil, // 13: cel.expr.SourceInfo.MacroCallsEntry
+ (*SourceInfo_Extension)(nil), // 14: cel.expr.SourceInfo.Extension
+ (*SourceInfo_Extension_Version)(nil), // 15: cel.expr.SourceInfo.Extension.Version
+ (structpb.NullValue)(0), // 16: google.protobuf.NullValue
+ (*durationpb.Duration)(nil), // 17: google.protobuf.Duration
+ (*timestamppb.Timestamp)(nil), // 18: google.protobuf.Timestamp
+}
+var file_cel_expr_syntax_proto_depIdxs = []int32{
+ 2, // 0: cel.expr.ParsedExpr.expr:type_name -> cel.expr.Expr
+ 4, // 1: cel.expr.ParsedExpr.source_info:type_name -> cel.expr.SourceInfo
+ 3, // 2: cel.expr.Expr.const_expr:type_name -> cel.expr.Constant
+ 5, // 3: cel.expr.Expr.ident_expr:type_name -> cel.expr.Expr.Ident
+ 6, // 4: cel.expr.Expr.select_expr:type_name -> cel.expr.Expr.Select
+ 7, // 5: cel.expr.Expr.call_expr:type_name -> cel.expr.Expr.Call
+ 8, // 6: cel.expr.Expr.list_expr:type_name -> cel.expr.Expr.CreateList
+ 9, // 7: cel.expr.Expr.struct_expr:type_name -> cel.expr.Expr.CreateStruct
+ 10, // 8: cel.expr.Expr.comprehension_expr:type_name -> cel.expr.Expr.Comprehension
+ 16, // 9: cel.expr.Constant.null_value:type_name -> google.protobuf.NullValue
+ 17, // 10: cel.expr.Constant.duration_value:type_name -> google.protobuf.Duration
+ 18, // 11: cel.expr.Constant.timestamp_value:type_name -> google.protobuf.Timestamp
+ 12, // 12: cel.expr.SourceInfo.positions:type_name -> cel.expr.SourceInfo.PositionsEntry
+ 13, // 13: cel.expr.SourceInfo.macro_calls:type_name -> cel.expr.SourceInfo.MacroCallsEntry
+ 14, // 14: cel.expr.SourceInfo.extensions:type_name -> cel.expr.SourceInfo.Extension
+ 2, // 15: cel.expr.Expr.Select.operand:type_name -> cel.expr.Expr
+ 2, // 16: cel.expr.Expr.Call.target:type_name -> cel.expr.Expr
+ 2, // 17: cel.expr.Expr.Call.args:type_name -> cel.expr.Expr
+ 2, // 18: cel.expr.Expr.CreateList.elements:type_name -> cel.expr.Expr
+ 11, // 19: cel.expr.Expr.CreateStruct.entries:type_name -> cel.expr.Expr.CreateStruct.Entry
+ 2, // 20: cel.expr.Expr.Comprehension.iter_range:type_name -> cel.expr.Expr
+ 2, // 21: cel.expr.Expr.Comprehension.accu_init:type_name -> cel.expr.Expr
+ 2, // 22: cel.expr.Expr.Comprehension.loop_condition:type_name -> cel.expr.Expr
+ 2, // 23: cel.expr.Expr.Comprehension.loop_step:type_name -> cel.expr.Expr
+ 2, // 24: cel.expr.Expr.Comprehension.result:type_name -> cel.expr.Expr
+ 2, // 25: cel.expr.Expr.CreateStruct.Entry.map_key:type_name -> cel.expr.Expr
+ 2, // 26: cel.expr.Expr.CreateStruct.Entry.value:type_name -> cel.expr.Expr
+ 2, // 27: cel.expr.SourceInfo.MacroCallsEntry.value:type_name -> cel.expr.Expr
+ 0, // 28: cel.expr.SourceInfo.Extension.affected_components:type_name -> cel.expr.SourceInfo.Extension.Component
+ 15, // 29: cel.expr.SourceInfo.Extension.version:type_name -> cel.expr.SourceInfo.Extension.Version
+ 30, // [30:30] is the sub-list for method output_type
+ 30, // [30:30] is the sub-list for method input_type
+ 30, // [30:30] is the sub-list for extension type_name
+ 30, // [30:30] is the sub-list for extension extendee
+ 0, // [0:30] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_syntax_proto_init() }
+func file_cel_expr_syntax_proto_init() {
+ if File_cel_expr_syntax_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_cel_expr_syntax_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ParsedExpr); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Constant); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SourceInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_Ident); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_Select); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_Call); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_CreateList); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_CreateStruct); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_Comprehension); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_CreateStruct_Entry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SourceInfo_Extension); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SourceInfo_Extension_Version); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*Expr_ConstExpr)(nil),
+ (*Expr_IdentExpr)(nil),
+ (*Expr_SelectExpr)(nil),
+ (*Expr_CallExpr)(nil),
+ (*Expr_ListExpr)(nil),
+ (*Expr_StructExpr)(nil),
+ (*Expr_ComprehensionExpr)(nil),
+ }
+ file_cel_expr_syntax_proto_msgTypes[2].OneofWrappers = []interface{}{
+ (*Constant_NullValue)(nil),
+ (*Constant_BoolValue)(nil),
+ (*Constant_Int64Value)(nil),
+ (*Constant_Uint64Value)(nil),
+ (*Constant_DoubleValue)(nil),
+ (*Constant_StringValue)(nil),
+ (*Constant_BytesValue)(nil),
+ (*Constant_DurationValue)(nil),
+ (*Constant_TimestampValue)(nil),
+ }
+ file_cel_expr_syntax_proto_msgTypes[10].OneofWrappers = []interface{}{
+ (*Expr_CreateStruct_Entry_FieldKey)(nil),
+ (*Expr_CreateStruct_Entry_MapKey)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_syntax_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 15,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_syntax_proto_goTypes,
+ DependencyIndexes: file_cel_expr_syntax_proto_depIdxs,
+ EnumInfos: file_cel_expr_syntax_proto_enumTypes,
+ MessageInfos: file_cel_expr_syntax_proto_msgTypes,
+ }.Build()
+ File_cel_expr_syntax_proto = out.File
+ file_cel_expr_syntax_proto_rawDesc = nil
+ file_cel_expr_syntax_proto_goTypes = nil
+ file_cel_expr_syntax_proto_depIdxs = nil
+}
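
The init above hands the raw descriptor, Go types, and dependency indexes to protoimpl.TypeBuilder, which builds the file descriptor, registers everything with the global protobuf registry, and then nils out the build-time slices so they can be collected. One practical consequence (a sketch, assuming this package has been imported somewhere so init has run) is that the messages become discoverable by full name:

    // import "google.golang.org/protobuf/reflect/protoregistry"
    mt, err := protoregistry.GlobalTypes.FindMessageByName("cel.expr.Expr")
    if err == nil {
        fmt.Println(mt.Descriptor().FullName()) // cel.expr.Expr
    }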
diff --git a/vendor/cel.dev/expr/value.pb.go b/vendor/cel.dev/expr/value.pb.go
new file mode 100644
index 000000000..e5e29228c
--- /dev/null
+++ b/vendor/cel.dev/expr/value.pb.go
@@ -0,0 +1,653 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.5
+// source: cel/expr/value.proto
+
+package expr
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Value struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Kind:
+ //
+ // *Value_NullValue
+ // *Value_BoolValue
+ // *Value_Int64Value
+ // *Value_Uint64Value
+ // *Value_DoubleValue
+ // *Value_StringValue
+ // *Value_BytesValue
+ // *Value_EnumValue
+ // *Value_ObjectValue
+ // *Value_MapValue
+ // *Value_ListValue
+ // *Value_TypeValue
+ Kind isValue_Kind `protobuf_oneof:"kind"`
+}
+
+func (x *Value) Reset() {
+ *x = Value{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Value) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Value) ProtoMessage() {}
+
+func (x *Value) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Value.ProtoReflect.Descriptor instead.
+func (*Value) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *Value) GetKind() isValue_Kind {
+ if m != nil {
+ return m.Kind
+ }
+ return nil
+}
+
+func (x *Value) GetNullValue() structpb.NullValue {
+ if x, ok := x.GetKind().(*Value_NullValue); ok {
+ return x.NullValue
+ }
+ return structpb.NullValue(0)
+}
+
+func (x *Value) GetBoolValue() bool {
+ if x, ok := x.GetKind().(*Value_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (x *Value) GetInt64Value() int64 {
+ if x, ok := x.GetKind().(*Value_Int64Value); ok {
+ return x.Int64Value
+ }
+ return 0
+}
+
+func (x *Value) GetUint64Value() uint64 {
+ if x, ok := x.GetKind().(*Value_Uint64Value); ok {
+ return x.Uint64Value
+ }
+ return 0
+}
+
+func (x *Value) GetDoubleValue() float64 {
+ if x, ok := x.GetKind().(*Value_DoubleValue); ok {
+ return x.DoubleValue
+ }
+ return 0
+}
+
+func (x *Value) GetStringValue() string {
+ if x, ok := x.GetKind().(*Value_StringValue); ok {
+ return x.StringValue
+ }
+ return ""
+}
+
+func (x *Value) GetBytesValue() []byte {
+ if x, ok := x.GetKind().(*Value_BytesValue); ok {
+ return x.BytesValue
+ }
+ return nil
+}
+
+func (x *Value) GetEnumValue() *EnumValue {
+ if x, ok := x.GetKind().(*Value_EnumValue); ok {
+ return x.EnumValue
+ }
+ return nil
+}
+
+func (x *Value) GetObjectValue() *anypb.Any {
+ if x, ok := x.GetKind().(*Value_ObjectValue); ok {
+ return x.ObjectValue
+ }
+ return nil
+}
+
+func (x *Value) GetMapValue() *MapValue {
+ if x, ok := x.GetKind().(*Value_MapValue); ok {
+ return x.MapValue
+ }
+ return nil
+}
+
+func (x *Value) GetListValue() *ListValue {
+ if x, ok := x.GetKind().(*Value_ListValue); ok {
+ return x.ListValue
+ }
+ return nil
+}
+
+func (x *Value) GetTypeValue() string {
+ if x, ok := x.GetKind().(*Value_TypeValue); ok {
+ return x.TypeValue
+ }
+ return ""
+}
+
+type isValue_Kind interface {
+ isValue_Kind()
+}
+
+type Value_NullValue struct {
+ NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Value_BoolValue struct {
+ BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type Value_Int64Value struct {
+ Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"`
+}
+
+type Value_Uint64Value struct {
+ Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"`
+}
+
+type Value_DoubleValue struct {
+ DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+type Value_StringValue struct {
+ StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type Value_BytesValue struct {
+ BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"`
+}
+
+type Value_EnumValue struct {
+ EnumValue *EnumValue `protobuf:"bytes,9,opt,name=enum_value,json=enumValue,proto3,oneof"`
+}
+
+type Value_ObjectValue struct {
+ ObjectValue *anypb.Any `protobuf:"bytes,10,opt,name=object_value,json=objectValue,proto3,oneof"`
+}
+
+type Value_MapValue struct {
+ MapValue *MapValue `protobuf:"bytes,11,opt,name=map_value,json=mapValue,proto3,oneof"`
+}
+
+type Value_ListValue struct {
+ ListValue *ListValue `protobuf:"bytes,12,opt,name=list_value,json=listValue,proto3,oneof"`
+}
+
+type Value_TypeValue struct {
+ TypeValue string `protobuf:"bytes,15,opt,name=type_value,json=typeValue,proto3,oneof"`
+}
+
+func (*Value_NullValue) isValue_Kind() {}
+
+func (*Value_BoolValue) isValue_Kind() {}
+
+func (*Value_Int64Value) isValue_Kind() {}
+
+func (*Value_Uint64Value) isValue_Kind() {}
+
+func (*Value_DoubleValue) isValue_Kind() {}
+
+func (*Value_StringValue) isValue_Kind() {}
+
+func (*Value_BytesValue) isValue_Kind() {}
+
+func (*Value_EnumValue) isValue_Kind() {}
+
+func (*Value_ObjectValue) isValue_Kind() {}
+
+func (*Value_MapValue) isValue_Kind() {}
+
+func (*Value_ListValue) isValue_Kind() {}
+
+func (*Value_TypeValue) isValue_Kind() {}
+
+type EnumValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ Value int32 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *EnumValue) Reset() {
+ *x = EnumValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EnumValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EnumValue) ProtoMessage() {}
+
+func (x *EnumValue) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EnumValue.ProtoReflect.Descriptor instead.
+func (*EnumValue) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *EnumValue) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+func (x *EnumValue) GetValue() int32 {
+ if x != nil {
+ return x.Value
+ }
+ return 0
+}
+
+type ListValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+}
+
+func (x *ListValue) Reset() {
+ *x = ListValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListValue) ProtoMessage() {}
+
+func (x *ListValue) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListValue.ProtoReflect.Descriptor instead.
+func (*ListValue) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ListValue) GetValues() []*Value {
+ if x != nil {
+ return x.Values
+ }
+ return nil
+}
+
+type MapValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Entries []*MapValue_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"`
+}
+
+func (x *MapValue) Reset() {
+ *x = MapValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MapValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MapValue) ProtoMessage() {}
+
+func (x *MapValue) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MapValue.ProtoReflect.Descriptor instead.
+func (*MapValue) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *MapValue) GetEntries() []*MapValue_Entry {
+ if x != nil {
+ return x.Entries
+ }
+ return nil
+}
+
+type MapValue_Entry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Key *Value `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ Value *Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *MapValue_Entry) Reset() {
+ *x = MapValue_Entry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MapValue_Entry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MapValue_Entry) ProtoMessage() {}
+
+func (x *MapValue_Entry) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MapValue_Entry.ProtoReflect.Descriptor instead.
+func (*MapValue_Entry) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{3, 0}
+}
+
+func (x *MapValue_Entry) GetKey() *Value {
+ if x != nil {
+ return x.Key
+ }
+ return nil
+}
+
+func (x *MapValue_Entry) GetValue() *Value {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+var File_cel_expr_value_proto protoreflect.FileDescriptor
+
+var file_cel_expr_value_proto_rawDesc = []byte{
+ 0x0a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72,
+ 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72,
+ 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x04, 0x0a, 0x05, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, 0x75, 0x69,
+ 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x64, 0x6f, 0x75,
+ 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x48,
+ 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23,
+ 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65,
+ 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48,
+ 0x00, 0x52, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x39, 0x0a, 0x0c,
+ 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0a, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x6f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00,
+ 0x52, 0x08, 0x6d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x6c, 0x69,
+ 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x12, 0x1f, 0x0a, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0f,
+ 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x35, 0x0a, 0x09, 0x45, 0x6e, 0x75,
+ 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x22, 0x34, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a,
+ 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x70, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07,
+ 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x51, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x12, 0x21, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03,
+ 0x6b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2d, 0x0a, 0x0c, 0x64, 0x65,
+ 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0a, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65,
+ 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var (
+ file_cel_expr_value_proto_rawDescOnce sync.Once
+ file_cel_expr_value_proto_rawDescData = file_cel_expr_value_proto_rawDesc
+)
+
+func file_cel_expr_value_proto_rawDescGZIP() []byte {
+ file_cel_expr_value_proto_rawDescOnce.Do(func() {
+ file_cel_expr_value_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_value_proto_rawDescData)
+ })
+ return file_cel_expr_value_proto_rawDescData
+}
+
+var file_cel_expr_value_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_cel_expr_value_proto_goTypes = []interface{}{
+ (*Value)(nil), // 0: cel.expr.Value
+ (*EnumValue)(nil), // 1: cel.expr.EnumValue
+ (*ListValue)(nil), // 2: cel.expr.ListValue
+ (*MapValue)(nil), // 3: cel.expr.MapValue
+ (*MapValue_Entry)(nil), // 4: cel.expr.MapValue.Entry
+ (structpb.NullValue)(0), // 5: google.protobuf.NullValue
+ (*anypb.Any)(nil), // 6: google.protobuf.Any
+}
+var file_cel_expr_value_proto_depIdxs = []int32{
+ 5, // 0: cel.expr.Value.null_value:type_name -> google.protobuf.NullValue
+ 1, // 1: cel.expr.Value.enum_value:type_name -> cel.expr.EnumValue
+ 6, // 2: cel.expr.Value.object_value:type_name -> google.protobuf.Any
+ 3, // 3: cel.expr.Value.map_value:type_name -> cel.expr.MapValue
+ 2, // 4: cel.expr.Value.list_value:type_name -> cel.expr.ListValue
+ 0, // 5: cel.expr.ListValue.values:type_name -> cel.expr.Value
+ 4, // 6: cel.expr.MapValue.entries:type_name -> cel.expr.MapValue.Entry
+ 0, // 7: cel.expr.MapValue.Entry.key:type_name -> cel.expr.Value
+ 0, // 8: cel.expr.MapValue.Entry.value:type_name -> cel.expr.Value
+ 9, // [9:9] is the sub-list for method output_type
+ 9, // [9:9] is the sub-list for method input_type
+ 9, // [9:9] is the sub-list for extension type_name
+ 9, // [9:9] is the sub-list for extension extendee
+ 0, // [0:9] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_value_proto_init() }
+func file_cel_expr_value_proto_init() {
+ if File_cel_expr_value_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_cel_expr_value_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Value); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EnumValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MapValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MapValue_Entry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*Value_NullValue)(nil),
+ (*Value_BoolValue)(nil),
+ (*Value_Int64Value)(nil),
+ (*Value_Uint64Value)(nil),
+ (*Value_DoubleValue)(nil),
+ (*Value_StringValue)(nil),
+ (*Value_BytesValue)(nil),
+ (*Value_EnumValue)(nil),
+ (*Value_ObjectValue)(nil),
+ (*Value_MapValue)(nil),
+ (*Value_ListValue)(nil),
+ (*Value_TypeValue)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_value_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 5,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_value_proto_goTypes,
+ DependencyIndexes: file_cel_expr_value_proto_depIdxs,
+ MessageInfos: file_cel_expr_value_proto_msgTypes,
+ }.Build()
+ File_cel_expr_value_proto = out.File
+ file_cel_expr_value_proto_rawDesc = nil
+ file_cel_expr_value_proto_goTypes = nil
+ file_cel_expr_value_proto_depIdxs = nil
+}
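
The generated oneof above is set by storing one of the wrapper structs in Kind and read back either with a type switch or with the typed getters, which return a zero value when a different member is set. A minimal sketch, assuming the vendored package is imported via its go_package path cel.dev/expr (the celexpr alias is illustrative):

	package main

	import (
		"fmt"

		celexpr "cel.dev/expr"
	)

	func main() {
		// Set the oneof by storing one of the wrapper structs in Kind.
		v := &celexpr.Value{Kind: &celexpr.Value_Int64Value{Int64Value: 42}}

		// Read it back with a type switch over the kind...
		switch k := v.GetKind().(type) {
		case *celexpr.Value_Int64Value:
			fmt.Println("int64:", k.Int64Value)
		case *celexpr.Value_StringValue:
			fmt.Println("string:", k.StringValue)
		}

		// ...or with the typed getters, which return a zero value when a
		// different member is set.
		fmt.Println(v.GetInt64Value(), v.GetStringValue() == "") // 42 true
	}
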
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE
deleted file mode 100644
index 52cf18e42..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE
+++ /dev/null
@@ -1,26 +0,0 @@
-Copyright 2021 The ANTLR Project
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
- 3. Neither the name of the copyright holder nor the names of its
- contributors may be used to endorse or promote products derived from this
- software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go
deleted file mode 100644
index ab5121267..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
-Package antlr implements the Go version of the ANTLR 4 runtime.
-
-# The ANTLR Tool
-
-ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing,
-or translating structured text or binary files. It's widely used to build languages, tools, and frameworks.
-From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface
-(or visitor) that makes it easy to respond to the recognition of phrases of interest.
-
-# Code Generation
-
-ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a
-runtime library, written specifically to support the generated code in the target language. This library is the
-runtime for the Go target.
-
-To generate code for the Go target, it is generally recommended to place the source grammar files in a package of
-their own and to generate the code with a `.sh` script invoked via the go generate directive. In that same directory
-it is usual, though not required, to place the ANTLR tool used to generate the code. That does mean the ANTLR tool
-JAR file will be checked in to your source control, so you are free to specify the tool version any other way, such
-as an alias in `.zshrc` or equivalent, a profile in your IDE, or configuration in your CI system.
-
-Here is a general template for an ANTLR based recognizer in Go:
-
- .
- ├── myproject
- ├── parser
- │ ├── mygrammar.g4
- │ ├── antlr-4.12.0-complete.jar
- │ ├── error_listeners.go
- │ ├── generate.go
- │ ├── generate.sh
- ├── go.mod
- ├── go.sum
- ├── main.go
- └── main_test.go
-
-Make sure that the package statement in your grammar file(s) reflects the go package they exist in.
-The generate.go file then looks like this:
-
- package parser
-
- //go:generate ./generate.sh
-
-And the generate.sh file will look similar to this:
-
- #!/bin/sh
-
-	alias antlr4='java -Xmx500M -cp "./antlr-4.12.0-complete.jar:$CLASSPATH" org.antlr.v4.Tool'
- antlr4 -Dlanguage=Go -no-visitor -package parser *.g4
-
-depending on whether you want visitors or listeners or any other ANTLR options.
-
-From the command line at the root of your package “myproject” you can then simply issue the command:
-
- go generate ./...
-
-# Copyright Notice
-
-Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-
-Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root.
-
-[target languages]: https://github.com/antlr/antlr4/tree/master/runtime
-[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt
-*/
-package antlr
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go
deleted file mode 100644
index 7619fa172..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go
+++ /dev/null
@@ -1,303 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
-)
-
-// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic
-// context). The syntactic context is a graph-structured stack node whose
-// path(s) to the root is the rule invocation(s) chain used to arrive at the
-// state. The semantic context is the tree of semantic predicates encountered
-// before reaching an ATN state.
-type ATNConfig interface {
- Equals(o Collectable[ATNConfig]) bool
- Hash() int
-
- GetState() ATNState
- GetAlt() int
- GetSemanticContext() SemanticContext
-
- GetContext() PredictionContext
- SetContext(PredictionContext)
-
- GetReachesIntoOuterContext() int
- SetReachesIntoOuterContext(int)
-
- String() string
-
- getPrecedenceFilterSuppressed() bool
- setPrecedenceFilterSuppressed(bool)
-}
-
-type BaseATNConfig struct {
- precedenceFilterSuppressed bool
- state ATNState
- alt int
- context PredictionContext
- semanticContext SemanticContext
- reachesIntoOuterContext int
-}
-
-func NewBaseATNConfig7(old *BaseATNConfig) ATNConfig { // TODO: Dup
- return &BaseATNConfig{
- state: old.state,
- alt: old.alt,
- context: old.context,
- semanticContext: old.semanticContext,
- reachesIntoOuterContext: old.reachesIntoOuterContext,
- }
-}
-
-func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig {
- return NewBaseATNConfig5(state, alt, context, SemanticContextNone)
-}
-
-func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
- if semanticContext == nil {
- panic("semanticContext cannot be nil") // TODO: Necessary?
- }
-
- return &BaseATNConfig{state: state, alt: alt, context: context, semanticContext: semanticContext}
-}
-
-func NewBaseATNConfig4(c ATNConfig, state ATNState) *BaseATNConfig {
- return NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext())
-}
-
-func NewBaseATNConfig3(c ATNConfig, state ATNState, semanticContext SemanticContext) *BaseATNConfig {
- return NewBaseATNConfig(c, state, c.GetContext(), semanticContext)
-}
-
-func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNConfig {
- return NewBaseATNConfig(c, c.GetState(), c.GetContext(), semanticContext)
-}
-
-func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig {
- return NewBaseATNConfig(c, state, context, c.GetSemanticContext())
-}
-
-func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
- if semanticContext == nil {
- panic("semanticContext cannot be nil")
- }
-
- return &BaseATNConfig{
- state: state,
- alt: c.GetAlt(),
- context: context,
- semanticContext: semanticContext,
- reachesIntoOuterContext: c.GetReachesIntoOuterContext(),
- precedenceFilterSuppressed: c.getPrecedenceFilterSuppressed(),
- }
-}
-
-func (b *BaseATNConfig) getPrecedenceFilterSuppressed() bool {
- return b.precedenceFilterSuppressed
-}
-
-func (b *BaseATNConfig) setPrecedenceFilterSuppressed(v bool) {
- b.precedenceFilterSuppressed = v
-}
-
-func (b *BaseATNConfig) GetState() ATNState {
- return b.state
-}
-
-func (b *BaseATNConfig) GetAlt() int {
- return b.alt
-}
-
-func (b *BaseATNConfig) SetContext(v PredictionContext) {
- b.context = v
-}
-func (b *BaseATNConfig) GetContext() PredictionContext {
- return b.context
-}
-
-func (b *BaseATNConfig) GetSemanticContext() SemanticContext {
- return b.semanticContext
-}
-
-func (b *BaseATNConfig) GetReachesIntoOuterContext() int {
- return b.reachesIntoOuterContext
-}
-
-func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) {
- b.reachesIntoOuterContext = v
-}
-
-// Equals is the default comparison function for an ATNConfig when no specialist implementation is required
-// for a collection.
-//
-// An ATN configuration is equal to another if both have the same state, they
-// predict the same alternative, and syntactic/semantic contexts are the same.
-func (b *BaseATNConfig) Equals(o Collectable[ATNConfig]) bool {
- if b == o {
- return true
- } else if o == nil {
- return false
- }
-
- var other, ok = o.(*BaseATNConfig)
-
- if !ok {
- return false
- }
-
- var equal bool
-
- if b.context == nil {
- equal = other.context == nil
- } else {
- equal = b.context.Equals(other.context)
- }
-
- var (
- nums = b.state.GetStateNumber() == other.state.GetStateNumber()
- alts = b.alt == other.alt
- cons = b.semanticContext.Equals(other.semanticContext)
- sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed
- )
-
- return nums && alts && cons && sups && equal
-}
-
-// Hash is the default hash function for BaseATNConfig, when no specialist hash function
-// is required for a collection
-func (b *BaseATNConfig) Hash() int {
- var c int
- if b.context != nil {
- c = b.context.Hash()
- }
-
- h := murmurInit(7)
- h = murmurUpdate(h, b.state.GetStateNumber())
- h = murmurUpdate(h, b.alt)
- h = murmurUpdate(h, c)
- h = murmurUpdate(h, b.semanticContext.Hash())
- return murmurFinish(h, 4)
-}
-
-func (b *BaseATNConfig) String() string {
- var s1, s2, s3 string
-
- if b.context != nil {
- s1 = ",[" + fmt.Sprint(b.context) + "]"
- }
-
- if b.semanticContext != SemanticContextNone {
- s2 = "," + fmt.Sprint(b.semanticContext)
- }
-
- if b.reachesIntoOuterContext > 0 {
- s3 = ",up=" + fmt.Sprint(b.reachesIntoOuterContext)
- }
-
- return fmt.Sprintf("(%v,%v%v%v%v)", b.state, b.alt, s1, s2, s3)
-}
-
-type LexerATNConfig struct {
- *BaseATNConfig
- lexerActionExecutor *LexerActionExecutor
- passedThroughNonGreedyDecision bool
-}
-
-func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
- return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
-}
-
-func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
- return &LexerATNConfig{
- BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone),
- lexerActionExecutor: lexerActionExecutor,
- }
-}
-
-func NewLexerATNConfig4(c *LexerATNConfig, state ATNState) *LexerATNConfig {
- return &LexerATNConfig{
- BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
- lexerActionExecutor: c.lexerActionExecutor,
- passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
- }
-}
-
-func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
- return &LexerATNConfig{
- BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
- lexerActionExecutor: lexerActionExecutor,
- passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
- }
-}
-
-func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig {
- return &LexerATNConfig{
- BaseATNConfig: NewBaseATNConfig(c, state, context, c.GetSemanticContext()),
- lexerActionExecutor: c.lexerActionExecutor,
- passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
- }
-}
-
-func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
- return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
-}
-
-// Hash is the default hash function for LexerATNConfig objects, it can be used directly or via
-// the default comparator [ObjEqComparator].
-func (l *LexerATNConfig) Hash() int {
- var f int
- if l.passedThroughNonGreedyDecision {
- f = 1
- } else {
- f = 0
- }
- h := murmurInit(7)
- h = murmurUpdate(h, l.state.GetStateNumber())
- h = murmurUpdate(h, l.alt)
- h = murmurUpdate(h, l.context.Hash())
- h = murmurUpdate(h, l.semanticContext.Hash())
- h = murmurUpdate(h, f)
- h = murmurUpdate(h, l.lexerActionExecutor.Hash())
- h = murmurFinish(h, 6)
- return h
-}
-
-// Equals is the default comparison function for LexerATNConfig objects, it can be used directly or via
-// the default comparator [ObjEqComparator].
-func (l *LexerATNConfig) Equals(other Collectable[ATNConfig]) bool {
-	if l == other {
-		return true
-	}
-	var othert, ok = other.(*LexerATNConfig)
-
-	if !ok {
-		return false
-	} else if l.passedThroughNonGreedyDecision != othert.passedThroughNonGreedyDecision {
-		return false
-	}
-
- var b bool
-
- if l.lexerActionExecutor != nil {
- b = !l.lexerActionExecutor.Equals(othert.lexerActionExecutor)
- } else {
- b = othert.lexerActionExecutor != nil
- }
-
- if b {
- return false
- }
-
- return l.BaseATNConfig.Equals(othert.BaseATNConfig)
-}
-
-func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool {
- var ds, ok = target.(DecisionState)
-
- return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy())
-}
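
The Hash/Equals pairs above exist so configs can be deduplicated in hashed collections such as JStore: two configs with the same (state, alt, context, semanticContext) must compare equal and hash identically. A toy sketch of the combine-then-finish pattern the Hash methods follow (the mixing here is simplified; the runtime uses MurmurHash3):

	package main

	import "fmt"

	// combine folds a config's identity fields into one hash value, mirroring
	// the murmurInit/murmurUpdate/murmurFinish sequence above (mixing simplified).
	func combine(seed int, parts ...int) int {
		h := seed
		for _, p := range parts {
			h = h*31 + p
		}
		return h ^ len(parts) // stand-in for murmurFinish's length mix
	}

	func main() {
		// state=12, alt=1, contextHash=99, semctxHash=0: equal tuples must
		// produce equal hashes or set deduplication breaks.
		fmt.Println(combine(7, 12, 1, 99, 0) == combine(7, 12, 1, 99, 0)) // true
	}
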
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go
deleted file mode 100644
index 43e9b33f3..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go
+++ /dev/null
@@ -1,441 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
-)
-
-type ATNConfigSet interface {
- Hash() int
- Equals(o Collectable[ATNConfig]) bool
- Add(ATNConfig, *DoubleDict) bool
- AddAll([]ATNConfig) bool
-
- GetStates() *JStore[ATNState, Comparator[ATNState]]
- GetPredicates() []SemanticContext
- GetItems() []ATNConfig
-
- OptimizeConfigs(interpreter *BaseATNSimulator)
-
- Length() int
- IsEmpty() bool
- Contains(ATNConfig) bool
- ContainsFast(ATNConfig) bool
- Clear()
- String() string
-
- HasSemanticContext() bool
- SetHasSemanticContext(v bool)
-
- ReadOnly() bool
- SetReadOnly(bool)
-
- GetConflictingAlts() *BitSet
- SetConflictingAlts(*BitSet)
-
- Alts() *BitSet
-
- FullContext() bool
-
- GetUniqueAlt() int
- SetUniqueAlt(int)
-
- GetDipsIntoOuterContext() bool
- SetDipsIntoOuterContext(bool)
-}
-
-// BaseATNConfigSet is a specialized set of ATNConfig that tracks information
-// about its elements and can combine similar configurations using a
-// graph-structured stack.
-type BaseATNConfigSet struct {
- cachedHash int
-
- // configLookup is used to determine whether two BaseATNConfigSets are equal. We
- // need all configurations with the same (s, i, _, semctx) to be equal. A key
- // effectively doubles the number of objects associated with ATNConfigs. All
- // keys are hashed by (s, i, _, pi), not including the context. Wiped out when
- // read-only because a set becomes a DFA state.
- configLookup *JStore[ATNConfig, Comparator[ATNConfig]]
-
- // configs is the added elements.
- configs []ATNConfig
-
- // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
- // info together because it saves recomputation. Can we track conflicts as they
- // are added to save scanning configs later?
- conflictingAlts *BitSet
-
- // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates
- // we hit a pred while computing a closure operation. Do not make a DFA state
- // from the BaseATNConfigSet in this case. TODO: How is this used by parsers?
- dipsIntoOuterContext bool
-
- // fullCtx is whether it is part of a full context LL prediction. Used to
- // determine how to merge $. It is a wildcard with SLL, but not for an LL
- // context merge.
- fullCtx bool
-
-	// hasSemanticContext is used in parser and lexer. In a lexer, it indicates
-	// we hit a pred while computing a closure operation. Don't make a DFA state
-	// from the set in that case.
- hasSemanticContext bool
-
-	// readOnly is whether it is read-only. Do not allow any code to manipulate
-	// the set if true, because DFA states will point at sets and those must not
-	// change. If it is not, protect other fields; conflictingAlts in particular,
-	// which is assigned after readOnly.
- readOnly bool
-
- uniqueAlt int
-}
-
-func (b *BaseATNConfigSet) Alts() *BitSet {
- alts := NewBitSet()
- for _, it := range b.configs {
- alts.add(it.GetAlt())
- }
- return alts
-}
-
-func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet {
- return &BaseATNConfigSet{
- cachedHash: -1,
- configLookup: NewJStore[ATNConfig, Comparator[ATNConfig]](aConfCompInst),
- fullCtx: fullCtx,
- }
-}
-
-// Add merges contexts with existing configs for (s, i, pi, _), where s is the
-// ATNConfig.state, i is the ATNConfig.alt, and pi is the
-// ATNConfig.semanticContext. We use (s,i,pi) as the key. Updates
-// dipsIntoOuterContext and hasSemanticContext when necessary.
-func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool {
- if b.readOnly {
- panic("set is read-only")
- }
-
- if config.GetSemanticContext() != SemanticContextNone {
- b.hasSemanticContext = true
- }
-
- if config.GetReachesIntoOuterContext() > 0 {
- b.dipsIntoOuterContext = true
- }
-
- existing, present := b.configLookup.Put(config)
-
- // The config was not already in the set
- //
- if !present {
- b.cachedHash = -1
- b.configs = append(b.configs, config) // Track order here
- return true
- }
-
- // Merge a previous (s, i, pi, _) with it and save the result
- rootIsWildcard := !b.fullCtx
- merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache)
-
- // No need to check for existing.context because config.context is in the cache,
- // since the only way to create new graphs is the "call rule" and here. We cache
- // at both places.
- existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext()))
-
- // Preserve the precedence filter suppression during the merge
- if config.getPrecedenceFilterSuppressed() {
- existing.setPrecedenceFilterSuppressed(true)
- }
-
- // Replace the context because there is no need to do alt mapping
- existing.SetContext(merged)
-
- return true
-}
-
-func (b *BaseATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] {
-
- // states uses the standard comparator provided by the ATNState instance
- //
- states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst)
-
- for i := 0; i < len(b.configs); i++ {
- states.Put(b.configs[i].GetState())
- }
-
- return states
-}
-
-func (b *BaseATNConfigSet) HasSemanticContext() bool {
- return b.hasSemanticContext
-}
-
-func (b *BaseATNConfigSet) SetHasSemanticContext(v bool) {
- b.hasSemanticContext = v
-}
-
-func (b *BaseATNConfigSet) GetPredicates() []SemanticContext {
- preds := make([]SemanticContext, 0)
-
- for i := 0; i < len(b.configs); i++ {
- c := b.configs[i].GetSemanticContext()
-
- if c != SemanticContextNone {
- preds = append(preds, c)
- }
- }
-
- return preds
-}
-
-func (b *BaseATNConfigSet) GetItems() []ATNConfig {
- return b.configs
-}
-
-func (b *BaseATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) {
- if b.readOnly {
- panic("set is read-only")
- }
-
- if b.configLookup.Len() == 0 {
- return
- }
-
- for i := 0; i < len(b.configs); i++ {
- config := b.configs[i]
-
- config.SetContext(interpreter.getCachedContext(config.GetContext()))
- }
-}
-
-func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool {
- for i := 0; i < len(coll); i++ {
- b.Add(coll[i], nil)
- }
-
- return false
-}
-
-// Compare is a hack function just to verify that adding DFA states to the known
-// set works, so long as comparison of ATNConfigSets works. For that to work, we
-// need to make sure that the sets of ATNConfigs in the two sets are equivalent.
-// We can't know the order, so we do this inefficient hack. If this proves the
-// point, then we can change the config set to a better structure.
-func (b *BaseATNConfigSet) Compare(bs *BaseATNConfigSet) bool {
- if len(b.configs) != len(bs.configs) {
- return false
- }
-
- for _, c := range b.configs {
- found := false
- for _, c2 := range bs.configs {
- if c.Equals(c2) {
- found = true
- break
- }
- }
-
- if !found {
- return false
- }
-
- }
- return true
-}
-
-func (b *BaseATNConfigSet) Equals(other Collectable[ATNConfig]) bool {
- if b == other {
- return true
- } else if _, ok := other.(*BaseATNConfigSet); !ok {
- return false
- }
-
- other2 := other.(*BaseATNConfigSet)
-
- return b.configs != nil &&
- b.fullCtx == other2.fullCtx &&
- b.uniqueAlt == other2.uniqueAlt &&
- b.conflictingAlts == other2.conflictingAlts &&
- b.hasSemanticContext == other2.hasSemanticContext &&
- b.dipsIntoOuterContext == other2.dipsIntoOuterContext &&
- b.Compare(other2)
-}
-
-func (b *BaseATNConfigSet) Hash() int {
- if b.readOnly {
- if b.cachedHash == -1 {
- b.cachedHash = b.hashCodeConfigs()
- }
-
- return b.cachedHash
- }
-
- return b.hashCodeConfigs()
-}
-
-func (b *BaseATNConfigSet) hashCodeConfigs() int {
- h := 1
- for _, config := range b.configs {
- h = 31*h + config.Hash()
- }
- return h
-}
-
-func (b *BaseATNConfigSet) Length() int {
- return len(b.configs)
-}
-
-func (b *BaseATNConfigSet) IsEmpty() bool {
- return len(b.configs) == 0
-}
-
-func (b *BaseATNConfigSet) Contains(item ATNConfig) bool {
- if b.configLookup == nil {
- panic("not implemented for read-only sets")
- }
-
- return b.configLookup.Contains(item)
-}
-
-func (b *BaseATNConfigSet) ContainsFast(item ATNConfig) bool {
- if b.configLookup == nil {
- panic("not implemented for read-only sets")
- }
-
- return b.configLookup.Contains(item) // TODO: containsFast is not implemented for Set
-}
-
-func (b *BaseATNConfigSet) Clear() {
- if b.readOnly {
- panic("set is read-only")
- }
-
- b.configs = make([]ATNConfig, 0)
- b.cachedHash = -1
- b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst)
-}
-
-func (b *BaseATNConfigSet) FullContext() bool {
- return b.fullCtx
-}
-
-func (b *BaseATNConfigSet) GetDipsIntoOuterContext() bool {
- return b.dipsIntoOuterContext
-}
-
-func (b *BaseATNConfigSet) SetDipsIntoOuterContext(v bool) {
- b.dipsIntoOuterContext = v
-}
-
-func (b *BaseATNConfigSet) GetUniqueAlt() int {
- return b.uniqueAlt
-}
-
-func (b *BaseATNConfigSet) SetUniqueAlt(v int) {
- b.uniqueAlt = v
-}
-
-func (b *BaseATNConfigSet) GetConflictingAlts() *BitSet {
- return b.conflictingAlts
-}
-
-func (b *BaseATNConfigSet) SetConflictingAlts(v *BitSet) {
- b.conflictingAlts = v
-}
-
-func (b *BaseATNConfigSet) ReadOnly() bool {
- return b.readOnly
-}
-
-func (b *BaseATNConfigSet) SetReadOnly(readOnly bool) {
- b.readOnly = readOnly
-
- if readOnly {
- b.configLookup = nil // Read only, so no need for the lookup cache
- }
-}
-
-func (b *BaseATNConfigSet) String() string {
- s := "["
-
- for i, c := range b.configs {
- s += c.String()
-
- if i != len(b.configs)-1 {
- s += ", "
- }
- }
-
- s += "]"
-
- if b.hasSemanticContext {
- s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext)
- }
-
- if b.uniqueAlt != ATNInvalidAltNumber {
- s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt)
- }
-
- if b.conflictingAlts != nil {
- s += ",conflictingAlts=" + b.conflictingAlts.String()
- }
-
- if b.dipsIntoOuterContext {
- s += ",dipsIntoOuterContext"
- }
-
- return s
-}
-
-type OrderedATNConfigSet struct {
- *BaseATNConfigSet
-}
-
-func NewOrderedATNConfigSet() *OrderedATNConfigSet {
- b := NewBaseATNConfigSet(false)
-
- // This set uses the standard Hash() and Equals() from ATNConfig
- b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst)
-
- return &OrderedATNConfigSet{BaseATNConfigSet: b}
-}
-
-func hashATNConfig(i interface{}) int {
- o := i.(ATNConfig)
- hash := 7
- hash = 31*hash + o.GetState().GetStateNumber()
- hash = 31*hash + o.GetAlt()
- hash = 31*hash + o.GetSemanticContext().Hash()
- return hash
-}
-
-func equalATNConfigs(a, b interface{}) bool {
- if a == nil || b == nil {
- return false
- }
-
- if a == b {
- return true
- }
-
- var ai, ok = a.(ATNConfig)
- var bi, ok1 = b.(ATNConfig)
-
- if !ok || !ok1 {
- return false
- }
-
- if ai.GetState().GetStateNumber() != bi.GetState().GetStateNumber() {
- return false
- }
-
- if ai.GetAlt() != bi.GetAlt() {
- return false
- }
-
- return ai.GetSemanticContext().Equals(bi.GetSemanticContext())
-}
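
BaseATNConfigSet.Add above is a merge-on-collision set: entries are keyed by (state, alt, semanticContext), and when the key already exists the graph-structured contexts are merged into the existing entry instead of storing a second config. A toy sketch of that shape (the stand-in key omits the semantic context, and the merge is just concatenation):

	package main

	import "fmt"

	// config is a toy stand-in for ATNConfig; contexts stands in for the
	// graph-structured prediction context.
	type config struct {
		state, alt int
		contexts   []string
	}

	type key struct{ state, alt int }

	// add mirrors BaseATNConfigSet.Add: on a key collision the contexts are
	// merged into the existing entry rather than adding a second config.
	func add(set map[key]*config, c *config) bool {
		k := key{c.state, c.alt}
		if old, ok := set[k]; ok {
			old.contexts = append(old.contexts, c.contexts...)
			return false
		}
		set[k] = c
		return true
	}

	func main() {
		set := map[key]*config{}
		add(set, &config{state: 1, alt: 2, contexts: []string{"$"}})
		add(set, &config{state: 1, alt: 2, contexts: []string{"expr"}})
		fmt.Println(len(set), set[key{1, 2}].contexts) // 1 [$ expr]
	}
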
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go
deleted file mode 100644
index 41529115f..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewBaseATNConfigSet(false))
-
-type IATNSimulator interface {
- SharedContextCache() *PredictionContextCache
- ATN() *ATN
- DecisionToDFA() []*DFA
-}
-
-type BaseATNSimulator struct {
- atn *ATN
- sharedContextCache *PredictionContextCache
- decisionToDFA []*DFA
-}
-
-func NewBaseATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *BaseATNSimulator {
- b := new(BaseATNSimulator)
-
- b.atn = atn
- b.sharedContextCache = sharedContextCache
-
- return b
-}
-
-func (b *BaseATNSimulator) getCachedContext(context PredictionContext) PredictionContext {
- if b.sharedContextCache == nil {
- return context
- }
-
- visited := make(map[PredictionContext]PredictionContext)
-
- return getCachedBasePredictionContext(context, b.sharedContextCache, visited)
-}
-
-func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache {
- return b.sharedContextCache
-}
-
-func (b *BaseATNSimulator) ATN() *ATN {
- return b.atn
-}
-
-func (b *BaseATNSimulator) DecisionToDFA() []*DFA {
- return b.decisionToDFA
-}
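
getCachedContext above canonicalizes prediction contexts through the shared cache, using the visited map to memoize nodes already processed in the current walk. A toy hash-consing sketch of the same idea (the real code operates on PredictionContext graphs, not this illustrative node type):

	package main

	import "fmt"

	type node struct {
		label    string
		children []*node
	}

	// canonical hash-conses a DAG: children are canonicalized first, so a
	// subtree's identity is its label plus its (already canonical) child
	// pointers; visited memoizes input nodes handled in this walk, playing
	// the role of the visited map in getCachedContext above.
	func canonical(n *node, cache map[string]*node, visited map[*node]*node) *node {
		if n == nil {
			return nil
		}
		if c, ok := visited[n]; ok {
			return c
		}
		kids := make([]*node, len(n.children))
		sig := n.label
		for i, ch := range n.children {
			kids[i] = canonical(ch, cache, visited)
			sig += fmt.Sprintf("|%p", kids[i])
		}
		c, ok := cache[sig]
		if !ok {
			c = &node{label: n.label, children: kids}
			cache[sig] = c
		}
		visited[n] = c
		return c
	}

	func main() {
		cache, visited := map[string]*node{}, map[*node]*node{}
		a := canonical(&node{label: "$"}, cache, visited)
		b := canonical(&node{label: "$"}, cache, visited)
		fmt.Println(a == b) // true: both map to one shared representative
	}
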
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go
deleted file mode 100644
index 1f2a56bc3..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go
+++ /dev/null
@@ -1,393 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import "strconv"
-
-// Constants for serialization.
-const (
- ATNStateInvalidType = 0
- ATNStateBasic = 1
- ATNStateRuleStart = 2
- ATNStateBlockStart = 3
- ATNStatePlusBlockStart = 4
- ATNStateStarBlockStart = 5
- ATNStateTokenStart = 6
- ATNStateRuleStop = 7
- ATNStateBlockEnd = 8
- ATNStateStarLoopBack = 9
- ATNStateStarLoopEntry = 10
- ATNStatePlusLoopBack = 11
- ATNStateLoopEnd = 12
-
- ATNStateInvalidStateNumber = -1
-)
-
-var ATNStateInitialNumTransitions = 4
-
-type ATNState interface {
- GetEpsilonOnlyTransitions() bool
-
- GetRuleIndex() int
- SetRuleIndex(int)
-
- GetNextTokenWithinRule() *IntervalSet
- SetNextTokenWithinRule(*IntervalSet)
-
- GetATN() *ATN
- SetATN(*ATN)
-
- GetStateType() int
-
- GetStateNumber() int
- SetStateNumber(int)
-
- GetTransitions() []Transition
- SetTransitions([]Transition)
- AddTransition(Transition, int)
-
- String() string
- Hash() int
- Equals(Collectable[ATNState]) bool
-}
-
-type BaseATNState struct {
- // NextTokenWithinRule caches lookahead during parsing. Not used during construction.
- NextTokenWithinRule *IntervalSet
-
- // atn is the current ATN.
- atn *ATN
-
- epsilonOnlyTransitions bool
-
- // ruleIndex tracks the Rule index because there are no Rule objects at runtime.
- ruleIndex int
-
- stateNumber int
-
- stateType int
-
- // Track the transitions emanating from this ATN state.
- transitions []Transition
-}
-
-func NewBaseATNState() *BaseATNState {
- return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType}
-}
-
-func (as *BaseATNState) GetRuleIndex() int {
- return as.ruleIndex
-}
-
-func (as *BaseATNState) SetRuleIndex(v int) {
- as.ruleIndex = v
-}
-func (as *BaseATNState) GetEpsilonOnlyTransitions() bool {
- return as.epsilonOnlyTransitions
-}
-
-func (as *BaseATNState) GetATN() *ATN {
- return as.atn
-}
-
-func (as *BaseATNState) SetATN(atn *ATN) {
- as.atn = atn
-}
-
-func (as *BaseATNState) GetTransitions() []Transition {
- return as.transitions
-}
-
-func (as *BaseATNState) SetTransitions(t []Transition) {
- as.transitions = t
-}
-
-func (as *BaseATNState) GetStateType() int {
- return as.stateType
-}
-
-func (as *BaseATNState) GetStateNumber() int {
- return as.stateNumber
-}
-
-func (as *BaseATNState) SetStateNumber(stateNumber int) {
- as.stateNumber = stateNumber
-}
-
-func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet {
- return as.NextTokenWithinRule
-}
-
-func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) {
- as.NextTokenWithinRule = v
-}
-
-func (as *BaseATNState) Hash() int {
- return as.stateNumber
-}
-
-func (as *BaseATNState) String() string {
- return strconv.Itoa(as.stateNumber)
-}
-
-func (as *BaseATNState) Equals(other Collectable[ATNState]) bool {
- if ot, ok := other.(ATNState); ok {
- return as.stateNumber == ot.GetStateNumber()
- }
-
- return false
-}
-
-func (as *BaseATNState) isNonGreedyExitState() bool {
- return false
-}
-
-func (as *BaseATNState) AddTransition(trans Transition, index int) {
- if len(as.transitions) == 0 {
- as.epsilonOnlyTransitions = trans.getIsEpsilon()
- } else if as.epsilonOnlyTransitions != trans.getIsEpsilon() {
- as.epsilonOnlyTransitions = false
- }
-
- if index == -1 {
- as.transitions = append(as.transitions, trans)
- } else {
- as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...)
- // TODO: as.transitions.splice(index, 1, trans)
- }
-}
-
-type BasicState struct {
- *BaseATNState
-}
-
-func NewBasicState() *BasicState {
- b := NewBaseATNState()
-
- b.stateType = ATNStateBasic
-
- return &BasicState{BaseATNState: b}
-}
-
-type DecisionState interface {
- ATNState
-
- getDecision() int
- setDecision(int)
-
- getNonGreedy() bool
- setNonGreedy(bool)
-}
-
-type BaseDecisionState struct {
- *BaseATNState
- decision int
- nonGreedy bool
-}
-
-func NewBaseDecisionState() *BaseDecisionState {
- return &BaseDecisionState{BaseATNState: NewBaseATNState(), decision: -1}
-}
-
-func (s *BaseDecisionState) getDecision() int {
- return s.decision
-}
-
-func (s *BaseDecisionState) setDecision(b int) {
- s.decision = b
-}
-
-func (s *BaseDecisionState) getNonGreedy() bool {
- return s.nonGreedy
-}
-
-func (s *BaseDecisionState) setNonGreedy(b bool) {
- s.nonGreedy = b
-}
-
-type BlockStartState interface {
- DecisionState
-
- getEndState() *BlockEndState
- setEndState(*BlockEndState)
-}
-
-// BaseBlockStartState is the start of a regular (...) block.
-type BaseBlockStartState struct {
- *BaseDecisionState
- endState *BlockEndState
-}
-
-func NewBlockStartState() *BaseBlockStartState {
- return &BaseBlockStartState{BaseDecisionState: NewBaseDecisionState()}
-}
-
-func (s *BaseBlockStartState) getEndState() *BlockEndState {
- return s.endState
-}
-
-func (s *BaseBlockStartState) setEndState(b *BlockEndState) {
- s.endState = b
-}
-
-type BasicBlockStartState struct {
- *BaseBlockStartState
-}
-
-func NewBasicBlockStartState() *BasicBlockStartState {
- b := NewBlockStartState()
-
- b.stateType = ATNStateBlockStart
-
- return &BasicBlockStartState{BaseBlockStartState: b}
-}
-
-var _ BlockStartState = &BasicBlockStartState{}
-
-// BlockEndState is a terminal node of a simple (a|b|c) block.
-type BlockEndState struct {
- *BaseATNState
- startState ATNState
-}
-
-func NewBlockEndState() *BlockEndState {
- b := NewBaseATNState()
-
- b.stateType = ATNStateBlockEnd
-
- return &BlockEndState{BaseATNState: b}
-}
-
-// RuleStopState is the last node in the ATN for a rule, unless that rule is the
-// start symbol. In that case, there is one transition to EOF. Later, we might
-// encode references to all calls to this rule to compute FOLLOW sets for error
-// handling.
-type RuleStopState struct {
- *BaseATNState
-}
-
-func NewRuleStopState() *RuleStopState {
- b := NewBaseATNState()
-
- b.stateType = ATNStateRuleStop
-
- return &RuleStopState{BaseATNState: b}
-}
-
-type RuleStartState struct {
- *BaseATNState
- stopState ATNState
- isPrecedenceRule bool
-}
-
-func NewRuleStartState() *RuleStartState {
- b := NewBaseATNState()
-
- b.stateType = ATNStateRuleStart
-
- return &RuleStartState{BaseATNState: b}
-}
-
-// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two
-// transitions: one to the loop back to start of the block, and one to exit.
-type PlusLoopbackState struct {
- *BaseDecisionState
-}
-
-func NewPlusLoopbackState() *PlusLoopbackState {
- b := NewBaseDecisionState()
-
- b.stateType = ATNStatePlusLoopBack
-
- return &PlusLoopbackState{BaseDecisionState: b}
-}
-
-// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a
-// decision state; we don't use it for code generation. Somebody might need it,
-// so it is included for completeness. In reality, PlusLoopbackState is the
-// real decision-making node for A+.
-type PlusBlockStartState struct {
- *BaseBlockStartState
- loopBackState ATNState
-}
-
-func NewPlusBlockStartState() *PlusBlockStartState {
- b := NewBlockStartState()
-
- b.stateType = ATNStatePlusBlockStart
-
- return &PlusBlockStartState{BaseBlockStartState: b}
-}
-
-var _ BlockStartState = &PlusBlockStartState{}
-
-// StarBlockStartState is the block that begins a closure loop.
-type StarBlockStartState struct {
- *BaseBlockStartState
-}
-
-func NewStarBlockStartState() *StarBlockStartState {
- b := NewBlockStartState()
-
- b.stateType = ATNStateStarBlockStart
-
- return &StarBlockStartState{BaseBlockStartState: b}
-}
-
-var _ BlockStartState = &StarBlockStartState{}
-
-type StarLoopbackState struct {
- *BaseATNState
-}
-
-func NewStarLoopbackState() *StarLoopbackState {
- b := NewBaseATNState()
-
- b.stateType = ATNStateStarLoopBack
-
- return &StarLoopbackState{BaseATNState: b}
-}
-
-type StarLoopEntryState struct {
- *BaseDecisionState
- loopBackState ATNState
- precedenceRuleDecision bool
-}
-
-func NewStarLoopEntryState() *StarLoopEntryState {
- b := NewBaseDecisionState()
-
- b.stateType = ATNStateStarLoopEntry
-
-	// precedenceRuleDecision (false by default) indicates whether this state can
-	// benefit from a precedence DFA during SLL decision making.
- return &StarLoopEntryState{BaseDecisionState: b}
-}
-
-// LoopEndState marks the end of a * or + loop.
-type LoopEndState struct {
- *BaseATNState
- loopBackState ATNState
-}
-
-func NewLoopEndState() *LoopEndState {
- b := NewBaseATNState()
-
- b.stateType = ATNStateLoopEnd
-
- return &LoopEndState{BaseATNState: b}
-}
-
-// TokensStartState is the Tokens rule start state linking to each lexer rule start state.
-type TokensStartState struct {
- *BaseDecisionState
-}
-
-func NewTokensStartState() *TokensStartState {
- b := NewBaseDecisionState()
-
- b.stateType = ATNStateTokenStart
-
- return &TokensStartState{BaseDecisionState: b}
-}
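
The state types above all emulate the Java class hierarchy with the same Go idiom: embed a pointer to the base struct and adjust its fields in the constructor. A minimal sketch of that pattern:

	package main

	import "fmt"

	type base struct{ stateType int }

	func (b *base) Type() int { return b.stateType }

	// loopEnd "subclasses" base by embedding a pointer to it; the constructor
	// sets the base's fields, as NewLoopEndState does above.
	type loopEnd struct {
		*base
	}

	func newLoopEnd() *loopEnd {
		b := &base{}
		b.stateType = 12 // ATNStateLoopEnd above
		return &loopEnd{base: b}
	}

	func main() {
		fmt.Println(newLoopEnd().Type()) // 12
	}
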
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go
deleted file mode 100644
index f679f0dcd..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
- "os"
- "strconv"
-)
-
-// Provides an empty default implementation of ErrorListener. The default
-// implementation of each method does nothing, but can be overridden as
-// necessary.
-
-type ErrorListener interface {
- SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)
- ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet)
- ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet)
- ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet)
-}
-
-type DefaultErrorListener struct {
-}
-
-func NewDefaultErrorListener() *DefaultErrorListener {
- return new(DefaultErrorListener)
-}
-
-func (d *DefaultErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
-}
-
-func (d *DefaultErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
-}
-
-func (d *DefaultErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
-}
-
-func (d *DefaultErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
-}
-
-type ConsoleErrorListener struct {
- *DefaultErrorListener
-}
-
-func NewConsoleErrorListener() *ConsoleErrorListener {
- return new(ConsoleErrorListener)
-}
-
-// ConsoleErrorListenerINSTANCE provides a default instance of ConsoleErrorListener.
-var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener()
-
-// SyntaxError prints messages to os.Stderr containing the values of line,
-// column, and msg using the following format:
-//
-//	line <line>:<column> <msg>
-func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
- fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg)
-}
-
-type ProxyErrorListener struct {
- *DefaultErrorListener
- delegates []ErrorListener
-}
-
-func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener {
- if delegates == nil {
- panic("delegates is not provided")
- }
- l := new(ProxyErrorListener)
- l.delegates = delegates
- return l
-}
-
-func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
- for _, d := range p.delegates {
- d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e)
- }
-}
-
-func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
- for _, d := range p.delegates {
- d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
- }
-}
-
-func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
- for _, d := range p.delegates {
- d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs)
- }
-}
-
-func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
- for _, d := range p.delegates {
- d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs)
- }
-}
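// A minimal sketch of how the listener interface deleted above is typically
// used from application code: embed DefaultErrorListener so only SyntaxError
// needs overriding, then swap it in for the console listener. The `parser`
// package and NewJSONParser constructor are hypothetical stand-ins for a
// generated parser; the import path matches the vendored copy removed here.
package main

import (
	"fmt"

	antlr "github.com/antlr/antlr4/runtime/Go/antlr/v4"
)

type collectingErrorListener struct {
	*antlr.DefaultErrorListener // no-op implementations of the other methods
	errs                        []string
}

func (c *collectingErrorListener) SyntaxError(_ antlr.Recognizer, _ interface{}, line, column int, msg string, _ antlr.RecognitionException) {
	c.errs = append(c.errs, fmt.Sprintf("%d:%d %s", line, column, msg))
}

// Typical wiring (hypothetical generated parser):
//
//	p := parser.NewJSONParser(tokens)
//	l := &collectingErrorListener{DefaultErrorListener: antlr.NewDefaultErrorListener()}
//	p.RemoveErrorListeners() // drop ConsoleErrorListenerINSTANCE
//	p.AddErrorListener(l)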
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go
deleted file mode 100644
index 5c0a637ba..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go
+++ /dev/null
@@ -1,734 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
- "reflect"
- "strconv"
- "strings"
-)
-
-type ErrorStrategy interface {
- reset(Parser)
- RecoverInline(Parser) Token
- Recover(Parser, RecognitionException)
- Sync(Parser)
- InErrorRecoveryMode(Parser) bool
- ReportError(Parser, RecognitionException)
- ReportMatch(Parser)
-}
-
-// DefaultErrorStrategy is the default implementation of ErrorStrategy used
-// for error reporting and recovery in ANTLR parsers.
-type DefaultErrorStrategy struct {
- errorRecoveryMode bool
- lastErrorIndex int
- lastErrorStates *IntervalSet
-}
-
-var _ ErrorStrategy = &DefaultErrorStrategy{}
-
-func NewDefaultErrorStrategy() *DefaultErrorStrategy {
-
- d := new(DefaultErrorStrategy)
-
- // Indicates whether the error strategy is currently "recovering from an
- // error". This is used to suppress Reporting multiple error messages while
- // attempting to recover from a detected syntax error.
- //
- // @see //InErrorRecoveryMode
- //
- d.errorRecoveryMode = false
-
- // The index into the input stream where the last error occurred.
- // This is used to prevent infinite loops where an error is found
- // but no token is consumed during recovery...another error is found,
- // ad nauseum. This is a failsafe mechanism to guarantee that at least
- // one token/tree node is consumed for two errors.
- //
- d.lastErrorIndex = -1
- d.lastErrorStates = nil
- return d
-}
-
-// reset simply calls endErrorCondition to ensure that the handler is not in
-// error recovery mode.
-func (d *DefaultErrorStrategy) reset(recognizer Parser) {
- d.endErrorCondition(recognizer)
-}
-
-// This method is called to enter error recovery mode when a recognition
-// exception is Reported.
-//
-// @param recognizer the parser instance
-func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) {
- d.errorRecoveryMode = true
-}
-
-func (d *DefaultErrorStrategy) InErrorRecoveryMode(recognizer Parser) bool {
- return d.errorRecoveryMode
-}
-
-// endErrorCondition is called to leave error recovery mode after recovering
-// from a recognition exception.
-func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) {
- d.errorRecoveryMode = false
- d.lastErrorStates = nil
- d.lastErrorIndex = -1
-}
-
-// ReportMatch, in the default implementation, simply calls
-// endErrorCondition.
-func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) {
- d.endErrorCondition(recognizer)
-}
-
-// ReportError returns immediately if the handler is already in error
-// recovery mode. Otherwise, it calls beginErrorCondition and dispatches the
-// reporting task based on the runtime type of e according to the following
-// table:
-//
-//   - NoViableAltException: dispatches the call to ReportNoViableAlternative
-//   - InputMisMatchException: dispatches the call to ReportInputMisMatch
-//   - FailedPredicateException: dispatches the call to ReportFailedPredicate
-//   - all other types: calls Parser.NotifyErrorListeners to report the
-//     exception
-func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) {
- // if we've already Reported an error and have not Matched a token
- // yet successfully, don't Report any errors.
- if d.InErrorRecoveryMode(recognizer) {
- return // don't Report spurious errors
- }
- d.beginErrorCondition(recognizer)
-
- switch t := e.(type) {
- default:
- fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name())
- // fmt.Println(e.stack)
- recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e)
- case *NoViableAltException:
- d.ReportNoViableAlternative(recognizer, t)
- case *InputMisMatchException:
- d.ReportInputMisMatch(recognizer, t)
- case *FailedPredicateException:
- d.ReportFailedPredicate(recognizer, t)
- }
-}
-
-// Recover resynchronizes the parser by consuming tokens until we find one in
-// the resynchronization set; loosely, the set of tokens that can follow the
-// current rule.
-func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
-
- if d.lastErrorIndex == recognizer.GetInputStream().Index() &&
- d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) {
- // Uh oh, another error at the same token index and a previously-visited
- // state in the ATN: this must be a case where LT(1) is in the recovery
- // token set, so nothing got consumed. Consume at least a single token
- // to prevent an infinite loop; this is a failsafe.
- recognizer.Consume()
- }
- d.lastErrorIndex = recognizer.GetInputStream().Index()
- if d.lastErrorStates == nil {
- d.lastErrorStates = NewIntervalSet()
- }
- d.lastErrorStates.addOne(recognizer.GetState())
- followSet := d.getErrorRecoverySet(recognizer)
- d.consumeUntil(recognizer, followSet)
-}
-
-// Sync makes sure that the current lookahead symbol is consistent with what
-// we were expecting at this point in the ATN. You can call it at any time,
-// but ANTLR only generates code to check before subrules/loops and at each
-// iteration.
-//
-// Implements Jim Idle's magic Sync mechanism in closures and optional
-// subrules. E.g.,
-//
-//
-// a : Sync ( stuff Sync )*
-// Sync : {consume to what can follow Sync}
-//
-//
-// At the start of a sub rule upon error, {@link //Sync} performs single
-// token deletion, if possible. If it can't do that, it bails on the current
-// rule and uses the default error recovery, which consumes until the
-// reSynchronization set of the current rule.
-//
-// If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
-// with an empty alternative), then the expected set includes what follows
-// the subrule.
-//
-// During loop iteration, it consumes until it sees a token that can start a
-// sub rule or that can follow the loop. Yes, that is pretty aggressive. We
-// opt to stay in the loop as long as possible.
-//
-// ORIGINS
-//
-// Previous versions of ANTLR did a poor job of their recovery within loops.
-// A single mismatched or missing token would force the parser to bail
-// out of the entire rule surrounding the loop. So, for rule
-//
-//
-// classfunc : 'class' ID '{' member* '}'
-//
-//
-// input with an extra token between members would force the parser to
-// consume until it found the next class definition rather than the next
-// member definition of the current class.
-//
-// This functionality costs a little bit of effort because the parser has to
-// compare the token set at the start of the loop and at each iteration. If
-// for some reason speed is suffering, you can turn off this functionality by
-// simply overriding this method with a blank body (see the sketch after this
-// function).
-func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
- // If already recovering, don't try to Sync
- if d.InErrorRecoveryMode(recognizer) {
- return
- }
-
- s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
- la := recognizer.GetTokenStream().LA(1)
-
- // Try the cheaper subset first; we might get lucky, and it seems to shave a wee bit off.
- nextTokens := recognizer.GetATN().NextTokens(s, nil)
- if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) {
- return
- }
-
- switch s.GetStateType() {
- case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry:
- // Report error and recover if possible
- if d.SingleTokenDeletion(recognizer) != nil {
- return
- }
- panic(NewInputMisMatchException(recognizer))
- case ATNStatePlusLoopBack, ATNStateStarLoopBack:
- d.ReportUnwantedToken(recognizer)
- expecting := NewIntervalSet()
- expecting.addSet(recognizer.GetExpectedTokens())
- whatFollowsLoopIterationOrRule := expecting.addSet(d.getErrorRecoverySet(recognizer))
- d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule)
- default:
- // do nothing if we can't identify the exact kind of ATN state
- }
-}
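// A sketch of the blank-Sync override suggested in the comment above, for
// cases where the per-iteration token-set comparison shows up in profiles.
// Everything else is inherited from the embedded default strategy. Assumes
// import antlr "github.com/antlr/antlr4/runtime/Go/antlr/v4".

type noSyncStrategy struct {
	*antlr.DefaultErrorStrategy
}

// Sync is deliberately a no-op: skip the pre-subrule/loop consistency check.
func (n *noSyncStrategy) Sync(_ antlr.Parser) {}

// Installed on a generated parser with:
//
//	p.SetErrorHandler(&noSyncStrategy{antlr.NewDefaultErrorStrategy()})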
-
-// This is called by {@link //ReportError} when the exception is a
-// {@link NoViableAltException}.
-//
-// @see //ReportError
-//
-// @param recognizer the parser instance
-// @param e the recognition exception
-func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) {
- tokens := recognizer.GetTokenStream()
- var input string
- if tokens != nil {
- if e.startToken.GetTokenType() == TokenEOF {
- input = ""
- } else {
- input = tokens.GetTextFromTokens(e.startToken, e.offendingToken)
- }
- } else {
- input = ""
- }
- msg := "no viable alternative at input " + d.escapeWSAndQuote(input)
- recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
-}
-
-// This is called by {@link //ReportError} when the exception is an
-// {@link InputMisMatchException}.
-//
-// @see //ReportError
-//
-// @param recognizer the parser instance
-// @param e the recognition exception
-func (d *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) {
- msg := "mismatched input " + d.GetTokenErrorDisplay(e.offendingToken) +
- " expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
- recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
-}
-
-// This is called by {@link //ReportError} when the exception is a
-// {@link FailedPredicateException}.
-//
-// @see //ReportError
-//
-// @param recognizer the parser instance
-// @param e the recognition exception
-func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) {
- ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()]
- msg := "rule " + ruleName + " " + e.message
- recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
-}
-
-// ReportUnwantedToken is called to report a syntax error that requires the
-// removal of a token from the input stream. At the time this method is
-// called, the erroneous symbol is the current LT(1) symbol and has not yet
-// been removed from the input stream. When this method returns, recognizer
-// is in error recovery mode.
-//
-// This method is called when {@link //singleTokenDeletion} identifies
-// single-token deletion as a viable recovery strategy for a mismatched
-// input error.
-//
-// The default implementation simply returns if the handler is already in
-// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
-// enter error recovery mode, followed by calling
-// {@link Parser//NotifyErrorListeners}.
-//
-// @param recognizer the parser instance
-func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
- if d.InErrorRecoveryMode(recognizer) {
- return
- }
- d.beginErrorCondition(recognizer)
- t := recognizer.GetCurrentToken()
- tokenName := d.GetTokenErrorDisplay(t)
- expecting := d.GetExpectedTokens(recognizer)
- msg := "extraneous input " + tokenName + " expecting " +
- expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
- recognizer.NotifyErrorListeners(msg, t, nil)
-}
-
-// ReportMissingToken is called to report a syntax error that requires the
-// insertion of a missing token into the input stream. At the time this
-// method is called, the missing token has not yet been inserted. When this
-// method returns, recognizer is in error recovery mode.
-//
-// This method is called when {@link //singleTokenInsertion} identifies
-// single-token insertion as a viable recovery strategy for a mismatched
-// input error.
-//
-// The default implementation simply returns if the handler is already in
-// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
-// enter error recovery mode, followed by calling
-// {@link Parser//NotifyErrorListeners}.
-//
-// @param recognizer the parser instance
-func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
- if d.InErrorRecoveryMode(recognizer) {
- return
- }
- d.beginErrorCondition(recognizer)
- t := recognizer.GetCurrentToken()
- expecting := d.GetExpectedTokens(recognizer)
- msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) +
- " at " + d.GetTokenErrorDisplay(t)
- recognizer.NotifyErrorListeners(msg, t, nil)
-}
-
-// The default implementation attempts to recover from the mismatched input
-// by using single token insertion and deletion as described below. If the
-// recovery attempt fails, this method panics with an
-// InputMisMatchException.
-//
-// EXTRA TOKEN (single token deletion)
-//
-// {@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
-// right token, however, then assume {@code LA(1)} is some extra spurious
-// token and delete it. Then consume and return the next token (which was
-// the {@code LA(2)} token) as the successful result of the Match operation.
-//
-// This recovery strategy is implemented by {@link
-// //singleTokenDeletion}.
-//
-// MISSING TOKEN (single token insertion)
-//
-// If current token (at {@code LA(1)}) is consistent with what could come
-// after the expected {@code LA(1)} token, then assume the token is missing
-// and use the parser's {@link TokenFactory} to create it on the fly. The
-// "insertion" is performed by returning the created token as the successful
-// result of the Match operation.
-//
-// This recovery strategy is implemented by {@link
-// //singleTokenInsertion}.
-//
-// EXAMPLE
-//
-// For example, input i=(3 is clearly missing the ')'. When the parser
-// returns from the nested call to expr, it will have the call chain:
-//
-//   stat → expr → atom
-//
-// and it will be trying to Match the ')' at this point in the derivation:
-//
-//   => ID '=' '(' INT ')' ('+' atom)* ';'
-//                         ^
-//
-// The attempt to Match ')' will fail when it sees ';' and will call
-// RecoverInline. To recover, it sees that ';' (LA(1)) is in the set of
-// tokens that can follow the ')' token reference in rule atom, so it can
-// assume that you forgot the ')'.
-func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
- // SINGLE TOKEN DELETION
- MatchedSymbol := d.SingleTokenDeletion(recognizer)
- if MatchedSymbol != nil {
- // we have deleted the extra token.
- // now, move past ttype token as if all were ok
- recognizer.Consume()
- return MatchedSymbol
- }
- // SINGLE TOKEN INSERTION
- if d.SingleTokenInsertion(recognizer) {
- return d.GetMissingSymbol(recognizer)
- }
- // even that didn't work must panic the exception
- panic(NewInputMisMatchException(recognizer))
-}
-
-// This method implements the single-token insertion inline error recovery
-// strategy. It is called by {@link //recoverInline} if the single-token
-// deletion strategy fails to recover from the mismatched input. If this
-// method returns {@code true}, {@code recognizer} will be in error recovery
-// mode.
-//
-// This method determines whether or not single-token insertion is viable by
-// checking if the LA(1) input symbol could be successfully Matched if it
-// were instead the LA(2) symbol. If this method returns true, the caller is
-// responsible for creating and inserting a token with the correct type to
-// produce this behavior.
-//
-// @param recognizer the parser instance
-// @return {@code true} if single-token insertion is a viable recovery
-// strategy for the current mismatched input, otherwise {@code false}
-func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool {
- currentSymbolType := recognizer.GetTokenStream().LA(1)
- // if current token is consistent with what could come after current
- // ATN state, then we know we're missing a token error recovery
- // is free to conjure up and insert the missing token
- atn := recognizer.GetInterpreter().atn
- currentState := atn.states[recognizer.GetState()]
- next := currentState.GetTransitions()[0].getTarget()
- expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext())
- if expectingAtLL2.contains(currentSymbolType) {
- d.ReportMissingToken(recognizer)
- return true
- }
-
- return false
-}
-
-// This method implements the single-token deletion inline error recovery
-// strategy. It is called by {@link //recoverInline} to attempt to recover
-// from mismatched input. If this method returns nil, the parser and error
-// handler state will not have changed. If this method returns non-nil,
-// {@code recognizer} will not be in error recovery mode since the
-// returned token was a successful Match.
-//
-// If the single-token deletion is successful, this method calls
-// ReportUnwantedToken to report the error, followed by Parser.Consume to
-// actually "delete" the extraneous token. Then, before returning,
-// ReportMatch is called to signal a successful Match.
-//
-// @param recognizer the parser instance
-// @return the successfully Matched {@link Token} instance if single-token
-// deletion successfully recovers from the mismatched input, otherwise
-// {@code nil}
-func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
- NextTokenType := recognizer.GetTokenStream().LA(2)
- expecting := d.GetExpectedTokens(recognizer)
- if expecting.contains(NextTokenType) {
- d.ReportUnwantedToken(recognizer)
- // print("recoverFromMisMatchedToken deleting " \
- // + str(recognizer.GetTokenStream().LT(1)) \
- // + " since " + str(recognizer.GetTokenStream().LT(2)) \
- // + " is what we want", file=sys.stderr)
- recognizer.Consume() // simply delete extra token
- // we want to return the token we're actually Matching
- MatchedSymbol := recognizer.GetCurrentToken()
- d.ReportMatch(recognizer) // we know current token is correct
- return MatchedSymbol
- }
-
- return nil
-}
-
-// Conjure up a missing token during error recovery.
-//
-// The recognizer attempts to recover from single missing
-// symbols. But, actions might refer to that missing symbol.
-// For example, x=ID {f($x)}. The action clearly assumes
-// that there has been an identifier Matched previously and that
-// $x points at that token. If that token is missing, but
-// the next token in the stream is what we want we assume that
-// d token is missing and we keep going. Because we
-// have to return some token to replace the missing token,
-// we have to conjure one up. This method gives the user control
-// over the tokens returned for missing tokens. Mostly,
-// you will want to create something special for identifier
-// tokens. For literals such as '{' and ',', the default
-// action in the parser or tree parser works. It simply creates
-// a CommonToken of the appropriate type. The text will be the token.
-// If you change what tokens must be created by the lexer,
-// override d method to create the appropriate tokens.
-func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
- currentSymbol := recognizer.GetCurrentToken()
- expecting := d.GetExpectedTokens(recognizer)
- expectedTokenType := expecting.first()
- var tokenText string
-
- if expectedTokenType == TokenEOF {
- tokenText = "<missing EOF>"
- } else {
- ln := recognizer.GetLiteralNames()
- if expectedTokenType > 0 && expectedTokenType < len(ln) {
- tokenText = "<missing " + ln[expectedTokenType] + ">"
- } else {
- tokenText = "<missing undefined>" // TODO matches the JS impl
- }
- }
- current := currentSymbol
- lookback := recognizer.GetTokenStream().LT(-1)
- if current.GetTokenType() == TokenEOF && lookback != nil {
- current = lookback
- }
-
- tf := recognizer.GetTokenFactory()
-
- return tf.Create(current.GetSource(), expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.GetLine(), current.GetColumn())
-}
-
-func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet {
- return recognizer.GetExpectedTokens()
-}
-
-// How should a token be displayed in an error message? The default
-// is to display just the text, but during development you might
-// want to have a lot of information spit out. Override in that case
-// to use t.String() (which, for CommonToken, dumps everything about
-// the token). This is better than forcing you to override a method in
-// your token objects because you don't have to go modify your lexer
-// so that it creates a new token type.
-func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string {
- if t == nil {
- return "<no token>"
- }
- s := t.GetText()
- if s == "" {
- if t.GetTokenType() == TokenEOF {
- s = "<EOF>"
- } else {
- s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
- }
- }
- return d.escapeWSAndQuote(s)
-}
-
-func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
- s = strings.Replace(s, "\t", "\\t", -1)
- s = strings.Replace(s, "\n", "\\n", -1)
- s = strings.Replace(s, "\r", "\\r", -1)
- return "'" + s + "'"
-}
-
-// Compute the error recovery set for the current rule. During
-// rule invocation, the parser pushes the set of tokens that can
-// follow that rule reference on the stack; this amounts to
-// computing FIRST of what follows the rule reference in the
-// enclosing rule. See LinearApproximator.FIRST().
-// This local follow set only includes tokens
-// from within the rule i.e., the FIRST computation done by
-// ANTLR stops at the end of a rule.
-//
-// # EXAMPLE
-//
-// When you find a "no viable alt exception", the input is not
-// consistent with any of the alternatives for rule r. The best
-// thing to do is to consume tokens until you see something that
-// can legally follow a call to r *or* any rule that called r.
-// You don't want the exact set of viable next tokens because the
-// input might just be missing a token--you might consume the
-// rest of the input looking for one of the missing tokens.
-//
-// Consider grammar:
-//
-// a : '[' b ']'
-// | '(' b ')'
-//
-// b : c '^' INT
-// c : ID
-// | INT
-//
-// At each rule invocation, the set of tokens that could follow
-// that rule is pushed on a stack. Here are the various
-// context-sensitive follow sets:
-//
-// FOLLOW(b1_in_a) = FIRST(']') = ']'
-// FOLLOW(b2_in_a) = FIRST(')') = ')'
-// FOLLOW(c_in_b) = FIRST('^') = '^'
-//
-// Upon erroneous input "[]", the call chain is
-//
-// a -> b -> c
-//
-// and, hence, the follow context stack is:
-//
-// depth follow set start of rule execution
-// 0 a (from main())
-// 1 ']' b
-// 2 '^' c
-//
-// Notice that ')' is not included, because b would have to have
-// been called from a different context in rule a for ')' to be
-// included.
-//
-// For error recovery, we cannot consider FOLLOW(c)
-// (context-sensitive or otherwise). We need the combined set of
-// all context-sensitive FOLLOW sets--the set of all tokens that
-// could follow any reference in the call chain. We need to
-// reSync to one of those tokens. Note that FOLLOW(c)='^' and if
-// we reSync'd to that token, we'd consume until EOF. We need to
-// Sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
-// In this case, for input "[]", LA(1) is ']' and in the set, so we would
-// not consume anything. After printing an error, rule c would
-// return normally. Rule b would not find the required '^' though.
-// At this point, it gets a mismatched token error and panics with an
-// exception (since LA(1) is not in the viable following token
-// set). The rule exception handler tries to recover, but finds
-// the same recovery set and doesn't consume anything. Rule b
-// exits normally returning to rule a. Now it finds the ']' (and
-// with the successful Match exits errorRecovery mode).
-//
-// So, you can see that the parser walks up the call chain looking
-// for the token that was a member of the recovery set.
-//
-// Errors are not generated in errorRecovery mode.
-//
-// ANTLR's error recovery mechanism is based upon original ideas:
-//
-// "Algorithms + Data Structures = Programs" by Niklaus Wirth
-//
-// and
-//
-// "A note on error recovery in recursive descent parsers":
-// http://portal.acm.org/citation.cfm?id=947902.947905
-//
-// Later, Josef Grosch had some good ideas:
-//
-// "Efficient and Comfortable Error Recovery in Recursive Descent
-// Parsers":
-// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
-//
-// Like Grosch I implement context-sensitive FOLLOW sets that are combined
-// at run-time upon error to avoid overhead during parsing.
-func (d *DefaultErrorStrategy) getErrorRecoverySet(recognizer Parser) *IntervalSet {
- atn := recognizer.GetInterpreter().atn
- ctx := recognizer.GetParserRuleContext()
- recoverSet := NewIntervalSet()
- for ctx != nil && ctx.GetInvokingState() >= 0 {
- // compute what follows who invoked us
- invokingState := atn.states[ctx.GetInvokingState()]
- rt := invokingState.GetTransitions()[0]
- follow := atn.NextTokens(rt.(*RuleTransition).followState, nil)
- recoverSet.addSet(follow)
- ctx = ctx.GetParent().(ParserRuleContext)
- }
- recoverSet.removeOne(TokenEpsilon)
- return recoverSet
-}
-
-// consumeUntil consumes tokens until one matches the given token set.
-func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) {
- ttype := recognizer.GetTokenStream().LA(1)
- for ttype != TokenEOF && !set.contains(ttype) {
- recognizer.Consume()
- ttype = recognizer.GetTokenStream().LA(1)
- }
-}
-
-// BailErrorStrategy responds to syntax errors by immediately canceling the
-// parse operation with a ParseCancellationException. The implementation
-// ensures that the ParserRuleContext exception field is set for all parse
-// tree nodes that were not completed prior to encountering the error.
-//
-// This error strategy is useful in the following scenarios:
-//
-//   - Two-stage parsing: this error strategy allows the first stage of
-//     two-stage parsing to terminate immediately if an error is encountered,
-//     and to fall back to the second stage. In addition to avoiding wasted
-//     work by not attempting to recover from errors here, the empty
-//     implementation of Sync improves the performance of the first stage.
-//     (A sketch of the two-stage pattern follows at the end of this file.)
-//   - Silent validation: when syntax errors are not being reported or
-//     logged, and the parse result is simply ignored if errors occur,
-//     BailErrorStrategy avoids wasting work on recovering from errors when
-//     the result will be ignored either way.
-//
-// Install it with myparser.SetErrorHandler(NewBailErrorStrategy()).
-
-type BailErrorStrategy struct {
- *DefaultErrorStrategy
-}
-
-var _ ErrorStrategy = &BailErrorStrategy{}
-
-func NewBailErrorStrategy() *BailErrorStrategy {
-
- b := new(BailErrorStrategy)
-
- b.DefaultErrorStrategy = NewDefaultErrorStrategy()
-
- return b
-}
-
-// Recover does not recover from exception e; instead it panics with a
-// ParseCancellationException so the error is not caught by the rule
-// functions' recovery code. (Per the TODO below, the original
-// RecognitionException is currently not attached to the panic.)
-func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
- context := recognizer.GetParserRuleContext()
- for context != nil {
- context.SetException(e)
- if parent, ok := context.GetParent().(ParserRuleContext); ok {
- context = parent
- } else {
- context = nil
- }
- }
- panic(NewParseCancellationException()) // TODO we don't emit e properly
-}
-
-// RecoverInline makes sure we don't attempt to recover inline; if the parser
-// successfully recovers, it won't panic an exception.
-func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token {
- b.Recover(recognizer, NewInputMisMatchException(recognizer))
-
- return nil
-}
-
-// Sync makes sure we don't attempt to recover from problems in subrules.
-func (b *BailErrorStrategy) Sync(recognizer Parser) {
- // pass
-}
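// A sketch of the two-stage parse referenced in the BailErrorStrategy comment
// above: stage one runs SLL prediction and bails on the first error; only on
// failure do we rewind and re-parse with full LL and default recovery. The
// parser package, NewExprParser, and the Expr start rule are hypothetical;
// the antlr identifiers come from this vendored runtime.
package main

import (
	antlr "github.com/antlr/antlr4/runtime/Go/antlr/v4"

	"example.com/demo/parser" // hypothetical generated package
)

func twoStageParse(tokens *antlr.CommonTokenStream) (tree antlr.ParseTree) {
	p := parser.NewExprParser(tokens)
	p.SetErrorHandler(antlr.NewBailErrorStrategy())
	p.GetInterpreter().SetPredictionMode(antlr.PredictionModeSLL)

	defer func() {
		if r := recover(); r != nil {
			// Stage 1 bailed with a ParseCancellationException: rewind the
			// token stream and re-parse with full LL and default recovery.
			tokens.Seek(0)
			p2 := parser.NewExprParser(tokens)
			p2.GetInterpreter().SetPredictionMode(antlr.PredictionModeLL)
			tree = p2.Expr()
		}
	}()
	return p.Expr()
}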
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go
deleted file mode 100644
index 3954c1378..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just
-// 3 kinds of errors: prediction errors, failed predicate errors, and
-// mismatched input errors. In each case, the parser knows where it is
-// in the input, where it is in the ATN, the rule invocation stack,
-// and what kind of problem occurred.
-
-type RecognitionException interface {
- GetOffendingToken() Token
- GetMessage() string
- GetInputStream() IntStream
-}
-
-type BaseRecognitionException struct {
- message string
- recognizer Recognizer
- offendingToken Token
- offendingState int
- ctx RuleContext
- input IntStream
-}
-
-func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException {
-
- // todo
- // Error.call(this)
- //
- // if (!!Error.captureStackTrace) {
- // Error.captureStackTrace(this, RecognitionException)
- // } else {
- // stack := NewError().stack
- // }
- // TODO may be able to use - "runtime" func Stack(buf []byte, all bool) int
-
- t := new(BaseRecognitionException)
-
- t.message = message
- t.recognizer = recognizer
- t.input = input
- t.ctx = ctx
- // The current {@link Token} when an error occurred. Since not all streams
- // support accessing symbols by index, we have to track the {@link Token}
- // instance itself.
- t.offendingToken = nil
- // Get the ATN state number the parser was in at the time the error
- // occurred. For {@link NoViableAltException} and
- // {@link LexerNoViableAltException} exceptions, this is the
- // {@link DecisionState} number. For others, it is the state whose outgoing
- // edge we couldn't Match.
- t.offendingState = -1
- if t.recognizer != nil {
- t.offendingState = t.recognizer.GetState()
- }
-
- return t
-}
-
-func (b *BaseRecognitionException) GetMessage() string {
- return b.message
-}
-
-func (b *BaseRecognitionException) GetOffendingToken() Token {
- return b.offendingToken
-}
-
-func (b *BaseRecognitionException) GetInputStream() IntStream {
- return b.input
-}
-
-// If the state number is not known, this method returns -1.
-
-// getExpectedTokens gets the set of input symbols which could potentially
-// follow the previously matched symbol at the time this exception was
-// raised.
-//
-// If the set of expected tokens is not known and could not be computed,
-// this method returns nil.
-//
-// @return The set of token types that could potentially follow the current
-// state in the ATN, or nil if the information is not available.
-func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet {
- if b.recognizer != nil {
- return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx)
- }
-
- return nil
-}
-
-func (b *BaseRecognitionException) String() string {
- return b.message
-}
-
-type LexerNoViableAltException struct {
- *BaseRecognitionException
-
- startIndex int
- deadEndConfigs ATNConfigSet
-}
-
-func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException {
-
- l := new(LexerNoViableAltException)
-
- l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil)
-
- l.startIndex = startIndex
- l.deadEndConfigs = deadEndConfigs
-
- return l
-}
-
-func (l *LexerNoViableAltException) String() string {
- symbol := ""
- if l.startIndex >= 0 && l.startIndex < l.input.Size() {
- symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex))
- }
- return "LexerNoViableAltException" + symbol
-}
-
-type NoViableAltException struct {
- *BaseRecognitionException
-
- startToken Token
- offendingToken Token
- ctx ParserRuleContext
- deadEndConfigs ATNConfigSet
-}
-
-// NewNoViableAltException indicates that the parser could not decide which
-// of two or more paths to take based upon the remaining input. It tracks the
-// starting token of the offending input and also knows where the parser was
-// in the various paths when the error occurred. It is reported by
-// ReportNoViableAlternative().
-func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException {
-
- if ctx == nil {
- ctx = recognizer.GetParserRuleContext()
- }
-
- if offendingToken == nil {
- offendingToken = recognizer.GetCurrentToken()
- }
-
- if startToken == nil {
- startToken = recognizer.GetCurrentToken()
- }
-
- if input == nil {
- input = recognizer.GetInputStream().(TokenStream)
- }
-
- n := new(NoViableAltException)
- n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx)
-
- // Which configurations did we try at input.Index() that couldn't Match
- // input.LT(1)?//
- n.deadEndConfigs = deadEndConfigs
- // The token object at the start index; the input stream might
- // not be buffering tokens, so get a reference to it. (At the
- // time the error occurred, of course, the stream needs to keep a
- // buffer of all of the tokens, but later we might not have access to those.)
- n.startToken = startToken
- n.offendingToken = offendingToken
-
- return n
-}
-
-type InputMisMatchException struct {
- *BaseRecognitionException
-}
-
-// NewInputMisMatchException signifies any kind of mismatched input
-// exception, such as when the current input does not match the expected
-// token.
-func NewInputMisMatchException(recognizer Parser) *InputMisMatchException {
-
- i := new(InputMisMatchException)
- i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())
-
- i.offendingToken = recognizer.GetCurrentToken()
-
- return i
-
-}
-
-// A semantic predicate failed during validation. Validation of predicates
-// occurs during normal parsing of the alternative, just like matching a
-// token. Disambiguating predicate evaluation occurs when we test a predicate
-// during prediction.
-
-type FailedPredicateException struct {
- *BaseRecognitionException
-
- ruleIndex int
- predicateIndex int
- predicate string
-}
-
-func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException {
-
- f := new(FailedPredicateException)
-
- f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())
-
- s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
- trans := s.GetTransitions()[0]
- if trans2, ok := trans.(*PredicateTransition); ok {
- f.ruleIndex = trans2.ruleIndex
- f.predicateIndex = trans2.predIndex
- } else {
- f.ruleIndex = 0
- f.predicateIndex = 0
- }
- f.predicate = predicate
- f.offendingToken = recognizer.GetCurrentToken()
-
- return f
-}
-
-func (f *FailedPredicateException) formatMessage(predicate, message string) string {
- if message != "" {
- return message
- }
-
- return "failed predicate: {" + predicate + "}?"
-}
-
-type ParseCancellationException struct {
-}
-
-func NewParseCancellationException() *ParseCancellationException {
- // Error.call(this)
- // Error.captureStackTrace(this, ParseCancellationException)
- return new(ParseCancellationException)
-}
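// The RecognitionException interface above is what listeners receive as the
// final SyntaxError argument; it can be nil (ReportUnwantedToken and
// ReportMissingToken above pass nil), so guard before dereferencing. A small
// sketch, assuming fmt and antlr ("github.com/antlr/antlr4/runtime/Go/antlr/v4")
// are imported; the function name is hypothetical.

func describeRecognitionError(e antlr.RecognitionException) string {
	if e == nil {
		return "syntax error (no recognition exception attached)"
	}
	if tok := e.GetOffendingToken(); tok != nil {
		return fmt.Sprintf("%s at %d:%d near %q", e.GetMessage(), tok.GetLine(), tok.GetColumn(), tok.GetText())
	}
	return e.GetMessage()
}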
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go
deleted file mode 100644
index bd6ad5efe..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "bytes"
- "io"
- "os"
-)
-
-// This is an InputStream that is loaded from a file all at once
-// when you construct the object.
-
-type FileStream struct {
- *InputStream
-
- filename string
-}
-
-func NewFileStream(fileName string) (*FileStream, error) {
-
- buf := bytes.NewBuffer(nil)
-
- f, err := os.Open(fileName)
- if err != nil {
- return nil, err
- }
- defer f.Close()
- _, err = io.Copy(buf, f)
- if err != nil {
- return nil, err
- }
-
- fs := new(FileStream)
-
- fs.filename = fileName
- s := buf.String()
-
- fs.InputStream = NewInputStream(s)
-
- return fs, nil
-
-}
-
-func (f *FileStream) GetSourceName() string {
- return f.filename
-}
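// FileStream reads the whole file eagerly, so NewFileStream is a drop-in
// alternative to NewInputStream for on-disk sources. A sketch of typical
// wiring; NewExprLexer and its package are hypothetical generated code.
package main

import (
	antlr "github.com/antlr/antlr4/runtime/Go/antlr/v4"

	"example.com/demo/parser" // hypothetical generated package
)

func lexFile(path string) ([]antlr.Token, error) {
	input, err := antlr.NewFileStream(path)
	if err != nil {
		return nil, err
	}
	lexer := parser.NewExprLexer(input)
	stream := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
	stream.Fill() // safe to read everything: the file is already in memory
	return stream.GetAllTokens(), nil
}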
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go
deleted file mode 100644
index a8b889ced..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-type InputStream struct {
- name string
- index int
- data []rune
- size int
-}
-
-func NewInputStream(data string) *InputStream {
-
- is := new(InputStream)
-
- is.name = ""
- is.index = 0
- is.data = []rune(data)
- is.size = len(is.data) // number of runes
-
- return is
-}
-
-func (is *InputStream) reset() {
- is.index = 0
-}
-
-func (is *InputStream) Consume() {
- if is.index >= is.size {
- // assert is.LA(1) == TokenEOF
- panic("cannot consume EOF")
- }
- is.index++
-}
-
-func (is *InputStream) LA(offset int) int {
-
- if offset == 0 {
- return 0 // nil
- }
- if offset < 0 {
- offset++ // e.g., translate LA(-1) to use offset=0
- }
- pos := is.index + offset - 1
-
- if pos < 0 || pos >= is.size { // invalid
- return TokenEOF
- }
-
- return int(is.data[pos])
-}
-
-func (is *InputStream) LT(offset int) int {
- return is.LA(offset)
-}
-
-func (is *InputStream) Index() int {
- return is.index
-}
-
-func (is *InputStream) Size() int {
- return is.size
-}
-
-// Mark and Release do nothing; we have the entire buffer.
-func (is *InputStream) Mark() int {
- return -1
-}
-
-func (is *InputStream) Release(marker int) {
-}
-
-func (is *InputStream) Seek(index int) {
- if index <= is.index {
- is.index = index // just jump; don't update stream state (line, ...)
- return
- }
- // seek forward
- is.index = intMin(index, is.size)
-}
-
-func (is *InputStream) GetText(start int, stop int) string {
- if stop >= is.size {
- stop = is.size - 1
- }
- if start >= is.size {
- return ""
- }
-
- return string(is.data[start : stop+1])
-}
-
-func (is *InputStream) GetTextFromTokens(start, stop Token) string {
- if start != nil && stop != nil {
- return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex()))
- }
-
- return ""
-}
-
-func (is *InputStream) GetTextFromInterval(i *Interval) string {
- return is.GetText(i.Start, i.Stop)
-}
-
-func (*InputStream) GetSourceName() string {
- return "Obtained from string"
-}
-
-func (is *InputStream) String() string {
- return string(is.data)
-}
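// The LA contract above is easy to misread: offsets are 1-based (LA(1) is
// the current rune), LA(0) is defined to return 0, and negative offsets look
// at already-consumed input. A short, runnable illustration against the
// vendored runtime's import path:
package main

import (
	"fmt"

	antlr "github.com/antlr/antlr4/runtime/Go/antlr/v4"
)

func main() {
	is := antlr.NewInputStream("ab")
	fmt.Println(is.LA(1) == 'a') // true: LA(1) is the current rune
	is.Consume()
	fmt.Println(is.LA(-1) == 'a') // true: LA(-1) is the rune just consumed
	fmt.Println(is.LA(2))         // past the end of "ab": antlr.TokenEOF (-1)
}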
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go
deleted file mode 100644
index e5a74f0c6..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go
+++ /dev/null
@@ -1,198 +0,0 @@
-package antlr
-
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-import (
- "sort"
-)
-
-// Collectable is an interface that a struct should implement if it is to be
-// usable as a key in these collections.
-type Collectable[T any] interface {
- Hash() int
- Equals(other Collectable[T]) bool
-}
-
-type Comparator[T any] interface {
- Hash1(o T) int
- Equals2(T, T) bool
-}
-
-// JStore implements a container that allows the use of a struct to calculate the key
-// for a collection of values akin to map. This is not meant to be a full-blown HashMap but just
-// serve the needs of the ANTLR Go runtime.
-//
-// For ease of porting the logic of the runtime from the master target (Java), this collection
-// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals()
-// function as the key. The values are stored in a standard go map which internally is a form of hashmap
-// itself, the key for the go map is the hash supplied by the key object. The collection is able to deal with
-// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't
-// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and
-// we understand the requirements, then this is fine - this is not a general purpose collection.
-type JStore[T any, C Comparator[T]] struct {
- store map[int][]T
- len int
- comparator Comparator[T]
-}
-
-func NewJStore[T any, C Comparator[T]](comparator Comparator[T]) *JStore[T, C] {
-
- if comparator == nil {
- panic("comparator cannot be nil")
- }
-
- s := &JStore[T, C]{
- store: make(map[int][]T, 1),
- comparator: comparator,
- }
- return s
-}
-
-// Put will store given value in the collection. Note that the key for storage is generated from
-// the value itself - this is specifically because that is what ANTLR needs - this would not be useful
-// as any kind of general collection.
-//
-// If the key has a hash conflict, then the value will be added to the slice of values associated with the
-// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is
-// tested by calling the equals() method on the key.
-//
-// If the given value is already present in the store, then the existing value is returned as v and exists is set to true.
-//
-// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false.
-func (s *JStore[T, C]) Put(value T) (v T, exists bool) { //nolint:ireturn
-
- kh := s.comparator.Hash1(value)
-
- for _, v1 := range s.store[kh] {
- if s.comparator.Equals2(value, v1) {
- return v1, true
- }
- }
- s.store[kh] = append(s.store[kh], value)
- s.len++
- return value, false
-}
-
-// Get will return the value associated with the key - the type of the key is the same type as the value
-// which would not generally be useful, but this is a specific thing for ANTLR where the key is
-// generated using the object we are going to store.
-func (s *JStore[T, C]) Get(key T) (T, bool) { //nolint:ireturn
-
- kh := s.comparator.Hash1(key)
-
- for _, v := range s.store[kh] {
- if s.comparator.Equals2(key, v) {
- return v, true
- }
- }
- return key, false
-}
-
-// Contains returns true if the given key is present in the store
-func (s *JStore[T, C]) Contains(key T) bool { //nolint:ireturn
-
- _, present := s.Get(key)
- return present
-}
-
-func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T {
- vs := make([]T, 0, len(s.store))
- for _, v := range s.store {
- vs = append(vs, v...)
- }
- sort.Slice(vs, func(i, j int) bool {
- return less(vs[i], vs[j])
- })
-
- return vs
-}
-
-func (s *JStore[T, C]) Each(f func(T) bool) {
- for _, e := range s.store {
- for _, v := range e {
- f(v)
- }
- }
-}
-
-func (s *JStore[T, C]) Len() int {
- return s.len
-}
-
-func (s *JStore[T, C]) Values() []T {
- vs := make([]T, 0, len(s.store))
- for _, e := range s.store {
- for _, v := range e {
- vs = append(vs, v)
- }
- }
- return vs
-}
-
-type entry[K, V any] struct {
- key K
- val V
-}
-
-type JMap[K, V any, C Comparator[K]] struct {
- store map[int][]*entry[K, V]
- len int
- comparator Comparator[K]
-}
-
-func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K]) *JMap[K, V, C] {
- return &JMap[K, V, C]{
- store: make(map[int][]*entry[K, V], 1),
- comparator: comparator,
- }
-}
-
-func (m *JMap[K, V, C]) Put(key K, val V) {
- kh := m.comparator.Hash1(key)
-
- m.store[kh] = append(m.store[kh], &entry[K, V]{key, val})
- m.len++
-}
-
-func (m *JMap[K, V, C]) Values() []V {
- vs := make([]V, 0, len(m.store))
- for _, e := range m.store {
- for _, v := range e {
- vs = append(vs, v.val)
- }
- }
- return vs
-}
-
-func (m *JMap[K, V, C]) Get(key K) (V, bool) {
-
- var none V
- kh := m.comparator.Hash1(key)
- for _, e := range m.store[kh] {
- if m.comparator.Equals2(e.key, key) {
- return e.val, true
- }
- }
- return none, false
-}
-
-func (m *JMap[K, V, C]) Len() int {
- // Report the number of stored entries, not the number of hash buckets.
- return m.len
-}
-
-func (m *JMap[K, V, C]) Delete(key K) {
- kh := m.comparator.Hash1(key)
- for i, e := range m.store[kh] {
- if m.comparator.Equals2(e.key, key) {
- m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...)
- m.len--
- return
- }
- }
-}
-
-func (m *JMap[K, V, C]) Clear() {
- m.store = make(map[int][]*entry[K, V])
- m.len = 0
-}
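// JStore and JMap above share one pattern: a Go map keyed by a caller-supplied
// hash, with a slice per bucket that is scanned using caller-supplied
// equality. A minimal standalone sketch of that pattern (not the runtime's
// API); all names here are illustrative.
package main

type hashSet[T any] struct {
	hash    func(T) int
	equals  func(T, T) bool
	buckets map[int][]T
	size    int
}

func newHashSet[T any](hash func(T) int, equals func(T, T) bool) *hashSet[T] {
	return &hashSet[T]{hash: hash, equals: equals, buckets: map[int][]T{}}
}

// put mirrors JStore.Put: it returns the canonical stored value and whether
// an equal value was already present.
func (s *hashSet[T]) put(v T) (T, bool) {
	h := s.hash(v)
	for _, old := range s.buckets[h] { // linear scan resolves hash collisions
		if s.equals(old, v) {
			return old, true
		}
	}
	s.buckets[h] = append(s.buckets[h], v)
	s.size++
	return v, false
}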
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go
deleted file mode 100644
index be1ba7a7e..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go
+++ /dev/null
@@ -1,186 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import "golang.org/x/exp/slices"
-
-// Represents an executor for a sequence of lexer actions which were
-// traversed during the Matching operation of a lexer rule (token).
-//
-// The executor tracks position information for position-dependent lexer actions
-// efficiently, ensuring that actions appearing only at the end of the rule do
-// not cause bloating of the {@link DFA} created for the lexer.
-
-type LexerActionExecutor struct {
- lexerActions []LexerAction
- cachedHash int
-}
-
-func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor {
-
- if lexerActions == nil {
- lexerActions = make([]LexerAction, 0)
- }
-
- l := new(LexerActionExecutor)
-
- l.lexerActions = lexerActions
-
- // Caches the result of {@link //hashCode} since the hash code is an element
- // of the performance-critical {@link LexerATNConfig//hashCode} operation.
- l.cachedHash = murmurInit(57)
- for _, a := range lexerActions {
- l.cachedHash = murmurUpdate(l.cachedHash, a.Hash())
- }
-
- return l
-}
-
-// Creates a {@link LexerActionExecutor} which executes the actions for
-// the input {@code lexerActionExecutor} followed by a specified
-// {@code lexerAction}.
-//
-// @param lexerActionExecutor The executor for actions already traversed by
-// the lexer while Matching a token within a particular
-// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as
-// though it were an empty executor.
-// @param lexerAction The lexer action to execute after the actions
-// specified in {@code lexerActionExecutor}.
-//
-// @return A LexerActionExecutor for executing the combined actions
-// of lexerActionExecutor and lexerAction.
-func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor {
- if lexerActionExecutor == nil {
- return NewLexerActionExecutor([]LexerAction{lexerAction})
- }
-
- return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction))
-}
-
-// Creates a {@link LexerActionExecutor} which encodes the current offset
-// for position-dependent lexer actions.
-//
-// Normally, when the executor encounters lexer actions where
-// {@link LexerAction//isPositionDependent} returns {@code true}, it calls
-// {@link IntStream//seek} on the input {@link CharStream} to set the input
-// position to the end of the current token. This behavior provides
-// for efficient DFA representation of lexer actions which appear at the end
-// of a lexer rule, even when the lexer rule Matches a variable number of
-// characters.
-//
-// Prior to traversing a Match transition in the ATN, the current offset
-// from the token start index is assigned to all position-dependent lexer
-// actions which have not already been assigned a fixed offset. By storing
-// the offsets relative to the token start index, the DFA representation of
-// lexer actions which appear in the middle of tokens remains efficient due
-// to sharing among tokens of the same length, regardless of their absolute
-// position in the input stream.
-//
-// If the current executor already has offsets assigned to all
-// position-dependent lexer actions, the method returns {@code this}.
-//
-// @param offset The current offset to assign to all position-dependent
-// lexer actions which do not already have offsets assigned.
-//
-// @return A {@link LexerActionExecutor} which stores input stream offsets
-// for all position-dependent lexer actions.
-// /
-func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor {
- var updatedLexerActions []LexerAction
- for i := 0; i < len(l.lexerActions); i++ {
- _, ok := l.lexerActions[i].(*LexerIndexedCustomAction)
- if l.lexerActions[i].getIsPositionDependent() && !ok {
- if updatedLexerActions == nil {
- updatedLexerActions = make([]LexerAction, 0)
-
- for _, a := range l.lexerActions {
- updatedLexerActions = append(updatedLexerActions, a)
- }
- }
-
- updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i])
- }
- }
- if updatedLexerActions == nil {
- return l
- }
-
- return NewLexerActionExecutor(updatedLexerActions)
-}
-
-// execute runs the actions encapsulated by this executor within the context
-// of a particular Lexer.
-//
-// This method calls {@link IntStream//seek} to set the position of the
-// {@code input} {@link CharStream} prior to calling
-// {@link LexerAction//execute} on a position-dependent action. Before the
-// method returns, the input position will be restored to the same position
-// it was in when the method was invoked.
-//
-// @param lexer The lexer instance.
-// @param input The input stream which is the source for the current token.
-// When this method is called, the current IntStream index for
-// {@code input} should be the start of the following token, i.e. 1
-// character past the end of the current token.
-// @param startIndex The token start index. This value may be passed to
-// {@link IntStream//seek} to set the {@code input} position to the beginning
-// of the token.
-// /
-func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) {
- requiresSeek := false
- stopIndex := input.Index()
-
- defer func() {
- if requiresSeek {
- input.Seek(stopIndex)
- }
- }()
-
- for i := 0; i < len(l.lexerActions); i++ {
- lexerAction := l.lexerActions[i]
- if la, ok := lexerAction.(*LexerIndexedCustomAction); ok {
- offset := la.offset
- input.Seek(startIndex + offset)
- lexerAction = la.lexerAction
- requiresSeek = (startIndex + offset) != stopIndex
- } else if lexerAction.getIsPositionDependent() {
- input.Seek(stopIndex)
- requiresSeek = false
- }
- lexerAction.execute(lexer)
- }
-}
-
-func (l *LexerActionExecutor) Hash() int {
- if l == nil {
- // TODO: Why is this here? l should not be nil
- return 61
- }
-
- // TODO: This is created from the action itself when the struct is created - will this be an issue at some point? Java uses the runtime assign hashcode
- return l.cachedHash
-}
-
-func (l *LexerActionExecutor) Equals(other interface{}) bool {
- if l == other {
- return true
- }
- othert, ok := other.(*LexerActionExecutor)
- if !ok {
- return false
- }
- if othert == nil {
- return false
- }
- if l.cachedHash != othert.cachedHash {
- return false
- }
- if len(l.lexerActions) != len(othert.lexerActions) {
- return false
- }
- return slices.EqualFunc(l.lexerActions, othert.lexerActions, func(i, j LexerAction) bool {
- return i.Equals(j)
- })
-}
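// fixOffsetBeforeMatch above uses a lazy copy-on-write update: the slice is
// duplicated only once the first element actually needs rewriting, so the
// common case (no position-dependent actions) allocates nothing. The pattern
// in isolation, with plain strings standing in for LexerAction values; the
// function and its parameters are illustrative, not runtime API.
package main

func rewriteLazily(items []string, needsFix func(string) bool, fix func(string) string) []string {
	var updated []string // stays nil until a change is required
	for i, item := range items {
		if !needsFix(item) {
			continue
		}
		if updated == nil {
			updated = append([]string(nil), items...) // first change: copy once
		}
		updated[i] = fix(item)
	}
	if updated == nil {
		return items // untouched: share the original backing array
	}
	return updated
}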
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go
deleted file mode 100644
index 76689615a..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go
+++ /dev/null
@@ -1,216 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-type LL1Analyzer struct {
- atn *ATN
-}
-
-func NewLL1Analyzer(atn *ATN) *LL1Analyzer {
- la := new(LL1Analyzer)
- la.atn = atn
- return la
-}
-
-// LL1AnalyzerHitPred is a special value added to the lookahead sets to
-// indicate that we hit a predicate during analysis if seeThruPreds==false.
-const (
- LL1AnalyzerHitPred = TokenInvalidType
-)
-
-// getDecisionLookahead calculates the SLL(1) expected lookahead set for each
-// outgoing transition of an ATNState. The returned array has one element for
-// each outgoing transition in s. If the closure from transition i leads to a
-// semantic predicate before matching a symbol, the element at index i of the
-// result will be nil.
-//
-// @param s the ATN state
-// @return the expected symbols for each outgoing transition of s.
-func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
- if s == nil {
- return nil
- }
- count := len(s.GetTransitions())
- look := make([]*IntervalSet, count)
- for alt := 0; alt < count; alt++ {
- look[alt] = NewIntervalSet()
- lookBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst)
- seeThruPreds := false // fail to get lookahead upon pred
- la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false)
- // Wipe out the lookahead for this alternative if we found nothing,
- // or we had a predicate when !seeThruPreds
- if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) {
- look[alt] = nil
- }
- }
- return look
-}
-
-// Look computes the set of tokens that can follow s in the ATN in the
-// specified ctx.
-//
-// If {@code ctx} is {@code nil} and the end of the rule containing
-// {@code s} is reached, {@link Token//EPSILON} is added to the result set.
-// If {@code ctx} is not {@code nil} and the end of the outermost rule is
-// reached, {@link Token//EOF} is added to the result set.
-//
-// @param s the ATN state
-// @param stopState the ATN state to stop at. This can be a
-// {@link BlockEndState} to detect epsilon paths through a closure.
-// @param ctx the complete parser context, or {@code nil} if the context
-// should be ignored
-//
-// @return The set of tokens that can follow {@code s} in the ATN in the
-// specified {@code ctx}.
-// /
-func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet {
- r := NewIntervalSet()
- seeThruPreds := true // ignore preds get all lookahead
- var lookContext PredictionContext
- if ctx != nil {
- lookContext = predictionContextFromRuleContext(s.GetATN(), ctx)
- }
- la.look1(s, stopState, lookContext, r, NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst), NewBitSet(), seeThruPreds, true)
- return r
-}
-
-// look1 computes the set of tokens that can follow s in the ATN in the
-// specified ctx.
-//
-// If {@code ctx} is {@code nil} and {@code stopState} or the end of the
-// rule containing {@code s} is reached, {@link Token//EPSILON} is added to
-// the result set. If {@code ctx} is not {@code nil} and {@code addEOF} is
-// {@code true} and {@code stopState} or the end of the outermost rule is
-// reached, {@link Token//EOF} is added to the result set.
-//
-// @param s the ATN state.
-// @param stopState the ATN state to stop at. This can be a
-// {@link BlockEndState} to detect epsilon paths through a closure.
-// @param ctx The outer context, or {@code nil} if the outer context should
-// not be used.
-// @param look The result lookahead set.
-// @param lookBusy A set used for preventing epsilon closures in the ATN
-// from causing a stack overflow. Outside code should pass
-// {@code NewSet} for this argument.
-// @param calledRuleStack A set used for preventing left recursion in the
-// ATN from causing a stack overflow. Outside code should pass
-// {@code NewBitSet()} for this argument.
-// @param seeThruPreds {@code true} to treat semantic predicates as
-// implicitly {@code true} and "see through them", otherwise {@code false}
-// to treat semantic predicates as opaque and add {@link //HitPred} to the
-// result if one is encountered.
-// @param addEOF Add {@link Token//EOF} to the result if the end of the
-// outermost context is reached. This parameter has no effect if {@code ctx}
-// is {@code nil}.
-
-func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) {
-
- returnState := la.atn.states[ctx.getReturnState(i)]
- la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
-
-}
-
-func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) {
-
- c := NewBaseATNConfig6(s, 0, ctx)
-
- if lookBusy.Contains(c) {
- return
- }
-
- _, present := lookBusy.Put(c)
- if present {
- return
-
- }
- if s == stopState {
- if ctx == nil {
- look.addOne(TokenEpsilon)
- return
- } else if ctx.isEmpty() && addEOF {
- look.addOne(TokenEOF)
- return
- }
- }
-
- _, ok := s.(*RuleStopState)
-
- if ok {
- if ctx == nil {
- look.addOne(TokenEpsilon)
- return
- } else if ctx.isEmpty() && addEOF {
- look.addOne(TokenEOF)
- return
- }
-
- if ctx != BasePredictionContextEMPTY {
- removed := calledRuleStack.contains(s.GetRuleIndex())
- defer func() {
- if removed {
- calledRuleStack.add(s.GetRuleIndex())
- }
- }()
- calledRuleStack.remove(s.GetRuleIndex())
- // run thru all possible stack tops in ctx
- for i := 0; i < ctx.length(); i++ {
- returnState := la.atn.states[ctx.getReturnState(i)]
- la.look2(returnState, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, i)
- }
- return
- }
- }
-
- n := len(s.GetTransitions())
-
- for i := 0; i < n; i++ {
- t := s.GetTransitions()[i]
-
- if t1, ok := t.(*RuleTransition); ok {
- if calledRuleStack.contains(t1.getTarget().GetRuleIndex()) {
- continue
- }
-
- newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
- la.look3(stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, t1)
- } else if t2, ok := t.(AbstractPredicateTransition); ok {
- if seeThruPreds {
- la.look1(t2.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
- } else {
- look.addOne(LL1AnalyzerHitPred)
- }
- } else if t.getIsEpsilon() {
- la.look1(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
- } else if _, ok := t.(*WildcardTransition); ok {
- look.addRange(TokenMinUserTokenType, la.atn.maxTokenType)
- } else {
- set := t.getLabel()
- if set != nil {
- if _, ok := t.(*NotSetTransition); ok {
- set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType)
- }
- look.addSet(set)
- }
- }
- }
-}
-
-func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) {
-
- newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
-
- defer func() {
- calledRuleStack.remove(t1.getTarget().GetRuleIndex())
- }()
-
- calledRuleStack.add(t1.getTarget().GetRuleIndex())
- la.look1(t1.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
-
-}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go
deleted file mode 100644
index d26bf0639..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go
+++ /dev/null
@@ -1,708 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
- "strconv"
-)
-
-type Parser interface {
- Recognizer
-
- GetInterpreter() *ParserATNSimulator
-
- GetTokenStream() TokenStream
- GetTokenFactory() TokenFactory
- GetParserRuleContext() ParserRuleContext
- SetParserRuleContext(ParserRuleContext)
- Consume() Token
- GetParseListeners() []ParseTreeListener
-
- GetErrorHandler() ErrorStrategy
- SetErrorHandler(ErrorStrategy)
- GetInputStream() IntStream
- GetCurrentToken() Token
- GetExpectedTokens() *IntervalSet
- NotifyErrorListeners(string, Token, RecognitionException)
- IsExpectedToken(int) bool
- GetPrecedence() int
- GetRuleInvocationStack(ParserRuleContext) []string
-}
-
-type BaseParser struct {
- *BaseRecognizer
-
- Interpreter *ParserATNSimulator
- BuildParseTrees bool
-
- input TokenStream
- errHandler ErrorStrategy
- precedenceStack IntStack
- ctx ParserRuleContext
-
- tracer *TraceListener
- parseListeners []ParseTreeListener
- _SyntaxErrors int
-}
-
-// NewBaseParser contains all the parsing support code; essentially, most of
-// it is error recovery stuff.
-func NewBaseParser(input TokenStream) *BaseParser {
-
- p := new(BaseParser)
-
- p.BaseRecognizer = NewBaseRecognizer()
-
- // The input stream.
- p.input = nil
- // The error handling strategy for the parser. The default value is a new
- // instance of {@link DefaultErrorStrategy}.
- p.errHandler = NewDefaultErrorStrategy()
- p.precedenceStack = make([]int, 0)
- p.precedenceStack.Push(0)
- // The {@link ParserRuleContext} object for the currently executing rule.
-	// This is always non-nil during the parsing process.
- p.ctx = nil
- // Specifies whether or not the parser should construct a parse tree during
- // the parsing process. The default value is {@code true}.
- p.BuildParseTrees = true
- // When {@link //setTrace}{@code (true)} is called, a reference to the
- // {@link TraceListener} is stored here so it can be easily removed in a
- // later call to {@link //setTrace}{@code (false)}. The listener itself is
-	// implemented as a parser listener so this field is not directly used by
- // other parser methods.
- p.tracer = nil
- // The list of {@link ParseTreeListener} listeners registered to receive
- // events during the parse.
- p.parseListeners = nil
-	// The number of syntax errors Reported during parsing. This value is
- // incremented each time {@link //NotifyErrorListeners} is called.
- p._SyntaxErrors = 0
- p.SetInputStream(input)
-
- return p
-}
-
-// bypassAltsAtnCache maps from the serialized ATN string to the deserialized
-// {@link ATN} with bypass alternatives.
-//
-// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions()
-var bypassAltsAtnCache = make(map[string]int)
-
-// reset the parser's state.
-func (p *BaseParser) reset() {
- if p.input != nil {
- p.input.Seek(0)
- }
- p.errHandler.reset(p)
- p.ctx = nil
- p._SyntaxErrors = 0
- p.SetTrace(nil)
- p.precedenceStack = make([]int, 0)
- p.precedenceStack.Push(0)
- if p.Interpreter != nil {
- p.Interpreter.reset()
- }
-}
-
-func (p *BaseParser) GetErrorHandler() ErrorStrategy {
- return p.errHandler
-}
-
-func (p *BaseParser) SetErrorHandler(e ErrorStrategy) {
- p.errHandler = e
-}
-
-// Match current input symbol against {@code ttype}. If the symbol type
-// Matches, {@link ANTLRErrorStrategy//ReportMatch} and {@link //consume} are
-// called to complete the Match process.
-//
-// If the symbol type does not Match,
-// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
-// strategy to attempt recovery. If {@link //getBuildParseTree} is
-// {@code true} and the token index of the symbol returned by
-// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
-// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
-//
-// @param ttype the token type to Match
-// @return the Matched symbol
-// @panics RecognitionException if the current input symbol did not Match
-// {@code ttype} and the error strategy could not recover from the
-// mismatched symbol
-
-func (p *BaseParser) Match(ttype int) Token {
-
- t := p.GetCurrentToken()
-
- if t.GetTokenType() == ttype {
- p.errHandler.ReportMatch(p)
- p.Consume()
- } else {
- t = p.errHandler.RecoverInline(p)
- if p.BuildParseTrees && t.GetTokenIndex() == -1 {
-			// we must have conjured up a new token during single-token
-			// insertion if it's not the current symbol
- p.ctx.AddErrorNode(t)
- }
- }
-
- return t
-}
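Match is the primitive that generated rule methods are assembled from. A hedged sketch of that generated shape (the parser type and token constants are hypothetical, not from any real grammar):

// Hypothetical generated rule method for `pair : KEY ':' VALUE ;`.
func (p *MyParser) pair() {
	p.Match(MyParserKEY)   // on success: ReportMatch, then Consume
	p.Match(MyParserCOLON) // on mismatch: errHandler.RecoverInline, which may
	p.Match(MyParserVALUE) // conjure a token that is added as an error node
}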
-
-// Match current input symbol as a wildcard. If the symbol type Matches
-// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//ReportMatch}
-// and {@link //consume} are called to complete the Match process.
-//
-// If the symbol type does not Match,
-// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
-// strategy to attempt recovery. If {@link //getBuildParseTree} is
-// {@code true} and the token index of the symbol returned by
-// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
-// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
-//
-// @return the Matched symbol
-// @panics RecognitionException if the current input symbol did not Match
-// a wildcard and the error strategy could not recover from the mismatched
-// symbol
-
-func (p *BaseParser) MatchWildcard() Token {
- t := p.GetCurrentToken()
- if t.GetTokenType() > 0 {
- p.errHandler.ReportMatch(p)
- p.Consume()
- } else {
- t = p.errHandler.RecoverInline(p)
- if p.BuildParseTrees && t.GetTokenIndex() == -1 {
-			// we must have conjured up a new token during single-token
-			// insertion if it's not the current symbol
- p.ctx.AddErrorNode(t)
- }
- }
- return t
-}
-
-func (p *BaseParser) GetParserRuleContext() ParserRuleContext {
- return p.ctx
-}
-
-func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) {
- p.ctx = v
-}
-
-func (p *BaseParser) GetParseListeners() []ParseTreeListener {
- if p.parseListeners == nil {
- return make([]ParseTreeListener, 0)
- }
- return p.parseListeners
-}
-
-// Registers {@code listener} to receive events during the parsing process.
-//
-// To support output-preserving grammar transformations (including but not
-// limited to left-recursion removal, automated left-factoring, and
-// optimized code generation), calls to listener methods during the parse
-// may differ substantially from calls made by
-// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In
-// particular, rule entry and exit events may occur in a different order
-// during the parse than after the parse. In addition, calls to certain
-// rule entry methods may be omitted.
-//
-// With the following specific exceptions, calls to listener events are
-// deterministic, i.e. for identical input the calls to listener
-// methods will be the same.
-//
-//
-// - Alterations to the grammar used to generate code may change the
-// behavior of the listener calls.
-// - Alterations to the command line options passed to ANTLR 4 when
-// generating the parser may change the behavior of the listener calls.
-// - Changing the version of the ANTLR Tool used to generate the parser
-// may change the behavior of the listener calls.
-//
-//
-// @param listener the listener to add
-//
-// @panics nilPointerException if {@code listener} is {@code nil}
-func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
- if listener == nil {
- panic("listener")
- }
- if p.parseListeners == nil {
- p.parseListeners = make([]ParseTreeListener, 0)
- }
- p.parseListeners = append(p.parseListeners, listener)
-}
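Unlike a post-parse ParseTreeWalker pass, listeners registered here fire while the parse is still running, in parse order. A small sketch under that assumption (ruleTracer is hypothetical; BaseParseTreeListener is this runtime's no-op base):

import (
	"fmt"

	antlr "github.com/antlr/antlr4/runtime/Go/antlr/v4"
)

type ruleTracer struct {
	*antlr.BaseParseTreeListener
}

func (t *ruleTracer) EnterEveryRule(ctx antlr.ParserRuleContext) {
	fmt.Println("enter rule", ctx.GetRuleIndex())
}

// Usage: p.AddParseListener(&ruleTracer{}) before invoking the start rule.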
-
-// Remove {@code listener} from the list of parse listeners.
-//
-// If {@code listener} is {@code nil} or has not been added as a parse
-// listener, this method does nothing.
-// @param listener the listener to remove
-func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) {
-
- if p.parseListeners != nil {
-
- idx := -1
- for i, v := range p.parseListeners {
- if v == listener {
- idx = i
- break
- }
- }
-
- if idx == -1 {
- return
- }
-
- // remove the listener from the slice
- p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...)
-
- if len(p.parseListeners) == 0 {
- p.parseListeners = nil
- }
- }
-}
-
-// Remove all parse listeners.
-func (p *BaseParser) removeParseListeners() {
- p.parseListeners = nil
-}
-
-// Notify any parse listeners of an enter rule event.
-func (p *BaseParser) TriggerEnterRuleEvent() {
- if p.parseListeners != nil {
- ctx := p.ctx
- for _, listener := range p.parseListeners {
- listener.EnterEveryRule(ctx)
- ctx.EnterRule(listener)
- }
- }
-}
-
-// Notify any parse listeners of an exit rule event.
-//
-// @see //addParseListener
-func (p *BaseParser) TriggerExitRuleEvent() {
- if p.parseListeners != nil {
- // reverse order walk of listeners
- ctx := p.ctx
- l := len(p.parseListeners) - 1
-
- for i := range p.parseListeners {
- listener := p.parseListeners[l-i]
- ctx.ExitRule(listener)
- listener.ExitEveryRule(ctx)
- }
- }
-}
-
-func (p *BaseParser) GetInterpreter() *ParserATNSimulator {
- return p.Interpreter
-}
-
-func (p *BaseParser) GetATN() *ATN {
- return p.Interpreter.atn
-}
-
-func (p *BaseParser) GetTokenFactory() TokenFactory {
- return p.input.GetTokenSource().GetTokenFactory()
-}
-
-// setTokenFactory tells our token source and error strategy about a new way to create tokens.
-func (p *BaseParser) setTokenFactory(factory TokenFactory) {
- p.input.GetTokenSource().setTokenFactory(factory)
-}
-
-// The ATN with bypass alternatives is expensive to create so we create it
-// lazily.
-//
-// @panics UnsupportedOperationException if the current parser does not
-// implement the {@link //getSerializedATN()} method.
-func (p *BaseParser) GetATNWithBypassAlts() {
-
- // TODO
- panic("Not implemented!")
-
- // serializedAtn := p.getSerializedATN()
- // if (serializedAtn == nil) {
- // panic("The current parser does not support an ATN with bypass alternatives.")
- // }
- // result := p.bypassAltsAtnCache[serializedAtn]
- // if (result == nil) {
- // deserializationOptions := NewATNDeserializationOptions(nil)
- // deserializationOptions.generateRuleBypassTransitions = true
- // result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn)
- // p.bypassAltsAtnCache[serializedAtn] = result
- // }
- // return result
-}
-
-// The preferred method of getting a tree pattern. For example, here's a
-// sample use:
-//
-//
-// ParseTree t = parser.expr()
-// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0",
-// MyParser.RULE_expr)
-// ParseTreeMatch m = p.Match(t)
-// String id = m.Get("ID")
-//
-
-func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) {
-
- panic("NewParseTreePatternMatcher not implemented!")
- //
- // if (lexer == nil) {
- // if (p.GetTokenStream() != nil) {
- // tokenSource := p.GetTokenStream().GetTokenSource()
- // if _, ok := tokenSource.(ILexer); ok {
- // lexer = tokenSource
- // }
- // }
- // }
- // if (lexer == nil) {
- // panic("Parser can't discover a lexer to use")
- // }
-
- // m := NewParseTreePatternMatcher(lexer, p)
- // return m.compile(pattern, patternRuleIndex)
-}
-
-func (p *BaseParser) GetInputStream() IntStream {
- return p.GetTokenStream()
-}
-
-func (p *BaseParser) SetInputStream(input TokenStream) {
- p.SetTokenStream(input)
-}
-
-func (p *BaseParser) GetTokenStream() TokenStream {
- return p.input
-}
-
-// SetTokenStream sets the token stream and resets the parser.
-func (p *BaseParser) SetTokenStream(input TokenStream) {
- p.input = nil
- p.reset()
- p.input = input
-}
-
-// Match needs to return the current input symbol, which gets put
-// into the label for the associated token ref e.g., x=ID.
-func (p *BaseParser) GetCurrentToken() Token {
- return p.input.LT(1)
-}
-
-func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) {
- if offendingToken == nil {
- offendingToken = p.GetCurrentToken()
- }
- p._SyntaxErrors++
- line := offendingToken.GetLine()
- column := offendingToken.GetColumn()
- listener := p.GetErrorListenerDispatch()
- listener.SyntaxError(p, offendingToken, line, column, msg, err)
-}
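This dispatch is what ultimately feeds the recognizer's attached error listeners. A common setup, sketched with listener constructors that exist in this runtime:

// Swap the default console listener for the diagnostic listener, which
// also reports ambiguities detected during prediction.
p.RemoveErrorListeners()
p.AddErrorListener(antlr.NewDiagnosticErrorListener(true))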
-
-func (p *BaseParser) Consume() Token {
- o := p.GetCurrentToken()
- if o.GetTokenType() != TokenEOF {
- p.GetInputStream().Consume()
- }
- hasListener := p.parseListeners != nil && len(p.parseListeners) > 0
- if p.BuildParseTrees || hasListener {
- if p.errHandler.InErrorRecoveryMode(p) {
- node := p.ctx.AddErrorNode(o)
- if p.parseListeners != nil {
- for _, l := range p.parseListeners {
- l.VisitErrorNode(node)
- }
- }
-
- } else {
- node := p.ctx.AddTokenNode(o)
- if p.parseListeners != nil {
- for _, l := range p.parseListeners {
- l.VisitTerminal(node)
- }
- }
- }
- // node.invokingState = p.state
- }
-
- return o
-}
-
-func (p *BaseParser) addContextToParseTree() {
- // add current context to parent if we have a parent
- if p.ctx.GetParent() != nil {
- p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx)
- }
-}
-
-func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) {
- p.SetState(state)
- p.ctx = localctx
- p.ctx.SetStart(p.input.LT(1))
- if p.BuildParseTrees {
- p.addContextToParseTree()
- }
- if p.parseListeners != nil {
- p.TriggerEnterRuleEvent()
- }
-}
-
-func (p *BaseParser) ExitRule() {
- p.ctx.SetStop(p.input.LT(-1))
- // trigger event on ctx, before it reverts to parent
- if p.parseListeners != nil {
- p.TriggerExitRuleEvent()
- }
- p.SetState(p.ctx.GetInvokingState())
- if p.ctx.GetParent() != nil {
- p.ctx = p.ctx.GetParent().(ParserRuleContext)
- } else {
- p.ctx = nil
- }
-}
-
-func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) {
- localctx.SetAltNumber(altNum)
-	// if we have a new localctx, make sure we replace the existing ctx
-	// that is the previous child of the parse tree
- if p.BuildParseTrees && p.ctx != localctx {
- if p.ctx.GetParent() != nil {
- p.ctx.GetParent().(ParserRuleContext).RemoveLastChild()
- p.ctx.GetParent().(ParserRuleContext).AddChild(localctx)
- }
- }
- p.ctx = localctx
-}
-
-// Get the precedence level for the top-most precedence rule.
-//
-// @return The precedence level for the top-most precedence rule, or -1 if
-// the parser context is not nested within a precedence rule.
-
-func (p *BaseParser) GetPrecedence() int {
- if len(p.precedenceStack) == 0 {
- return -1
- }
-
- return p.precedenceStack[len(p.precedenceStack)-1]
-}
-
-func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int) {
- p.SetState(state)
- p.precedenceStack.Push(precedence)
- p.ctx = localctx
- p.ctx.SetStart(p.input.LT(1))
- if p.parseListeners != nil {
- p.TriggerEnterRuleEvent() // simulates rule entry for
- // left-recursive rules
- }
-}
-
-//
-// Like {@link //EnterRule} but for recursive rules.
-
-func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) {
- previous := p.ctx
- previous.SetParent(localctx)
- previous.SetInvokingState(state)
- previous.SetStop(p.input.LT(-1))
-
- p.ctx = localctx
- p.ctx.SetStart(previous.GetStart())
- if p.BuildParseTrees {
- p.ctx.AddChild(previous)
- }
- if p.parseListeners != nil {
- p.TriggerEnterRuleEvent() // simulates rule entry for
- // left-recursive rules
- }
-}
-
-func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) {
- p.precedenceStack.Pop()
- p.ctx.SetStop(p.input.LT(-1))
- retCtx := p.ctx // save current ctx (return value)
- // unroll so ctx is as it was before call to recursive method
- if p.parseListeners != nil {
- for p.ctx != parentCtx {
- p.TriggerExitRuleEvent()
- p.ctx = p.ctx.GetParent().(ParserRuleContext)
- }
- } else {
- p.ctx = parentCtx
- }
- // hook into tree
- retCtx.SetParent(parentCtx)
- if p.BuildParseTrees && parentCtx != nil {
- // add return ctx into invoking rule's tree
- parentCtx.AddChild(retCtx)
- }
-}
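EnterRecursionRule, PushNewRecursionContext, and UnrollRecursionContexts exist to support ANTLR's left-recursion elimination: generated code parses the primary alternative once, then loops, re-parenting the context on each iteration. A hedged sketch of that generated shape, with every MyParser/Expr name and state constant hypothetical:

// Hypothetical generated method for a left-recursive rule
// `expr : expr '+' expr | INT ;` after ANTLR's rewrite.
func (p *MyParser) expr(precedence int) {
	parentCtx := p.GetParserRuleContext()
	localctx := NewExprContext(p, parentCtx) // hypothetical context type
	p.EnterRecursionRule(localctx, exprStartState, ruleExpr, precedence)
	defer p.UnrollRecursionContexts(parentCtx)

	p.Match(MyParserINT) // primary (non-recursive) alternative
	for p.Precpred(localctx, 1) { // a loop replaces the left recursion
		localctx = NewExprContext(p, p.GetParserRuleContext())
		p.PushNewRecursionContext(localctx, exprStartState, ruleExpr)
		p.Match(MyParserPLUS)
		p.expr(2) // recurse at the next precedence level
	}
}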
-
-func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext {
- ctx := p.ctx
- for ctx != nil {
- if ctx.GetRuleIndex() == ruleIndex {
- return ctx
- }
- ctx = ctx.GetParent().(ParserRuleContext)
- }
- return nil
-}
-
-func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool {
- return precedence >= p.precedenceStack[len(p.precedenceStack)-1]
-}
-
-func (p *BaseParser) inContext(context ParserRuleContext) bool {
- // TODO: useful in parser?
- return false
-}
-
-//
-// Checks whether or not {@code symbol} can follow the current state in the
-// ATN. The behavior of p.method is equivalent to the following, but is
-// implemented such that the complete context-sensitive follow set does not
-// need to be explicitly constructed.
-//
-//
-// return getExpectedTokens().contains(symbol)
-//
-//
-// @param symbol the symbol type to check
-// @return {@code true} if {@code symbol} can follow the current state in
-// the ATN, otherwise {@code false}.
-
-func (p *BaseParser) IsExpectedToken(symbol int) bool {
- atn := p.Interpreter.atn
- ctx := p.ctx
- s := atn.states[p.state]
- following := atn.NextTokens(s, nil)
- if following.contains(symbol) {
- return true
- }
- if !following.contains(TokenEpsilon) {
- return false
- }
- for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) {
- invokingState := atn.states[ctx.GetInvokingState()]
- rt := invokingState.GetTransitions()[0]
- following = atn.NextTokens(rt.(*RuleTransition).followState, nil)
- if following.contains(symbol) {
- return true
- }
- ctx = ctx.GetParent().(ParserRuleContext)
- }
- if following.contains(TokenEpsilon) && symbol == TokenEOF {
- return true
- }
-
- return false
-}
-
-// Computes the set of input symbols which could follow the current parser
-// state and context, as given by {@link //GetState} and {@link //GetContext},
-// respectively.
-//
-// @see ATN//getExpectedTokens(int, RuleContext)
-func (p *BaseParser) GetExpectedTokens() *IntervalSet {
- return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx)
-}
-
-func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet {
- atn := p.Interpreter.atn
- s := atn.states[p.state]
- return atn.NextTokens(s, nil)
-}
-
-// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.
-func (p *BaseParser) GetRuleIndex(ruleName string) int {
- var ruleIndex, ok = p.GetRuleIndexMap()[ruleName]
- if ok {
- return ruleIndex
- }
-
- return -1
-}
-
-// Return List<String> of the rule names in your parser instance
-// leading up to a call to the current rule. You could override if
-// you want more details such as the file/line info of where
-// in the ATN a rule is invoked.
-//
-// This is very useful for error messages.
-
-func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string {
- if c == nil {
- c = p.ctx
- }
- stack := make([]string, 0)
- for c != nil {
- // compute what follows who invoked us
- ruleIndex := c.GetRuleIndex()
- if ruleIndex < 0 {
- stack = append(stack, "n/a")
- } else {
- stack = append(stack, p.GetRuleNames()[ruleIndex])
- }
-
- vp := c.GetParent()
-
- if vp == nil {
- break
- }
-
- c = vp.(ParserRuleContext)
- }
- return stack
-}
-
-// For debugging and other purposes.
-func (p *BaseParser) GetDFAStrings() string {
- return fmt.Sprint(p.Interpreter.decisionToDFA)
-}
-
-// For debugging and other purposes.
-func (p *BaseParser) DumpDFA() {
- seenOne := false
- for _, dfa := range p.Interpreter.decisionToDFA {
- if dfa.states.Len() > 0 {
- if seenOne {
- fmt.Println()
- }
- fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":")
- fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames))
- seenOne = true
- }
- }
-}
-
-func (p *BaseParser) GetSourceName() string {
- return p.GrammarFileName
-}
-
-// During a parse it is sometimes useful to listen in on the rule entry and exit
-// events as well as token Matches. This is for quick and dirty debugging.
-func (p *BaseParser) SetTrace(trace *TraceListener) {
- if trace == nil {
- p.RemoveParseListener(p.tracer)
- p.tracer = nil
- } else {
- if p.tracer != nil {
- p.RemoveParseListener(p.tracer)
- }
- p.tracer = NewTraceListener(p)
- p.AddParseListener(p.tracer)
- }
-}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go
deleted file mode 100644
index 8bcc46a0d..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go
+++ /dev/null
@@ -1,1559 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
- "strconv"
- "strings"
-)
-
-var (
- ParserATNSimulatorDebug = false
- ParserATNSimulatorTraceATNSim = false
- ParserATNSimulatorDFADebug = false
- ParserATNSimulatorRetryDebug = false
- TurnOffLRLoopEntryBranchOpt = false
-)
-
-type ParserATNSimulator struct {
- *BaseATNSimulator
-
- parser Parser
- predictionMode int
- input TokenStream
- startIndex int
- dfa *DFA
- mergeCache *DoubleDict
- outerContext ParserRuleContext
-}
-
-func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator {
-
- p := new(ParserATNSimulator)
-
- p.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache)
-
- p.parser = parser
- p.decisionToDFA = decisionToDFA
- // SLL, LL, or LL + exact ambig detection?//
-	// SLL, LL, or LL + exact ambig detection?
- // LAME globals to avoid parameters!!!!! I need these down deep in predTransition
- p.input = nil
- p.startIndex = 0
- p.outerContext = nil
- p.dfa = nil
- // Each prediction operation uses a cache for merge of prediction contexts.
- // Don't keep around as it wastes huge amounts of memory. DoubleKeyMap
- // isn't Synchronized but we're ok since two threads shouldn't reuse same
- // parser/atnsim object because it can only handle one input at a time.
-	// This maps graphs a and b to merged result c. (a,b)→c. We can avoid
-	// the merge if we ever see a and b again. Note that (b,a)→c should
- // also be examined during cache lookup.
- //
- p.mergeCache = nil
-
- return p
-}
-
-func (p *ParserATNSimulator) GetPredictionMode() int {
- return p.predictionMode
-}
-
-func (p *ParserATNSimulator) SetPredictionMode(v int) {
- p.predictionMode = v
-}
-
-func (p *ParserATNSimulator) reset() {
-}
-
-func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext ParserRuleContext) int {
- if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim {
- fmt.Println("adaptivePredict decision " + strconv.Itoa(decision) +
- " exec LA(1)==" + p.getLookaheadName(input) +
- " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" +
- strconv.Itoa(input.LT(1).GetColumn()))
- }
-
- p.input = input
- p.startIndex = input.Index()
- p.outerContext = outerContext
-
- dfa := p.decisionToDFA[decision]
- p.dfa = dfa
- m := input.Mark()
- index := input.Index()
-
- defer func() {
- p.dfa = nil
-		p.mergeCache = nil // whack cache after each prediction
- input.Seek(index)
- input.Release(m)
- }()
-
- // Now we are certain to have a specific decision's DFA
- // But, do we still need an initial state?
- var s0 *DFAState
- p.atn.stateMu.RLock()
- if dfa.getPrecedenceDfa() {
- p.atn.edgeMu.RLock()
- // the start state for a precedence DFA depends on the current
- // parser precedence, and is provided by a DFA method.
- s0 = dfa.getPrecedenceStartState(p.parser.GetPrecedence())
- p.atn.edgeMu.RUnlock()
- } else {
- // the start state for a "regular" DFA is just s0
- s0 = dfa.getS0()
- }
- p.atn.stateMu.RUnlock()
-
- if s0 == nil {
- if outerContext == nil {
- outerContext = ParserRuleContextEmpty
- }
- if ParserATNSimulatorDebug {
- fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) +
- " exec LA(1)==" + p.getLookaheadName(input) +
- ", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil))
- }
- fullCtx := false
- s0Closure := p.computeStartState(dfa.atnStartState, ParserRuleContextEmpty, fullCtx)
-
- p.atn.stateMu.Lock()
- if dfa.getPrecedenceDfa() {
- // If p is a precedence DFA, we use applyPrecedenceFilter
- // to convert the computed start state to a precedence start
- // state. We then use DFA.setPrecedenceStartState to set the
- // appropriate start state for the precedence level rather
- // than simply setting DFA.s0.
- //
- dfa.s0.configs = s0Closure
- s0Closure = p.applyPrecedenceFilter(s0Closure)
- s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure))
- p.atn.edgeMu.Lock()
- dfa.setPrecedenceStartState(p.parser.GetPrecedence(), s0)
- p.atn.edgeMu.Unlock()
- } else {
- s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure))
- dfa.setS0(s0)
- }
- p.atn.stateMu.Unlock()
- }
-
- alt := p.execATN(dfa, s0, input, index, outerContext)
- if ParserATNSimulatorDebug {
- fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil))
- }
- return alt
-
-}
-
-// Performs ATN simulation to compute a predicted alternative based
-// upon the remaining input, but also updates the DFA cache to avoid
-// having to traverse the ATN again for the same input sequence.
-
-// There are some key conditions we're looking for after computing a new
-// set of ATN configs (proposed DFA state):
-// if the set is empty, there is no viable alternative for current symbol
-// does the state uniquely predict an alternative?
-// does the state have a conflict that would prevent us from
-// putting it on the work list?
-
-// We also have some key operations to do:
-// add an edge from the previous DFA state to the potentially new DFA state, D,
-// upon current symbol but only if adding to work list, which means in all
-// cases except no viable alternative (and possibly non-greedy decisions?)
-// collecting predicates and adding semantic context to DFA accept states
-// adding rule context to context-sensitive DFA accept states
-// consuming an input symbol
-// Reporting a conflict
-// Reporting an ambiguity
-// Reporting a context sensitivity
-// Reporting insufficient predicates
-
-// cover these cases:
-//
-// dead end
-// single alt
-// single alt + preds
-// conflict
-// conflict + preds
-func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) int {
-
- if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim {
- fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) +
- ", DFA state " + s0.String() +
- ", LA(1)==" + p.getLookaheadName(input) +
- " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn()))
- }
-
- previousD := s0
-
- if ParserATNSimulatorDebug {
- fmt.Println("s0 = " + s0.String())
- }
- t := input.LA(1)
- for { // for more work
- D := p.getExistingTargetState(previousD, t)
- if D == nil {
- D = p.computeTargetState(dfa, previousD, t)
- }
- if D == ATNSimulatorError {
- // if any configs in previous dipped into outer context, that
- // means that input up to t actually finished entry rule
- // at least for SLL decision. Full LL doesn't dip into outer
- // so don't need special case.
- // We will get an error no matter what so delay until after
- // decision better error message. Also, no reachable target
- // ATN states in SLL implies LL will also get nowhere.
- // If conflict in states that dip out, choose min since we
- // will get error no matter what.
- e := p.noViableAlt(input, outerContext, previousD.configs, startIndex)
- input.Seek(startIndex)
- alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext)
- if alt != ATNInvalidAltNumber {
- return alt
- }
-
- panic(e)
- }
- if D.requiresFullContext && p.predictionMode != PredictionModeSLL {
- // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error)
- conflictingAlts := D.configs.GetConflictingAlts()
- if D.predicates != nil {
- if ParserATNSimulatorDebug {
- fmt.Println("DFA state has preds in DFA sim LL failover")
- }
- conflictIndex := input.Index()
- if conflictIndex != startIndex {
- input.Seek(startIndex)
- }
- conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true)
- if conflictingAlts.length() == 1 {
- if ParserATNSimulatorDebug {
- fmt.Println("Full LL avoided")
- }
- return conflictingAlts.minValue()
- }
- if conflictIndex != startIndex {
- // restore the index so Reporting the fallback to full
- // context occurs with the index at the correct spot
- input.Seek(conflictIndex)
- }
- }
- if ParserATNSimulatorDFADebug {
- fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String())
- }
- fullCtx := true
- s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx)
- p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index())
- alt := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext)
- return alt
- }
- if D.isAcceptState {
- if D.predicates == nil {
- return D.prediction
- }
- stopIndex := input.Index()
- input.Seek(startIndex)
- alts := p.evalSemanticContext(D.predicates, outerContext, true)
-
- switch alts.length() {
- case 0:
- panic(p.noViableAlt(input, outerContext, D.configs, startIndex))
- case 1:
- return alts.minValue()
- default:
- // Report ambiguity after predicate evaluation to make sure the correct set of ambig alts is Reported.
- p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs)
- return alts.minValue()
- }
- }
- previousD = D
-
- if t != TokenEOF {
- input.Consume()
- t = input.LA(1)
- }
- }
-}
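execATN and execATNWithFullContext together implement the two-stage ALL(*) strategy: cheap SLL prediction first, with a failover to full LL only when SLL hits an unresolvable conflict. Callers can exploit the same split explicitly; a sketch of that well-known pattern using this runtime's exported APIs (MyParser and its Document start rule are hypothetical):

func parseTwoStage(tokens antlr.TokenStream, p *MyParser) {
	p.GetInterpreter().SetPredictionMode(antlr.PredictionModeSLL)
	p.SetErrorHandler(antlr.NewBailErrorStrategy()) // panics on first error
	defer func() {
		if r := recover(); r != nil {
			tokens.Seek(0) // rewind and retry with full LL prediction
			p.GetInterpreter().SetPredictionMode(antlr.PredictionModeLL)
			p.SetErrorHandler(antlr.NewDefaultErrorStrategy())
			p.Document()
		}
	}()
	p.Document()
}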
-
-// Get an existing target state for an edge in the DFA. If the target state
-// for the edge has not yet been computed or is otherwise not available,
-// this method returns {@code nil}.
-//
-// @param previousD The current DFA state
-// @param t The next input symbol
-// @return The existing target DFA state for the given input symbol
-// {@code t}, or {@code nil} if the target state for this edge is not
-// already cached
-
-func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) *DFAState {
- if t+1 < 0 {
- return nil
- }
-
- p.atn.edgeMu.RLock()
- defer p.atn.edgeMu.RUnlock()
- edges := previousD.getEdges()
- if edges == nil || t+1 >= len(edges) {
- return nil
- }
- return previousD.getIthEdge(t + 1)
-}
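One reviewing note on the t+1 indexing above: token types start at TokenEOF == -1 in this runtime, so slot 0 of a DFA state's edge array is reserved for EOF and every real token type is shifted up by one. Schematically:

// edges[0]     -> target DFA state on EOF (t == -1, hence the t+1 < 0 guard)
// edges[t + 1] -> target DFA state on token type t >= 0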
-
-// Compute a target state for an edge in the DFA, and attempt to add the
-// computed state and corresponding edge to the DFA.
-//
-// @param dfa The DFA
-// @param previousD The current DFA state
-// @param t The next input symbol
-//
-// @return The computed target DFA state for the given input symbol
-// {@code t}. If {@code t} does not lead to a valid DFA state, this method
-// returns {@link //ERROR}.
-
-func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState {
- reach := p.computeReachSet(previousD.configs, t, false)
-
- if reach == nil {
- p.addDFAEdge(dfa, previousD, t, ATNSimulatorError)
- return ATNSimulatorError
- }
-	// create a new target state; we'll add it to the DFA after it's complete
- D := NewDFAState(-1, reach)
-
- predictedAlt := p.getUniqueAlt(reach)
-
- if ParserATNSimulatorDebug {
- altSubSets := PredictionModegetConflictingAltSubsets(reach)
- fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) +
- ", previous=" + previousD.configs.String() +
- ", configs=" + reach.String() +
- ", predict=" + strconv.Itoa(predictedAlt) +
- ", allSubsetsConflict=" +
- fmt.Sprint(PredictionModeallSubsetsConflict(altSubSets)) +
- ", conflictingAlts=" + p.getConflictingAlts(reach).String())
- }
- if predictedAlt != ATNInvalidAltNumber {
- // NO CONFLICT, UNIQUELY PREDICTED ALT
- D.isAcceptState = true
- D.configs.SetUniqueAlt(predictedAlt)
- D.setPrediction(predictedAlt)
- } else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) {
- // MORE THAN ONE VIABLE ALTERNATIVE
- D.configs.SetConflictingAlts(p.getConflictingAlts(reach))
- D.requiresFullContext = true
-		// in SLL-only mode, we will stop at this state and return the minimum alt
- D.isAcceptState = true
- D.setPrediction(D.configs.GetConflictingAlts().minValue())
- }
- if D.isAcceptState && D.configs.HasSemanticContext() {
- p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision))
- if D.predicates != nil {
- D.setPrediction(ATNInvalidAltNumber)
- }
- }
- // all adds to dfa are done after we've created full D state
- D = p.addDFAEdge(dfa, previousD, t, D)
- return D
-}
-
-func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState DecisionState) {
- // We need to test all predicates, even in DFA states that
- // uniquely predict alternative.
- nalts := len(decisionState.GetTransitions())
- // Update DFA so reach becomes accept state with (predicate,alt)
- // pairs if preds found for conflicting alts
- altsToCollectPredsFrom := p.getConflictingAltsOrUniqueAlt(dfaState.configs)
- altToPred := p.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts)
- if altToPred != nil {
- dfaState.predicates = p.getPredicatePredictions(altsToCollectPredsFrom, altToPred)
- dfaState.setPrediction(ATNInvalidAltNumber) // make sure we use preds
- } else {
- // There are preds in configs but they might go away
- // when OR'd together like {p}? || NONE == NONE. If neither
- // alt has preds, resolve to min alt
- dfaState.setPrediction(altsToCollectPredsFrom.minValue())
- }
-}
-
-// comes back with reach.uniqueAlt set to a valid alt
-func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) int {
-
- if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim {
- fmt.Println("execATNWithFullContext " + s0.String())
- }
-
- fullCtx := true
- foundExactAmbig := false
- var reach ATNConfigSet
- previous := s0
- input.Seek(startIndex)
- t := input.LA(1)
- predictedAlt := -1
-
- for { // for more work
- reach = p.computeReachSet(previous, t, fullCtx)
- if reach == nil {
- // if any configs in previous dipped into outer context, that
- // means that input up to t actually finished entry rule
- // at least for LL decision. Full LL doesn't dip into outer
- // so don't need special case.
- // We will get an error no matter what so delay until after
- // decision better error message. Also, no reachable target
- // ATN states in SLL implies LL will also get nowhere.
- // If conflict in states that dip out, choose min since we
- // will get error no matter what.
- e := p.noViableAlt(input, outerContext, previous, startIndex)
- input.Seek(startIndex)
- alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext)
- if alt != ATNInvalidAltNumber {
- return alt
- }
-
- panic(e)
- }
- altSubSets := PredictionModegetConflictingAltSubsets(reach)
- if ParserATNSimulatorDebug {
- fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" +
- strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" +
- fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets)))
- }
- reach.SetUniqueAlt(p.getUniqueAlt(reach))
- // unique prediction?
- if reach.GetUniqueAlt() != ATNInvalidAltNumber {
- predictedAlt = reach.GetUniqueAlt()
- break
- }
- if p.predictionMode != PredictionModeLLExactAmbigDetection {
- predictedAlt = PredictionModeresolvesToJustOneViableAlt(altSubSets)
- if predictedAlt != ATNInvalidAltNumber {
- break
- }
- } else {
- // In exact ambiguity mode, we never try to terminate early.
- // Just keeps scarfing until we know what the conflict is
- if PredictionModeallSubsetsConflict(altSubSets) && PredictionModeallSubsetsEqual(altSubSets) {
- foundExactAmbig = true
- predictedAlt = PredictionModegetSingleViableAlt(altSubSets)
- break
- }
- // else there are multiple non-conflicting subsets or
- // we're not sure what the ambiguity is yet.
- // So, keep going.
- }
- previous = reach
- if t != TokenEOF {
- input.Consume()
- t = input.LA(1)
- }
- }
- // If the configuration set uniquely predicts an alternative,
- // without conflict, then we know that it's a full LL decision
- // not SLL.
- if reach.GetUniqueAlt() != ATNInvalidAltNumber {
- p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index())
- return predictedAlt
- }
- // We do not check predicates here because we have checked them
- // on-the-fly when doing full context prediction.
-
- //
- // In non-exact ambiguity detection mode, we might actually be able to
- // detect an exact ambiguity, but I'm not going to spend the cycles
- // needed to check. We only emit ambiguity warnings in exact ambiguity
- // mode.
- //
- // For example, we might know that we have conflicting configurations.
- // But, that does not mean that there is no way forward without a
- // conflict. It's possible to have nonconflicting alt subsets as in:
-
- // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}]
-
- // from
- //
- // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]),
- // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])]
- //
-	// In this case, (17,1,[5 $]) indicates there is some next sequence that
-	// would resolve this without conflict to alternative 1. Any other viable
- // next sequence, however, is associated with a conflict. We stop
- // looking for input because no amount of further lookahead will alter
- // the fact that we should predict alternative 1. We just can't say for
- // sure that there is an ambiguity without looking further.
-
- p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, reach.Alts(), reach)
-
- return predictedAlt
-}
-
-func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCtx bool) ATNConfigSet {
- if p.mergeCache == nil {
- p.mergeCache = NewDoubleDict()
- }
- intermediate := NewBaseATNConfigSet(fullCtx)
-
- // Configurations already in a rule stop state indicate reaching the end
- // of the decision rule (local context) or end of the start rule (full
- // context). Once reached, these configurations are never updated by a
- // closure operation, so they are handled separately for the performance
- // advantage of having a smaller intermediate set when calling closure.
- //
- // For full-context reach operations, separate handling is required to
- // ensure that the alternative Matching the longest overall sequence is
- // chosen when multiple such configurations can Match the input.
-
- var skippedStopStates []*BaseATNConfig
-
- // First figure out where we can reach on input t
- for _, c := range closure.GetItems() {
- if ParserATNSimulatorDebug {
- fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String())
- }
-
- if _, ok := c.GetState().(*RuleStopState); ok {
- if fullCtx || t == TokenEOF {
- skippedStopStates = append(skippedStopStates, c.(*BaseATNConfig))
- if ParserATNSimulatorDebug {
- fmt.Println("added " + c.String() + " to SkippedStopStates")
- }
- }
- continue
- }
-
- for _, trans := range c.GetState().GetTransitions() {
- target := p.getReachableTarget(trans, t)
- if target != nil {
- cfg := NewBaseATNConfig4(c, target)
- intermediate.Add(cfg, p.mergeCache)
- if ParserATNSimulatorDebug {
- fmt.Println("added " + cfg.String() + " to intermediate")
- }
- }
- }
- }
-
- // Now figure out where the reach operation can take us...
- var reach ATNConfigSet
-
- // This block optimizes the reach operation for intermediate sets which
- // trivially indicate a termination state for the overall
- // AdaptivePredict operation.
- //
- // The conditions assume that intermediate
-	// contains all configurations relevant to the reach set, but this
-	// condition is not true when one or more configurations have been
- // withheld in SkippedStopStates, or when the current symbol is EOF.
- //
- if skippedStopStates == nil && t != TokenEOF {
- if len(intermediate.configs) == 1 {
- // Don't pursue the closure if there is just one state.
-			// It can only have one alternative; just add it to the result.
- reach = intermediate
- } else if p.getUniqueAlt(intermediate) != ATNInvalidAltNumber {
-			// Also don't pursue the closure if there is a unique alternative
-			// among the configurations.
- reach = intermediate
- }
- }
- // If the reach set could not be trivially determined, perform a closure
- // operation on the intermediate set to compute its initial value.
- //
- if reach == nil {
- reach = NewBaseATNConfigSet(fullCtx)
- closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst)
- treatEOFAsEpsilon := t == TokenEOF
- amount := len(intermediate.configs)
- for k := 0; k < amount; k++ {
- p.closure(intermediate.configs[k], reach, closureBusy, false, fullCtx, treatEOFAsEpsilon)
- }
- }
- if t == TokenEOF {
- // After consuming EOF no additional input is possible, so we are
- // only interested in configurations which reached the end of the
- // decision rule (local context) or end of the start rule (full
- // context). Update reach to contain only these configurations. This
- // handles both explicit EOF transitions in the grammar and implicit
- // EOF transitions following the end of the decision or start rule.
- //
- // When reach==intermediate, no closure operation was performed. In
-		// this case, removeAllConfigsNotInRuleStopState needs to check for
- // reachable rule stop states as well as configurations already in
- // a rule stop state.
- //
- // This is handled before the configurations in SkippedStopStates,
- // because any configurations potentially added from that list are
-		// already guaranteed to meet this condition whether or not it's
- // required.
- //
- reach = p.removeAllConfigsNotInRuleStopState(reach, reach == intermediate)
- }
- // If SkippedStopStates!=nil, then it contains at least one
- // configuration. For full-context reach operations, these
- // configurations reached the end of the start rule, in which case we
- // only add them back to reach if no configuration during the current
- // closure operation reached such a state. This ensures AdaptivePredict
- // chooses an alternative Matching the longest overall sequence when
- // multiple alternatives are viable.
- //
- if skippedStopStates != nil && ((!fullCtx) || (!PredictionModehasConfigInRuleStopState(reach))) {
- for l := 0; l < len(skippedStopStates); l++ {
- reach.Add(skippedStopStates[l], p.mergeCache)
- }
- }
-
- if ParserATNSimulatorTraceATNSim {
- fmt.Println("computeReachSet " + closure.String() + " -> " + reach.String())
- }
-
- if len(reach.GetItems()) == 0 {
- return nil
- }
-
- return reach
-}
-
-// Return a configuration set containing only the configurations from
-// {@code configs} which are in a {@link RuleStopState}. If all
-// configurations in {@code configs} are already in a rule stop state, this
-// method simply returns {@code configs}.
-//
-// When {@code lookToEndOfRule} is true, this method uses
-// {@link ATN//NextTokens} for each configuration in {@code configs} which is
-// not already in a rule stop state to see if a rule stop state is reachable
-// from the configuration via epsilon-only transitions.
-//
-// @param configs the configuration set to update
-// @param lookToEndOfRule when true, this method checks for rule stop states
-// reachable by epsilon-only transitions from each configuration in
-// {@code configs}.
-//
-// @return {@code configs} if all configurations in {@code configs} are in a
-// rule stop state, otherwise return a new configuration set containing only
-// the configurations from {@code configs} which are in a rule stop state
-func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs ATNConfigSet, lookToEndOfRule bool) ATNConfigSet {
- if PredictionModeallConfigsInRuleStopStates(configs) {
- return configs
- }
- result := NewBaseATNConfigSet(configs.FullContext())
- for _, config := range configs.GetItems() {
- if _, ok := config.GetState().(*RuleStopState); ok {
- result.Add(config, p.mergeCache)
- continue
- }
- if lookToEndOfRule && config.GetState().GetEpsilonOnlyTransitions() {
- NextTokens := p.atn.NextTokens(config.GetState(), nil)
- if NextTokens.contains(TokenEpsilon) {
- endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()]
- result.Add(NewBaseATNConfig4(config, endOfRuleState), p.mergeCache)
- }
- }
- }
- return result
-}
-
-func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) ATNConfigSet {
- // always at least the implicit call to start rule
- initialContext := predictionContextFromRuleContext(p.atn, ctx)
- configs := NewBaseATNConfigSet(fullCtx)
- if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim {
- fmt.Println("computeStartState from ATN state " + a.String() +
- " initialContext=" + initialContext.String())
- }
-
- for i := 0; i < len(a.GetTransitions()); i++ {
- target := a.GetTransitions()[i].getTarget()
- c := NewBaseATNConfig6(target, i+1, initialContext)
- closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst)
- p.closure(c, configs, closureBusy, true, fullCtx, false)
- }
- return configs
-}
-
-// This method transforms the start state computed by
-// {@link //computeStartState} to the special start state used by a
-// precedence DFA for a particular precedence value. The transformation
-// process applies the following changes to the start state's configuration
-// set.
-//
-//
-// - Evaluate the precedence predicates for each configuration using
-// {@link SemanticContext//evalPrecedence}.
-// - Remove all configurations which predict an alternative greater than
-// 1, for which another configuration that predicts alternative 1 is in the
-// same ATN state with the same prediction context. This transformation is
-// valid for the following reasons:
-//
-// - The closure block cannot contain any epsilon transitions which bypass
-// the body of the closure, so all states reachable via alternative 1 are
-// part of the precedence alternatives of the transformed left-recursive
-// rule.
-// - The "primary" portion of a left recursive rule cannot contain an
-// epsilon transition, so the only way an alternative other than 1 can exist
-// in a state that is also reachable via alternative 1 is by nesting calls
-// to the left-recursive rule, with the outer calls not being at the
-// preferred precedence level.
-//
-//
-//
-//
-//
-// The prediction context must be considered by this filter to address
-// situations like the following.
-//
-//
-//
-// grammar TA
-// prog: statement* EOF
-// statement: letterA | statement letterA 'b'
-// letterA: 'a'
-//
-//
-//
-// In the above grammar, the ATN state immediately before the token
-// reference {@code 'a'} in {@code letterA} is reachable from the left edge
-// of both the primary and closure blocks of the left-recursive rule
-// {@code statement}. The prediction context associated with each of these
-// configurations distinguishes between them, and prevents the alternative
-// which stepped out to {@code prog} (and then back in to {@code statement})
-// from being eliminated by the filter.
-//
-//
-// @param configs The configuration set computed by
-// {@link //computeStartState} as the start state for the DFA.
-// @return The transformed configuration set representing the start state
-// for a precedence DFA at a particular precedence level (determined by
-// calling {@link Parser//getPrecedence}).
-func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConfigSet {
-
- statesFromAlt1 := make(map[int]PredictionContext)
- configSet := NewBaseATNConfigSet(configs.FullContext())
-
- for _, config := range configs.GetItems() {
- // handle alt 1 first
- if config.GetAlt() != 1 {
- continue
- }
- updatedContext := config.GetSemanticContext().evalPrecedence(p.parser, p.outerContext)
- if updatedContext == nil {
- // the configuration was eliminated
- continue
- }
- statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext()
- if updatedContext != config.GetSemanticContext() {
- configSet.Add(NewBaseATNConfig2(config, updatedContext), p.mergeCache)
- } else {
- configSet.Add(config, p.mergeCache)
- }
- }
- for _, config := range configs.GetItems() {
-
- if config.GetAlt() == 1 {
- // already handled
- continue
- }
-		// In the future, this elimination step could be updated to also
- // filter the prediction context for alternatives predicting alt>1
- // (basically a graph subtraction algorithm).
- if !config.getPrecedenceFilterSuppressed() {
- context := statesFromAlt1[config.GetState().GetStateNumber()]
- if context != nil && context.Equals(config.GetContext()) {
- // eliminated
- continue
- }
- }
- configSet.Add(config, p.mergeCache)
- }
- return configSet
-}
-
-func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATNState {
- if trans.Matches(ttype, 0, p.atn.maxTokenType) {
- return trans.getTarget()
- }
-
- return nil
-}
-
-func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs ATNConfigSet, nalts int) []SemanticContext {
-
- altToPred := make([]SemanticContext, nalts+1)
- for _, c := range configs.GetItems() {
- if ambigAlts.contains(c.GetAlt()) {
- altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext())
- }
- }
- nPredAlts := 0
- for i := 1; i <= nalts; i++ {
- pred := altToPred[i]
- if pred == nil {
- altToPred[i] = SemanticContextNone
- } else if pred != SemanticContextNone {
- nPredAlts++
- }
- }
- // nonambig alts are nil in altToPred
- if nPredAlts == 0 {
- altToPred = nil
- }
- if ParserATNSimulatorDebug {
- fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred))
- }
- return altToPred
-}
-
-func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPred []SemanticContext) []*PredPrediction {
- pairs := make([]*PredPrediction, 0)
- containsPredicate := false
- for i := 1; i < len(altToPred); i++ {
- pred := altToPred[i]
- // unpredicated is indicated by SemanticContextNONE
- if ambigAlts != nil && ambigAlts.contains(i) {
- pairs = append(pairs, NewPredPrediction(pred, i))
- }
- if pred != SemanticContextNone {
- containsPredicate = true
- }
- }
- if !containsPredicate {
- return nil
- }
- return pairs
-}
-
-// This method is used to improve the localization of error messages by
-// choosing an alternative rather than panicking with a
-// {@link NoViableAltException} in particular prediction scenarios where the
-// {@link //ERROR} state was reached during ATN simulation.
-//
-//
-// The default implementation of this method uses the following
-// algorithm to identify an ATN configuration which successfully parsed the
-// decision entry rule. Choosing such an alternative ensures that the
-// {@link ParserRuleContext} returned by the calling rule will be complete
-// and valid, and the syntax error will be Reported later at a more
-// localized location.
-//
-//
-// - If a syntactically valid path or paths reach the end of the decision rule and
-// they are semantically valid if predicated, return the min associated alt.
-// - Else, if a semantically invalid but syntactically valid path exist
-// or paths exist, return the minimum associated alt.
-//
-// - Otherwise, return {@link ATN//INVALID_ALT_NUMBER}.
-//
-//
-//
-// In some scenarios, the algorithm described above could predict an
-// alternative which will result in a {@link FailedPredicateException} in
-// the parser. Specifically, this could occur if the only configuration
-// capable of successfully parsing to the end of the decision rule is
-// blocked by a semantic predicate. By choosing this alternative within
-// {@link //AdaptivePredict} instead of panicking with a
-// {@link NoViableAltException}, the resulting
-// {@link FailedPredicateException} in the parser will identify the specific
-// predicate which is preventing the parser from successfully parsing the
-// decision rule, which helps developers identify and correct logic errors
-// in semantic predicates.
-//
-//
-// @param configs The ATN configurations which were valid immediately before
-// the {@link //ERROR} state was reached
-// @param outerContext The \gamma_0 initial parser context from the paper,
-// or the parser stack at the instant before prediction commences.
-//
-// @return The value to return from {@link //AdaptivePredict}, or
-// {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not
-// identified and {@link //AdaptivePredict} should Report an error instead.
-func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs ATNConfigSet, outerContext ParserRuleContext) int {
- cfgs := p.splitAccordingToSemanticValidity(configs, outerContext)
- semValidConfigs := cfgs[0]
- semInvalidConfigs := cfgs[1]
- alt := p.GetAltThatFinishedDecisionEntryRule(semValidConfigs)
- if alt != ATNInvalidAltNumber { // semantically/syntactically viable path exists
- return alt
- }
- // Is there a syntactically valid path with a failed pred?
- if len(semInvalidConfigs.GetItems()) > 0 {
- alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs)
- if alt != ATNInvalidAltNumber { // syntactically viable path exists
- return alt
- }
- }
- return ATNInvalidAltNumber
-}
-
-func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs ATNConfigSet) int {
- alts := NewIntervalSet()
-
- for _, c := range configs.GetItems() {
- _, ok := c.GetState().(*RuleStopState)
-
- if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) {
- alts.addOne(c.GetAlt())
- }
- }
- if alts.length() == 0 {
- return ATNInvalidAltNumber
- }
-
- return alts.first()
-}
-
-// Walk the list of configurations and split them according to
-// those that have preds evaluating to true/false. If no pred, assume
-// true pred and include in succeeded set. Returns a pair of sets.
-//
-// Create a new set so as not to alter the incoming parameter.
-//
-// Assumption: the input stream has been restored to the starting point for
-// prediction, which is where predicates need to evaluate.
-
-type ATNConfigSetPair struct {
- item0, item1 ATNConfigSet
-}
-
-func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs ATNConfigSet, outerContext ParserRuleContext) []ATNConfigSet {
- succeeded := NewBaseATNConfigSet(configs.FullContext())
- failed := NewBaseATNConfigSet(configs.FullContext())
-
- for _, c := range configs.GetItems() {
- if c.GetSemanticContext() != SemanticContextNone {
- predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext)
- if predicateEvaluationResult {
- succeeded.Add(c, nil)
- } else {
- failed.Add(c, nil)
- }
- } else {
- succeeded.Add(c, nil)
- }
- }
- return []ATNConfigSet{succeeded, failed}
-}
-
-// Look through a list of predicate/alt pairs, returning alts for the
-// pairs that win. A {@code NONE} predicate indicates an alt containing an
-// unpredicated config which behaves as "always true." If !complete
-// then we stop at the first predicate that evaluates to true. This
-// includes pairs with nil predicates.
-func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet {
- predictions := NewBitSet()
- for i := 0; i < len(predPredictions); i++ {
- pair := predPredictions[i]
- if pair.pred == SemanticContextNone {
- predictions.add(pair.alt)
- if !complete {
- break
- }
- continue
- }
-
- predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext)
- if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug {
- fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult))
- }
- if predicateEvaluationResult {
- if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug {
- fmt.Println("PREDICT " + fmt.Sprint(pair.alt))
- }
- predictions.add(pair.alt)
- if !complete {
- break
- }
- }
- }
- return predictions
-}
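-
-// Informal illustration of the contract above (pA is a hypothetical
-// predicate, not part of this file): given pairs [(pA, 1), (NONE, 2)] and
-// complete == false, the walk stops at alt 1 when pA evaluates true, and
-// otherwise at alt 2, because a NONE predicate always succeeds. With
-// complete == true, both pairs are evaluated and the returned BitSet
-// contains alt 2 plus alt 1 whenever pA holds.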
-
-func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx, treatEOFAsEpsilon bool) {
- initialDepth := 0
- p.closureCheckingStopState(config, configs, closureBusy, collectPredicates,
- fullCtx, initialDepth, treatEOFAsEpsilon)
-}
-
-func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
- if ParserATNSimulatorTraceATNSim {
- fmt.Println("closure(" + config.String() + ")")
- //fmt.Println("configs(" + configs.String() + ")")
- if config.GetReachesIntoOuterContext() > 50 {
- panic("problem")
- }
- }
-
- if _, ok := config.GetState().(*RuleStopState); ok {
- // We hit rule end. If we have context info, use it
- // run thru all possible stack tops in ctx
- if !config.GetContext().isEmpty() {
- for i := 0; i < config.GetContext().length(); i++ {
- if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState {
- if fullCtx {
- configs.Add(NewBaseATNConfig1(config, config.GetState(), BasePredictionContextEMPTY), p.mergeCache)
- continue
- } else {
- // we have no context info, just chase follow links (if greedy)
- if ParserATNSimulatorDebug {
- fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex()))
- }
- p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
- }
- continue
- }
- returnState := p.atn.states[config.GetContext().getReturnState(i)]
- newContext := config.GetContext().GetParent(i) // "pop" return state
-
- c := NewBaseATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext())
- // While we have context to pop back from, we may have
-				// gotten that context AFTER having fallen off a rule.
- // Make sure we track that we are now out of context.
- c.SetReachesIntoOuterContext(config.GetReachesIntoOuterContext())
- p.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth-1, treatEOFAsEpsilon)
- }
- return
- } else if fullCtx {
- // reached end of start rule
- configs.Add(config, p.mergeCache)
- return
- } else {
- // else if we have no context info, just chase follow links (if greedy)
- if ParserATNSimulatorDebug {
- fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex()))
- }
- }
- }
- p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
-}
-
-// Do the actual work of walking epsilon edges//
-func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
- state := config.GetState()
- // optimization
- if !state.GetEpsilonOnlyTransitions() {
- configs.Add(config, p.mergeCache)
- // make sure to not return here, because EOF transitions can act as
- // both epsilon transitions and non-epsilon transitions.
- }
- for i := 0; i < len(state.GetTransitions()); i++ {
- if i == 0 && p.canDropLoopEntryEdgeInLeftRecursiveRule(config) {
- continue
- }
-
- t := state.GetTransitions()[i]
- _, ok := t.(*ActionTransition)
- continueCollecting := collectPredicates && !ok
- c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon)
- if ci, ok := c.(*BaseATNConfig); ok && ci != nil {
- newDepth := depth
-
- if _, ok := config.GetState().(*RuleStopState); ok {
-				// target fell off end of rule; mark resulting c as having dipped into outer context
- // We can't get here if incoming config was rule stop and we had context
- // track how far we dip into outer context. Might
- // come in handy and we avoid evaluating context dependent
-				// preds if this count is > 0.
-
- if p.dfa != nil && p.dfa.getPrecedenceDfa() {
- if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() {
- c.setPrecedenceFilterSuppressed(true)
- }
- }
-
- c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1)
-
- _, present := closureBusy.Put(c)
- if present {
- // avoid infinite recursion for right-recursive rules
- continue
- }
-
-			configs.SetDipsIntoOuterContext(true) // TODO: can remove? only care when we add to set per middle of this method
- newDepth--
- if ParserATNSimulatorDebug {
- fmt.Println("dips into outer ctx: " + c.String())
- }
- } else {
-
- if !t.getIsEpsilon() {
- _, present := closureBusy.Put(c)
- if present {
- // avoid infinite recursion for EOF* and EOF+
- continue
- }
- }
- if _, ok := t.(*RuleTransition); ok {
- // latch when newDepth goes negative - once we step out of the entry context we can't return
- if newDepth >= 0 {
- newDepth++
- }
- }
- }
- p.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEOFAsEpsilon)
- }
- }
-}
-
-func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config ATNConfig) bool {
- if TurnOffLRLoopEntryBranchOpt {
- return false
- }
-
- _p := config.GetState()
-
- // First check to see if we are in StarLoopEntryState generated during
- // left-recursion elimination. For efficiency, also check if
- // the context has an empty stack case. If so, it would mean
- // global FOLLOW so we can't perform optimization
- if _p.GetStateType() != ATNStateStarLoopEntry {
- return false
- }
- startLoop, ok := _p.(*StarLoopEntryState)
- if !ok {
- return false
- }
- if !startLoop.precedenceRuleDecision ||
- config.GetContext().isEmpty() ||
- config.GetContext().hasEmptyPath() {
- return false
- }
-
- // Require all return states to return back to the same rule
-	// that _p is in.
- numCtxs := config.GetContext().length()
- for i := 0; i < numCtxs; i++ {
- returnState := p.atn.states[config.GetContext().getReturnState(i)]
- if returnState.GetRuleIndex() != _p.GetRuleIndex() {
- return false
- }
- }
- x := _p.GetTransitions()[0].getTarget()
- decisionStartState := x.(BlockStartState)
- blockEndStateNum := decisionStartState.getEndState().stateNumber
- blockEndState := p.atn.states[blockEndStateNum].(*BlockEndState)
-
- // Verify that the top of each stack context leads to loop entry/exit
- // state through epsilon edges and w/o leaving rule.
-
- for i := 0; i < numCtxs; i++ { // for each stack context
- returnStateNumber := config.GetContext().getReturnState(i)
- returnState := p.atn.states[returnStateNumber]
-
- // all states must have single outgoing epsilon edge
- if len(returnState.GetTransitions()) != 1 || !returnState.GetTransitions()[0].getIsEpsilon() {
- return false
- }
-
-		// Look for prefix op case like 'not expr', '(' type ')' expr
- returnStateTarget := returnState.GetTransitions()[0].getTarget()
- if returnState.GetStateType() == ATNStateBlockEnd && returnStateTarget == _p {
- continue
- }
-
- // Look for 'expr op expr' or case where expr's return state is block end
- // of (...)* internal block; the block end points to loop back
-		// which points to _p, but we don't need to check that
- if returnState == blockEndState {
- continue
- }
-
- // Look for ternary expr ? expr : expr. The return state points at block end,
- // which points at loop entry state
- if returnStateTarget == blockEndState {
- continue
- }
-
- // Look for complex prefix 'between expr and expr' case where 2nd expr's
- // return state points at block end state of (...)* internal block
- if returnStateTarget.GetStateType() == ATNStateBlockEnd &&
- len(returnStateTarget.GetTransitions()) == 1 &&
- returnStateTarget.GetTransitions()[0].getIsEpsilon() &&
- returnStateTarget.GetTransitions()[0].getTarget() == _p {
- continue
- }
-
- // anything else ain't conforming
- return false
- }
-
- return true
-}
-
-func (p *ParserATNSimulator) getRuleName(index int) string {
- if p.parser != nil && index >= 0 {
- return p.parser.GetRuleNames()[index]
- }
- var sb strings.Builder
- sb.Grow(32)
-
-	sb.WriteString("<rule ")
-	sb.WriteString(strconv.FormatInt(int64(index), 10))
-	sb.WriteString(">")
- return sb.String()
-}
-
-func (p *ParserATNSimulator) getEpsilonTarget(config ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) ATNConfig {
-
- switch t.getSerializationType() {
- case TransitionRULE:
- return p.ruleTransition(config, t.(*RuleTransition))
- case TransitionPRECEDENCE:
- return p.precedenceTransition(config, t.(*PrecedencePredicateTransition), collectPredicates, inContext, fullCtx)
- case TransitionPREDICATE:
- return p.predTransition(config, t.(*PredicateTransition), collectPredicates, inContext, fullCtx)
- case TransitionACTION:
- return p.actionTransition(config, t.(*ActionTransition))
- case TransitionEPSILON:
- return NewBaseATNConfig4(config, t.getTarget())
- case TransitionATOM, TransitionRANGE, TransitionSET:
- // EOF transitions act like epsilon transitions after the first EOF
- // transition is traversed
- if treatEOFAsEpsilon {
- if t.Matches(TokenEOF, 0, 1) {
- return NewBaseATNConfig4(config, t.getTarget())
- }
- }
- return nil
- default:
- return nil
- }
-}
-
-func (p *ParserATNSimulator) actionTransition(config ATNConfig, t *ActionTransition) *BaseATNConfig {
- if ParserATNSimulatorDebug {
- fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex))
- }
- return NewBaseATNConfig4(config, t.getTarget())
-}
-
-func (p *ParserATNSimulator) precedenceTransition(config ATNConfig,
- pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig {
-
- if ParserATNSimulatorDebug {
- fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " +
- strconv.Itoa(pt.precedence) + ">=_p, ctx dependent=true")
- if p.parser != nil {
- fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil)))
- }
- }
- var c *BaseATNConfig
- if collectPredicates && inContext {
- if fullCtx {
- // In full context mode, we can evaluate predicates on-the-fly
- // during closure, which dramatically reduces the size of
- // the config sets. It also obviates the need to test predicates
- // later during conflict resolution.
- currentPosition := p.input.Index()
- p.input.Seek(p.startIndex)
- predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext)
- p.input.Seek(currentPosition)
- if predSucceeds {
- c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context
- }
- } else {
- newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate())
- c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx)
- }
- } else {
- c = NewBaseATNConfig4(config, pt.getTarget())
- }
- if ParserATNSimulatorDebug {
- fmt.Println("config from pred transition=" + c.String())
- }
- return c
-}
-
-func (p *ParserATNSimulator) predTransition(config ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig {
-
- if ParserATNSimulatorDebug {
- fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.ruleIndex) +
- ":" + strconv.Itoa(pt.predIndex) + ", ctx dependent=" + fmt.Sprint(pt.isCtxDependent))
- if p.parser != nil {
- fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil)))
- }
- }
- var c *BaseATNConfig
- if collectPredicates && (!pt.isCtxDependent || inContext) {
- if fullCtx {
- // In full context mode, we can evaluate predicates on-the-fly
- // during closure, which dramatically reduces the size of
- // the config sets. It also obviates the need to test predicates
- // later during conflict resolution.
- currentPosition := p.input.Index()
- p.input.Seek(p.startIndex)
- predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext)
- p.input.Seek(currentPosition)
- if predSucceeds {
- c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context
- }
- } else {
- newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate())
- c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx)
- }
- } else {
- c = NewBaseATNConfig4(config, pt.getTarget())
- }
- if ParserATNSimulatorDebug {
- fmt.Println("config from pred transition=" + c.String())
- }
- return c
-}
-
-func (p *ParserATNSimulator) ruleTransition(config ATNConfig, t *RuleTransition) *BaseATNConfig {
- if ParserATNSimulatorDebug {
- fmt.Println("CALL rule " + p.getRuleName(t.getTarget().GetRuleIndex()) + ", ctx=" + config.GetContext().String())
- }
- returnState := t.followState
- newContext := SingletonBasePredictionContextCreate(config.GetContext(), returnState.GetStateNumber())
- return NewBaseATNConfig1(config, t.getTarget(), newContext)
-}
-
-func (p *ParserATNSimulator) getConflictingAlts(configs ATNConfigSet) *BitSet {
- altsets := PredictionModegetConflictingAltSubsets(configs)
- return PredictionModeGetAlts(altsets)
-}
-
-// Sam pointed out a problem with the previous definition, v3, of
-// ambiguous states. If we have another state associated with conflicting
-// alternatives, we should keep going. For example, the following grammar
-//
-// s : (ID | ID ID?) ';'
-//
-// When the ATN simulation reaches the state before ';', it has a DFA
-// state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally
-// 12|1|[] and 12|2|[] conflict, but we cannot stop processing this node
-// because alternative two has another way to continue, via [6|2|[]].
-// The key is that we have a single state that has configs only associated
-// with a single alternative, 2, and crucially the state transitions
-// among the configurations are all non-epsilon transitions. That means
-// we don't consider any conflicts that include alternative 2. So, we
-// ignore the conflict between alts 1 and 2. We ignore a set of
-// conflicting alts when there is an intersection with an alternative
-// associated with a single alt state in the state→config-list map.
-//
-// It's also the case that we might have two conflicting configurations but
-// also a 3rd nonconflicting configuration for a different alternative:
-// [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar:
-//
-// a : A | A | A B
-//
-// After Matching input A, we reach the stop state for rule A, state 1.
-// State 8 is the state right before B. Clearly alternatives 1 and 2
-// conflict and no amount of further lookahead will separate the two.
-// However, alternative 3 will be able to continue and so we do not
-// stop working on this state. In the previous example, we're concerned
-// with states associated with the conflicting alternatives. Here alt
-// 3 is not associated with the conflicting configs, but since we can continue
-// looking for input reasonably, I don't declare the state done. We
-// ignore a set of conflicting alts when we have an alternative
-// that we still need to pursue.
-//
-
-func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs ATNConfigSet) *BitSet {
- var conflictingAlts *BitSet
- if configs.GetUniqueAlt() != ATNInvalidAltNumber {
- conflictingAlts = NewBitSet()
- conflictingAlts.add(configs.GetUniqueAlt())
- } else {
- conflictingAlts = configs.GetConflictingAlts()
- }
- return conflictingAlts
-}
-
-func (p *ParserATNSimulator) GetTokenName(t int) string {
- if t == TokenEOF {
- return "EOF"
- }
-
- if p.parser != nil && p.parser.GetLiteralNames() != nil && t < len(p.parser.GetLiteralNames()) {
- return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">"
- }
-
-	if p.parser != nil && p.parser.GetSymbolicNames() != nil && t < len(p.parser.GetSymbolicNames()) {
- return p.parser.GetSymbolicNames()[t] + "<" + strconv.Itoa(t) + ">"
- }
-
- return strconv.Itoa(t)
-}
-
-func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string {
- return p.GetTokenName(input.LA(1))
-}
-
-// Used for debugging in AdaptivePredict around execATN, but I cut
-// it out for clarity now that the algorithm works well. We can leave this
-// "dead" code for a bit.
-func (p *ParserATNSimulator) dumpDeadEndConfigs(nvae *NoViableAltException) {
-
- panic("Not implemented")
-
- // fmt.Println("dead end configs: ")
- // var decs = nvae.deadEndConfigs
- //
-	// for i:=0; i<len(decs); i++ {
-	//
-	// 	c := decs[i]
-	// 	var trans = "no edges"
-	// 	if len(c.state.GetTransitions()) > 0 {
- // var t = c.state.GetTransitions()[0]
- // if t2, ok := t.(*AtomTransition); ok {
- // trans = "Atom "+ p.GetTokenName(t2.label)
- // } else if t3, ok := t.(SetTransition); ok {
- // _, ok := t.(*NotSetTransition)
- //
- // var s string
- // if (ok){
- // s = "~"
- // }
- //
- // trans = s + "Set " + t3.set
- // }
- // }
- // fmt.Errorf(c.String(p.parser, true) + ":" + trans)
- // }
-}
-
-func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs ATNConfigSet, startIndex int) *NoViableAltException {
- return NewNoViableAltException(p.parser, input, input.Get(startIndex), input.LT(1), configs, outerContext)
-}
-
-func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int {
- alt := ATNInvalidAltNumber
- for _, c := range configs.GetItems() {
- if alt == ATNInvalidAltNumber {
- alt = c.GetAlt() // found first alt
- } else if c.GetAlt() != alt {
- return ATNInvalidAltNumber
- }
- }
- return alt
-}
-
-// Add an edge to the DFA, if possible. This method calls
-// {@link //addDFAState} to ensure the {@code to} state is present in the
-// DFA. If {@code from} is {@code nil}, or if {@code t} is outside the
-// range of edges that can be represented in the DFA tables, this method
-// returns without adding the edge to the DFA.
-//
-// If {@code to} is {@code nil}, this method returns {@code nil}.
-// Otherwise, this method returns the {@link DFAState} returned by calling
-// {@link //addDFAState} for the {@code to} state.
-//
-// @param dfa The DFA
-// @param from The source state for the edge
-// @param t The input symbol
-// @param to The target state for the edge
-//
-// @return If {@code to} is {@code nil}, this method returns {@code nil};
-// otherwise this method returns the result of calling {@link //addDFAState}
-// on {@code to}
-func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState {
- if ParserATNSimulatorDebug {
- fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t))
- }
- if to == nil {
- return nil
- }
- p.atn.stateMu.Lock()
-	to = p.addDFAState(dfa, to) // use existing if possible, not incoming
- p.atn.stateMu.Unlock()
- if from == nil || t < -1 || t > p.atn.maxTokenType {
- return to
- }
- p.atn.edgeMu.Lock()
- if from.getEdges() == nil {
- from.setEdges(make([]*DFAState, p.atn.maxTokenType+1+1))
- }
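-	// Edge slots are shifted by one so that t == -1 (EOF) lands at index 0,
-	// hence the maxTokenType+1+1 sizing above.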
- from.setIthEdge(t+1, to) // connect
- p.atn.edgeMu.Unlock()
-
- if ParserATNSimulatorDebug {
- var names []string
- if p.parser != nil {
- names = p.parser.GetLiteralNames()
- }
-
- fmt.Println("DFA=\n" + dfa.String(names, nil))
- }
- return to
-}
-
-// Add state {@code D} to the DFA if it is not already present, and return
-// the actual instance stored in the DFA. If a state equivalent to {@code D}
-// is already in the DFA, the existing state is returned. Otherwise this
-// method returns {@code D} after adding it to the DFA.
-//
-// If {@code D} is {@link //ERROR}, this method returns {@link //ERROR} and
-// does not change the DFA.
-//
-// @param dfa The dfa
-// @param D The DFA state to add
-// @return The state stored in the DFA. This will be either the existing
-// state if {@code D} is already in the DFA, or {@code D} itself if the
-// state was not already present.
-func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState {
- if d == ATNSimulatorError {
- return d
- }
- existing, present := dfa.states.Get(d)
- if present {
- if ParserATNSimulatorTraceATNSim {
- fmt.Print("addDFAState " + d.String() + " exists")
- }
- return existing
- }
-
- // The state was not present, so update it with configs
- //
- d.stateNumber = dfa.states.Len()
- if !d.configs.ReadOnly() {
- d.configs.OptimizeConfigs(p.BaseATNSimulator)
- d.configs.SetReadOnly(true)
- }
- dfa.states.Put(d)
- if ParserATNSimulatorTraceATNSim {
- fmt.Println("addDFAState new " + d.String())
- }
-
- return d
-}
-
-func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs ATNConfigSet, startIndex, stopIndex int) {
- if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug {
- interval := NewInterval(startIndex, stopIndex+1)
- fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
- ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
- }
- if p.parser != nil {
- p.parser.GetErrorListenerDispatch().ReportAttemptingFullContext(p.parser, dfa, startIndex, stopIndex, conflictingAlts, configs)
- }
-}
-
-func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs ATNConfigSet, startIndex, stopIndex int) {
- if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug {
- interval := NewInterval(startIndex, stopIndex+1)
- fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
- ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
- }
- if p.parser != nil {
- p.parser.GetErrorListenerDispatch().ReportContextSensitivity(p.parser, dfa, startIndex, stopIndex, prediction, configs)
- }
-}
-
-// If context-sensitive parsing, we know it's an ambiguity not a conflict
-func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, D *DFAState, startIndex, stopIndex int,
- exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
- if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug {
- interval := NewInterval(startIndex, stopIndex+1)
- fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() +
- ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
- }
- if p.parser != nil {
- p.parser.GetErrorListenerDispatch().ReportAmbiguity(p.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
- }
-}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go
deleted file mode 100644
index ba62af361..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go
+++ /dev/null
@@ -1,806 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
- "golang.org/x/exp/slices"
- "strconv"
-)
-
-// Represents {@code $} in local context prediction, which means wildcard:
-// {@code * + x = *}.
-// /
-const (
- BasePredictionContextEmptyReturnState = 0x7FFFFFFF
-)
-
-// Represents {@code $} in an array in full context mode, when {@code $}
-// doesn't mean wildcard: {@code $ + x = [$,x]}. Here,
-// {@code $} = {@link //EmptyReturnState}.
-// /
-
-var (
- BasePredictionContextglobalNodeCount = 1
- BasePredictionContextid = BasePredictionContextglobalNodeCount
-)
-
-type PredictionContext interface {
- Hash() int
- Equals(interface{}) bool
- GetParent(int) PredictionContext
- getReturnState(int) int
- length() int
- isEmpty() bool
- hasEmptyPath() bool
- String() string
-}
-
-type BasePredictionContext struct {
- cachedHash int
-}
-
-func NewBasePredictionContext(cachedHash int) *BasePredictionContext {
- pc := new(BasePredictionContext)
- pc.cachedHash = cachedHash
-
- return pc
-}
-
-func (b *BasePredictionContext) isEmpty() bool {
- return false
-}
-
-func calculateHash(parent PredictionContext, returnState int) int {
- h := murmurInit(1)
- h = murmurUpdate(h, parent.Hash())
- h = murmurUpdate(h, returnState)
- return murmurFinish(h, 2)
-}
-
-var _emptyPredictionContextHash int
-
-func init() {
- _emptyPredictionContextHash = murmurInit(1)
- _emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0)
-}
-
-func calculateEmptyHash() int {
- return _emptyPredictionContextHash
-}
-
-// Used to cache {@link BasePredictionContext} objects. It is used for the
-// shared context cache associated with contexts in DFA states. This cache
-// can be used for both lexers and parsers.
-
-type PredictionContextCache struct {
- cache map[PredictionContext]PredictionContext
-}
-
-func NewPredictionContextCache() *PredictionContextCache {
- t := new(PredictionContextCache)
- t.cache = make(map[PredictionContext]PredictionContext)
- return t
-}
-
-// Add a context to the cache and return it. If the context already exists,
-// return that one instead and do not add a new context to the cache.
-// Protect shared cache from unsafe thread access.
-func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext {
- if ctx == BasePredictionContextEMPTY {
- return BasePredictionContextEMPTY
- }
- existing := p.cache[ctx]
- if existing != nil {
- return existing
- }
- p.cache[ctx] = ctx
- return ctx
-}
-
-func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext {
- return p.cache[ctx]
-}
-
-func (p *PredictionContextCache) length() int {
- return len(p.cache)
-}
-
-type SingletonPredictionContext interface {
- PredictionContext
-}
-
-type BaseSingletonPredictionContext struct {
- *BasePredictionContext
-
- parentCtx PredictionContext
- returnState int
-}
-
-func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext {
- var cachedHash int
- if parent != nil {
- cachedHash = calculateHash(parent, returnState)
- } else {
- cachedHash = calculateEmptyHash()
- }
-
- s := new(BaseSingletonPredictionContext)
- s.BasePredictionContext = NewBasePredictionContext(cachedHash)
-
- s.parentCtx = parent
- s.returnState = returnState
-
- return s
-}
-
-func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext {
- if returnState == BasePredictionContextEmptyReturnState && parent == nil {
- // someone can pass in the bits of an array ctx that mean $
- return BasePredictionContextEMPTY
- }
-
- return NewBaseSingletonPredictionContext(parent, returnState)
-}
-
-func (b *BaseSingletonPredictionContext) length() int {
- return 1
-}
-
-func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext {
- return b.parentCtx
-}
-
-func (b *BaseSingletonPredictionContext) getReturnState(index int) int {
- return b.returnState
-}
-
-func (b *BaseSingletonPredictionContext) hasEmptyPath() bool {
- return b.returnState == BasePredictionContextEmptyReturnState
-}
-
-func (b *BaseSingletonPredictionContext) Hash() int {
- return b.cachedHash
-}
-
-func (b *BaseSingletonPredictionContext) Equals(other interface{}) bool {
- if b == other {
- return true
- }
- if _, ok := other.(*BaseSingletonPredictionContext); !ok {
- return false
- }
-
- otherP := other.(*BaseSingletonPredictionContext)
-
- if b.returnState != otherP.getReturnState(0) {
- return false
- }
- if b.parentCtx == nil {
- return otherP.parentCtx == nil
- }
-
- return b.parentCtx.Equals(otherP.parentCtx)
-}
-
-func (b *BaseSingletonPredictionContext) String() string {
- var up string
-
- if b.parentCtx == nil {
- up = ""
- } else {
- up = b.parentCtx.String()
- }
-
- if len(up) == 0 {
- if b.returnState == BasePredictionContextEmptyReturnState {
- return "$"
- }
-
- return strconv.Itoa(b.returnState)
- }
-
- return strconv.Itoa(b.returnState) + " " + up
-}
-
-var BasePredictionContextEMPTY = NewEmptyPredictionContext()
-
-type EmptyPredictionContext struct {
- *BaseSingletonPredictionContext
-}
-
-func NewEmptyPredictionContext() *EmptyPredictionContext {
-
- p := new(EmptyPredictionContext)
-
- p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState)
- p.cachedHash = calculateEmptyHash()
- return p
-}
-
-func (e *EmptyPredictionContext) isEmpty() bool {
- return true
-}
-
-func (e *EmptyPredictionContext) GetParent(index int) PredictionContext {
- return nil
-}
-
-func (e *EmptyPredictionContext) getReturnState(index int) int {
- return e.returnState
-}
-
-func (e *EmptyPredictionContext) Hash() int {
- return e.cachedHash
-}
-
-func (e *EmptyPredictionContext) Equals(other interface{}) bool {
- return e == other
-}
-
-func (e *EmptyPredictionContext) String() string {
- return "$"
-}
-
-type ArrayPredictionContext struct {
- *BasePredictionContext
-
- parents []PredictionContext
- returnStates []int
-}
-
-func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext {
- // Parent can be nil only if full ctx mode and we make an array
- // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using
- // nil parent and
- // returnState == {@link //EmptyReturnState}.
- hash := murmurInit(1)
-
- for _, parent := range parents {
- hash = murmurUpdate(hash, parent.Hash())
- }
-
- for _, returnState := range returnStates {
- hash = murmurUpdate(hash, returnState)
- }
-
- hash = murmurFinish(hash, len(parents)<<1)
-
- c := new(ArrayPredictionContext)
- c.BasePredictionContext = NewBasePredictionContext(hash)
-
- c.parents = parents
- c.returnStates = returnStates
-
- return c
-}
-
-func (a *ArrayPredictionContext) GetReturnStates() []int {
- return a.returnStates
-}
-
-func (a *ArrayPredictionContext) hasEmptyPath() bool {
- return a.getReturnState(a.length()-1) == BasePredictionContextEmptyReturnState
-}
-
-func (a *ArrayPredictionContext) isEmpty() bool {
- // since EmptyReturnState can only appear in the last position, we
- // don't need to verify that size==1
- return a.returnStates[0] == BasePredictionContextEmptyReturnState
-}
-
-func (a *ArrayPredictionContext) length() int {
- return len(a.returnStates)
-}
-
-func (a *ArrayPredictionContext) GetParent(index int) PredictionContext {
- return a.parents[index]
-}
-
-func (a *ArrayPredictionContext) getReturnState(index int) int {
- return a.returnStates[index]
-}
-
-// Equals is the default comparison function for ArrayPredictionContext when no specialized
-// implementation is needed for a collection
-func (a *ArrayPredictionContext) Equals(o interface{}) bool {
- if a == o {
- return true
- }
- other, ok := o.(*ArrayPredictionContext)
- if !ok {
- return false
- }
- if a.cachedHash != other.Hash() {
- return false // can't be same if hash is different
- }
-
- // Must compare the actual array elements and not just the array address
- //
- return slices.Equal(a.returnStates, other.returnStates) &&
- slices.EqualFunc(a.parents, other.parents, func(x, y PredictionContext) bool {
- return x.Equals(y)
- })
-}
-
-// Hash is the default hash function for ArrayPredictionContext when no specialized
-// implementation is needed for a collection
-func (a *ArrayPredictionContext) Hash() int {
- return a.BasePredictionContext.cachedHash
-}
-
-func (a *ArrayPredictionContext) String() string {
- if a.isEmpty() {
- return "[]"
- }
-
- s := "["
- for i := 0; i < len(a.returnStates); i++ {
- if i > 0 {
- s = s + ", "
- }
- if a.returnStates[i] == BasePredictionContextEmptyReturnState {
- s = s + "$"
- continue
- }
- s = s + strconv.Itoa(a.returnStates[i])
- if a.parents[i] != nil {
- s = s + " " + a.parents[i].String()
- } else {
- s = s + "nil"
- }
- }
-
- return s + "]"
-}
-
-// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph.
-// Return {@link //EMPTY} if {@code outerContext} is empty or nil.
-// /
-func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext {
- if outerContext == nil {
- outerContext = ParserRuleContextEmpty
- }
- // if we are in RuleContext of start rule, s, then BasePredictionContext
- // is EMPTY. Nobody called us. (if we are empty, return empty)
- if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty {
- return BasePredictionContextEMPTY
- }
- // If we have a parent, convert it to a BasePredictionContext graph
- parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext))
- state := a.states[outerContext.GetInvokingState()]
- transition := state.GetTransitions()[0]
-
- return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber())
-}
-
-func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
-
- // Share same graph if both same
- //
- if a == b || a.Equals(b) {
- return a
- }
-
- // In Java, EmptyPredictionContext inherits from SingletonPredictionContext, and so the test
- // in java for SingletonPredictionContext will succeed and a new ArrayPredictionContext will be created
- // from it.
-	// In Go, EmptyPredictionContext does not equate to SingletonPredictionContext and so that conversion
- // will fail. We need to test for both Empty and Singleton and create an ArrayPredictionContext from
- // either of them.
-
- ac, ok1 := a.(*BaseSingletonPredictionContext)
- bc, ok2 := b.(*BaseSingletonPredictionContext)
-
- if ok1 && ok2 {
- return mergeSingletons(ac, bc, rootIsWildcard, mergeCache)
- }
- // At least one of a or b is array
-	// If one is $ and rootIsWildcard, return $ as the wildcard
- if rootIsWildcard {
- if _, ok := a.(*EmptyPredictionContext); ok {
- return a
- }
- if _, ok := b.(*EmptyPredictionContext); ok {
- return b
- }
- }
-
-	// Convert Singleton or Empty so that both operands are arrays, to
-	// normalize the merge. We should not reuse the existing parameters here.
- //
- // TODO: I think that maybe the Prediction Context structs should be redone as there is a chance we will see this mess again - maybe redo the logic here
-
- var arp, arb *ArrayPredictionContext
- var ok bool
- if arp, ok = a.(*ArrayPredictionContext); ok {
- } else if _, ok = a.(*BaseSingletonPredictionContext); ok {
- arp = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)})
- } else if _, ok = a.(*EmptyPredictionContext); ok {
- arp = NewArrayPredictionContext([]PredictionContext{}, []int{})
- }
-
- if arb, ok = b.(*ArrayPredictionContext); ok {
- } else if _, ok = b.(*BaseSingletonPredictionContext); ok {
- arb = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)})
- } else if _, ok = b.(*EmptyPredictionContext); ok {
- arb = NewArrayPredictionContext([]PredictionContext{}, []int{})
- }
-
-	// Both arp and arb are arrays now
- return mergeArrays(arp, arb, rootIsWildcard, mergeCache)
-}
-
-// Merge two {@link SingletonBasePredictionContext} instances.
-//
-// Stack tops equal and merged parents are the same: return the left graph.
-//
-//
-// Same stack top but parents differ: merge parents giving an array node,
-// then remainders of those graphs. A new root node is created to point to
-// the merged parents.
-//
-//
-// Different stack tops pointing to same parent. Make array node for the
-// root where both elements in the root point to the same (original)
-// parent.
-//
-//
-// Different stack tops pointing to different parents. Make array node for
-// the root where each element points to the corresponding original
-// parent.
-//
-//
-// @param a the first {@link SingletonBasePredictionContext}
-// @param b the second {@link SingletonBasePredictionContext}
-// @param rootIsWildcard {@code true} if this is a local-context merge,
-// otherwise false to indicate a full-context merge
-// @param mergeCache
-// /
-func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
- if mergeCache != nil {
- previous := mergeCache.Get(a.Hash(), b.Hash())
- if previous != nil {
- return previous.(PredictionContext)
- }
- previous = mergeCache.Get(b.Hash(), a.Hash())
- if previous != nil {
- return previous.(PredictionContext)
- }
- }
-
- rootMerge := mergeRoot(a, b, rootIsWildcard)
- if rootMerge != nil {
- if mergeCache != nil {
- mergeCache.set(a.Hash(), b.Hash(), rootMerge)
- }
- return rootMerge
- }
- if a.returnState == b.returnState {
- parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
- // if parent is same as existing a or b parent or reduced to a parent,
- // return it
- if parent == a.parentCtx {
- return a // ax + bx = ax, if a=b
- }
- if parent == b.parentCtx {
- return b // ax + bx = bx, if a=b
- }
- // else: ax + ay = a'[x,y]
- // merge parents x and y, giving array node with x,y then remainders
-		// of those graphs. dup a, a' points at merged array.
-		// New joined parent, so create a new singleton pointing to it, a'
- spc := SingletonBasePredictionContextCreate(parent, a.returnState)
- if mergeCache != nil {
- mergeCache.set(a.Hash(), b.Hash(), spc)
- }
- return spc
- }
- // a != b payloads differ
- // see if we can collapse parents due to $+x parents if local ctx
- var singleParent PredictionContext
-	if a == b || (a.parentCtx != nil && a.parentCtx == b.parentCtx) {
-		// ax + bx = [a,b]x
- singleParent = a.parentCtx
- }
- if singleParent != nil { // parents are same
- // sort payloads and use same parent
- payloads := []int{a.returnState, b.returnState}
- if a.returnState > b.returnState {
- payloads[0] = b.returnState
- payloads[1] = a.returnState
- }
- parents := []PredictionContext{singleParent, singleParent}
- apc := NewArrayPredictionContext(parents, payloads)
- if mergeCache != nil {
- mergeCache.set(a.Hash(), b.Hash(), apc)
- }
- return apc
- }
-	// parents differ and we can't merge them, so just pack them together
-	// into an array.
-	// ax + by = [ax,by]
- payloads := []int{a.returnState, b.returnState}
- parents := []PredictionContext{a.parentCtx, b.parentCtx}
- if a.returnState > b.returnState { // sort by payload
- payloads[0] = b.returnState
- payloads[1] = a.returnState
- parents = []PredictionContext{b.parentCtx, a.parentCtx}
- }
- apc := NewArrayPredictionContext(parents, payloads)
- if mergeCache != nil {
- mergeCache.set(a.Hash(), b.Hash(), apc)
- }
- return apc
-}
-
-// Handle case where at least one of {@code a} or {@code b} is
-// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used
-// to represent {@link //EMPTY}.
-//
-// Local-Context Merges
-//
-// These local-context merge operations are used when {@code rootIsWildcard}
-// is true.
-//
-// {@link //EMPTY} is a superset of any graph; return {@link //EMPTY}.
-//
-//
-// {@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
-// {@code //EMPTY}; return left graph.
-//
-//
-// Special case of last merge if local context.
-//
-//
-// Full-Context Merges
-//
-// These full-context merge operations are used when {@code rootIsWildcard}
-// is false.
-//
-//
-//
-// Must keep all contexts; {@link //EMPTY} in array is a special value (and
-// nil parent).
-//
-//
-//
-//
-// @param a the first {@link SingletonBasePredictionContext}
-// @param b the second {@link SingletonBasePredictionContext}
-// @param rootIsWildcard {@code true} if this is a local-context merge,
-// otherwise false to indicate a full-context merge
-// /
-func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionContext {
- if rootIsWildcard {
- if a == BasePredictionContextEMPTY {
-			return BasePredictionContextEMPTY // * + b = *
- }
- if b == BasePredictionContextEMPTY {
-			return BasePredictionContextEMPTY // a + * = *
- }
- } else {
- if a == BasePredictionContextEMPTY && b == BasePredictionContextEMPTY {
- return BasePredictionContextEMPTY // $ + $ = $
- } else if a == BasePredictionContextEMPTY { // $ + x = [$,x]
- payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState}
- parents := []PredictionContext{b.GetParent(-1), nil}
- return NewArrayPredictionContext(parents, payloads)
- } else if b == BasePredictionContextEMPTY { // x + $ = [$,x] ($ is always first if present)
- payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState}
- parents := []PredictionContext{a.GetParent(-1), nil}
- return NewArrayPredictionContext(parents, payloads)
- }
- }
- return nil
-}
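-
-// A minimal sketch of the root-merge rules above (informal; assumes it runs
-// inside this package, and x with return state 7 is made up for
-// illustration):
-//
-//	x := NewBaseSingletonPredictionContext(BasePredictionContextEMPTY, 7)
-//	mergeRoot(BasePredictionContextEMPTY, x, true)  // wildcard root: $ + x = $
-//	mergeRoot(BasePredictionContextEMPTY, x, false) // full ctx: $ + x = [x,$]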
-
-// Merge two {@link ArrayBasePredictionContext} instances.
-//
-// Different tops, different parents.
-//
-//
-// Shared top, same parents.
-//
-//
-// Shared top, different parents.
-//
-//
-// Shared top, all shared parents.
-//
-//
-// Equal tops, merge parents and reduce top to
-// {@link SingletonBasePredictionContext}.
-//
-// /
-func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
- if mergeCache != nil {
- previous := mergeCache.Get(a.Hash(), b.Hash())
- if previous != nil {
- if ParserATNSimulatorTraceATNSim {
- fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
- }
- return previous.(PredictionContext)
- }
- previous = mergeCache.Get(b.Hash(), a.Hash())
- if previous != nil {
- if ParserATNSimulatorTraceATNSim {
- fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
- }
- return previous.(PredictionContext)
- }
- }
- // merge sorted payloads a + b => M
- i := 0 // walks a
- j := 0 // walks b
- k := 0 // walks target M array
-
- mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates))
- mergedParents := make([]PredictionContext, len(a.returnStates)+len(b.returnStates))
- // walk and merge to yield mergedParents, mergedReturnStates
- for i < len(a.returnStates) && j < len(b.returnStates) {
- aParent := a.parents[i]
- bParent := b.parents[j]
- if a.returnStates[i] == b.returnStates[j] {
- // same payload (stack tops are equal), must yield merged singleton
- payload := a.returnStates[i]
- // $+$ = $
- bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil
-			axAX := aParent != nil && bParent != nil && aParent == bParent // ax+ax -> ax
- if bothDollars || axAX {
- mergedParents[k] = aParent // choose left
- mergedReturnStates[k] = payload
- } else { // ax+ay -> a'[x,y]
- mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache)
- mergedParents[k] = mergedParent
- mergedReturnStates[k] = payload
- }
- i++ // hop over left one as usual
-			j++ // but also skip one in right side since we merge
- } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M
- mergedParents[k] = aParent
- mergedReturnStates[k] = a.returnStates[i]
- i++
- } else { // b > a, copy b[j] to M
- mergedParents[k] = bParent
- mergedReturnStates[k] = b.returnStates[j]
- j++
- }
- k++
- }
- // copy over any payloads remaining in either array
- if i < len(a.returnStates) {
- for p := i; p < len(a.returnStates); p++ {
- mergedParents[k] = a.parents[p]
- mergedReturnStates[k] = a.returnStates[p]
- k++
- }
- } else {
- for p := j; p < len(b.returnStates); p++ {
- mergedParents[k] = b.parents[p]
- mergedReturnStates[k] = b.returnStates[p]
- k++
- }
- }
- // trim merged if we combined a few that had same stack tops
- if k < len(mergedParents) { // write index < last position trim
- if k == 1 { // for just one merged element, return singleton top
- pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0])
- if mergeCache != nil {
- mergeCache.set(a.Hash(), b.Hash(), pc)
- }
- return pc
- }
- mergedParents = mergedParents[0:k]
- mergedReturnStates = mergedReturnStates[0:k]
- }
-
- M := NewArrayPredictionContext(mergedParents, mergedReturnStates)
-
- // if we created same array as a or b, return that instead
- // TODO: track whether this is possible above during merge sort for speed
-	// TODO: In Go, I do not think we can just do M == xx as M is a brand new allocation. This could be causing allocation problems
- if M == a {
- if mergeCache != nil {
- mergeCache.set(a.Hash(), b.Hash(), a)
- }
- if ParserATNSimulatorTraceATNSim {
- fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> a")
- }
- return a
- }
- if M == b {
- if mergeCache != nil {
- mergeCache.set(a.Hash(), b.Hash(), b)
- }
- if ParserATNSimulatorTraceATNSim {
- fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> b")
- }
- return b
- }
- combineCommonParents(mergedParents)
-
- if mergeCache != nil {
- mergeCache.set(a.Hash(), b.Hash(), M)
- }
- if ParserATNSimulatorTraceATNSim {
- fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> " + M.String())
- }
- return M
-}
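-
-// Worked illustration of the merge walk above (pX, pY, pZ are hypothetical
-// parent contexts, not defined in this file): merging sorted return-state
-// lists [1, 3] and [1, 9] visits each list once; the shared payload 1 yields
-// a single merged slot whose parents are merged recursively, then 3 and 9
-// are copied through:
-//
-//	a := NewArrayPredictionContext([]PredictionContext{pX, pY}, []int{1, 3})
-//	b := NewArrayPredictionContext([]PredictionContext{pX, pZ}, []int{1, 9})
-//	m := mergeArrays(a, b, false, nil) // m.returnStates == [1, 3, 9]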
-
-// Make a pass over all M {@code parents} and merge any {@code equals()}
-// ones.
-// /
-func combineCommonParents(parents []PredictionContext) {
- uniqueParents := make(map[PredictionContext]PredictionContext)
-
- for p := 0; p < len(parents); p++ {
- parent := parents[p]
- if uniqueParents[parent] == nil {
- uniqueParents[parent] = parent
- }
- }
- for q := 0; q < len(parents); q++ {
- parents[q] = uniqueParents[parents[q]]
- }
-}
-
-func getCachedBasePredictionContext(context PredictionContext, contextCache *PredictionContextCache, visited map[PredictionContext]PredictionContext) PredictionContext {
-
- if context.isEmpty() {
- return context
- }
- existing := visited[context]
- if existing != nil {
- return existing
- }
- existing = contextCache.Get(context)
- if existing != nil {
- visited[context] = existing
- return existing
- }
- changed := false
- parents := make([]PredictionContext, context.length())
- for i := 0; i < len(parents); i++ {
- parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited)
- if changed || parent != context.GetParent(i) {
- if !changed {
- parents = make([]PredictionContext, context.length())
- for j := 0; j < context.length(); j++ {
- parents[j] = context.GetParent(j)
- }
- changed = true
- }
- parents[i] = parent
- }
- }
- if !changed {
- contextCache.add(context)
- visited[context] = context
- return context
- }
- var updated PredictionContext
- if len(parents) == 0 {
- updated = BasePredictionContextEMPTY
- } else if len(parents) == 1 {
- updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0))
- } else {
- updated = NewArrayPredictionContext(parents, context.(*ArrayPredictionContext).GetReturnStates())
- }
- contextCache.add(updated)
- visited[updated] = updated
- visited[context] = updated
-
- return updated
-}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go
deleted file mode 100644
index 7b9b72fab..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go
+++ /dev/null
@@ -1,529 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-// This enumeration defines the prediction modes available in ANTLR 4 along with
-// utility methods for analyzing configuration sets for conflicts and/or
-// ambiguities.
-
-const (
- //
- // The SLL(*) prediction mode. This prediction mode ignores the current
- // parser context when making predictions. This is the fastest prediction
- // mode, and provides correct results for many grammars. This prediction
- // mode is more powerful than the prediction mode provided by ANTLR 3, but
- // may result in syntax errors for grammar and input combinations which are
- // not SLL.
- //
- //
- // When using this prediction mode, the parser will either return a correct
- // parse tree (i.e. the same parse tree that would be returned with the
- // {@link //LL} prediction mode), or it will Report a syntax error. If a
- // syntax error is encountered when using the {@link //SLL} prediction mode,
- // it may be due to either an actual syntax error in the input or indicate
- // that the particular combination of grammar and input requires the more
- // powerful {@link //LL} prediction abilities to complete successfully.
- //
- //
- // This prediction mode does not provide any guarantees for prediction
- // behavior for syntactically-incorrect inputs.
- //
- PredictionModeSLL = 0
- //
- // The LL(*) prediction mode. This prediction mode allows the current parser
- // context to be used for resolving SLL conflicts that occur during
- // prediction. This is the fastest prediction mode that guarantees correct
- // parse results for all combinations of grammars with syntactically correct
- // inputs.
- //
- //
- // When using this prediction mode, the parser will make correct decisions
- // for all syntactically-correct grammar and input combinations. However, in
- // cases where the grammar is truly ambiguous this prediction mode might not
- // Report a precise answer for exactly which alternatives are
- // ambiguous.
- //
- //
- // This prediction mode does not provide any guarantees for prediction
- // behavior for syntactically-incorrect inputs.
- //
- PredictionModeLL = 1
- //
- // The LL(*) prediction mode with exact ambiguity detection. In addition to
- // the correctness guarantees provided by the {@link //LL} prediction mode,
- // this prediction mode instructs the prediction algorithm to determine the
- // complete and exact set of ambiguous alternatives for every ambiguous
- // decision encountered while parsing.
- //
- //
- // This prediction mode may be used for diagnosing ambiguities during
- // grammar development. Due to the performance overhead of calculating sets
- // of ambiguous alternatives, this prediction mode should be avoided when
- // the exact results are not necessary.
- //
- //
- // This prediction mode does not provide any guarantees for prediction
- // behavior for syntactically-incorrect inputs.
- //
- PredictionModeLLExactAmbigDetection = 2
-)
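-
-// A minimal usage sketch (MyParser and MyLexer are hypothetical types from
-// generated code, so the constructor names are assumptions): callers select
-// one of the modes above through the parser's ATN simulator.
-//
-//	input := antlr.NewInputStream("a = b;")
-//	lexer := parser.NewMyLexer(input)
-//	tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
-//	p := parser.NewMyParser(tokens)
-//	// Try fast SLL first; retry with PredictionModeLL on a syntax error.
-//	p.GetInterpreter().SetPredictionMode(antlr.PredictionModeSLL)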
-
-// Computes the SLL prediction termination condition.
-//
-//
-// This method computes the SLL prediction termination condition for both of
-// the following cases.
-//
-//
-// - The usual SLL+LL fallback upon SLL conflict
-// - Pure SLL without LL fallback
-//
-//
-// COMBINED SLL+LL PARSING
-//
-// When LL-fallback is enabled upon SLL conflict, correct predictions are
-// ensured regardless of how the termination condition is computed by this
-// method. Due to the substantially higher cost of LL prediction, the
-// prediction should only fall back to LL when the additional lookahead
-// cannot lead to a unique SLL prediction.
-//
-// Assuming combined SLL+LL parsing, an SLL configuration set with only
-// conflicting subsets should fall back to full LL, even if the
-// configuration sets don't resolve to the same alternative (e.g.
-// {@code {1,2}} and {@code {3,4}}). If there is at least one non-conflicting
-// configuration, SLL could continue with the hopes that more lookahead will
-// resolve via one of those non-conflicting configurations.
-//
-// Here's the prediction termination rule then: SLL (for SLL+LL parsing)
-// stops when it sees only conflicting configuration subsets. In contrast,
-// full LL keeps going when there is uncertainty.
-//
-// HEURISTIC
-//
-// As a heuristic, we stop prediction when we see any conflicting subset
-// unless we see a state that only has one alternative associated with it.
-// The single-alt-state thing lets prediction continue upon rules like
-// (otherwise, it would admit defeat too soon):
-//
-// {@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' }
-//
-// When the ATN simulation reaches the state before {@code ';'}, it has a
-// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
-// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
-// processing this node because alternative two has another way to continue,
-// via {@code [6|2|[]]}.
-//
-// It also lets us continue for this rule:
-//
-// {@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }
-//
-// After Matching input A, we reach the stop state for rule A, state 1.
-// State 8 is the state right before B. Clearly alternatives 1 and 2
-// conflict and no amount of further lookahead will separate the two.
-// However, alternative 3 will be able to continue and so we do not stop
-// working on this state. In the previous example, we're concerned with
-// states associated with the conflicting alternatives. Here alt 3 is not
-// associated with the conflicting configs, but since we can continue
-// looking for input reasonably, don't declare the state done.
-//
-// PURE SLL PARSING
-//
-// To handle pure SLL parsing, all we have to do is make sure that we
-// combine stack contexts for configurations that differ only by semantic
-// predicate. From there, we can do the usual SLL termination heuristic.
-//
-// PREDICATES IN SLL+LL PARSING
-//
-// SLL decisions don't evaluate predicates until after they reach DFA stop
-// states because they need to create the DFA cache that works in all
-// semantic situations. In contrast, full LL evaluates predicates collected
-// during start state computation so it can ignore predicates thereafter.
-// This means that SLL termination detection can totally ignore semantic
-// predicates.
-//
-// Implementation-wise, {@link ATNConfigSet} combines stack contexts but not
-// semantic predicate contexts so we might see two configurations like the
-// following.
-//
-// {@code (s, 1, x, {}), (s, 1, x', {p})}
-//
-// Before testing these configurations against others, we have to merge
-// {@code x} and {@code x'} (without modifying the existing configurations).
-// For example, we test {@code (x+x')==x''} when looking for conflicts in
-// the following configurations.
-//
-// {@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}
-//
-// If the configuration set has predicates (as indicated by
-// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of
-// the configurations to strip out all of the predicates so that a standard
-// {@link ATNConfigSet} will merge everything ignoring predicates.
-func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool {
- // Configs in rule stop states indicate reaching the end of the decision
- // rule (local context) or end of start rule (full context). If all
- // configs meet this condition, then none of the configurations is able
- // to Match additional input so we terminate prediction.
- //
- if PredictionModeallConfigsInRuleStopStates(configs) {
- return true
- }
- // pure SLL mode parsing
- if mode == PredictionModeSLL {
-		// Don't bother with combining configs from different semantic
-		// contexts if we can fail over to full LL; costs more time
-		// since we'll often fail over anyway.
- if configs.HasSemanticContext() {
- // dup configs, tossing out semantic predicates
- dup := NewBaseATNConfigSet(false)
- for _, c := range configs.GetItems() {
-
- // NewBaseATNConfig({semanticContext:}, c)
- c = NewBaseATNConfig2(c, SemanticContextNone)
- dup.Add(c, nil)
- }
- configs = dup
- }
- // now we have combined contexts for configs with dissimilar preds
- }
- // pure SLL or combined SLL+LL mode parsing
- altsets := PredictionModegetConflictingAltSubsets(configs)
- return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs)
-}
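
For intuition, here is a self-contained sketch (not part of the runtime) of the two checks the function above combines. It models configurations as bare (state, alt) pairs, ignoring the prediction context and predicates that the real ATNConfig carries, and replays the grammar example from the comment block.

package main

import "fmt"

// config is a stand-in for ATNConfig: just an ATN state and an alternative.
type config struct{ state, alt int }

// altsByState groups alternatives per state, a simplified analogue of
// PredictionModegetConflictingAltSubsets (which also keys on context).
func altsByState(configs []config) map[int]map[int]bool {
	m := map[int]map[int]bool{}
	for _, c := range configs {
		if m[c.state] == nil {
			m[c.state] = map[int]bool{}
		}
		m[c.state][c.alt] = true
	}
	return m
}

func main() {
	// The comment's example: [12|1|[], 6|2|[], 12|2|[]]
	subsets := altsByState([]config{{12, 1}, {6, 2}, {12, 2}})

	hasConflict, hasSingleAltState := false, false
	for _, alts := range subsets {
		if len(alts) > 1 { // state 12 -> {1,2}: a conflicting subset
			hasConflict = true
		}
		if len(alts) == 1 { // state 6 -> {2}: alt 2 can still continue
			hasSingleAltState = true
		}
	}
	// SLL terminates only on a conflict with no single-alt escape hatch.
	fmt.Println(hasConflict && !hasSingleAltState) // false: keep predicting
}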
-
-// Checks if any configuration in {@code configs} is in a
-// {@link RuleStopState}. Configurations meeting this condition have reached
-// the end of the decision rule (local context) or end of start rule (full
-// context).
-//
-// @param configs the configuration set to test
-// @return {@code true} if any configuration in {@code configs} is in a
-// {@link RuleStopState}, otherwise {@code false}
-func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool {
- for _, c := range configs.GetItems() {
- if _, ok := c.GetState().(*RuleStopState); ok {
- return true
- }
- }
- return false
-}
-
-// Checks if all configurations in {@code configs} are in a
-// {@link RuleStopState}. Configurations meeting this condition have reached
-// the end of the decision rule (local context) or end of start rule (full
-// context).
-//
-// @param configs the configuration set to test
-// @return {@code true} if all configurations in {@code configs} are in a
-// {@link RuleStopState}, otherwise {@code false}
-func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool {
-
- for _, c := range configs.GetItems() {
- if _, ok := c.GetState().(*RuleStopState); !ok {
- return false
- }
- }
- return true
-}
-
-// Full LL prediction termination.
-//
-// Can we stop looking ahead during ATN simulation or is there some
-// uncertainty as to which alternative we will ultimately pick, after
-// consuming more input? Even if there are partial conflicts, we might know
-// that everything is going to resolve to the same minimum alternative. That
-// means we can stop since no more lookahead will change that fact. On the
-// other hand, there might be multiple conflicts that resolve to different
-// minimums. That means we need more look ahead to decide which of those
-// alternatives we should predict.
-//
-// The basic idea is to split the set of configurations {@code C}, into
-// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with
-// non-conflicting configurations. Two configurations conflict if they have
-// identical {@link ATNConfig//state} and {@link ATNConfig//context} values
-// but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)}
-// and {@code (s, j, ctx, _)} for {@code i!=j}.
-//
-// Reduce these configuration subsets to the set of possible alternatives.
-// You can compute the alternative subsets in one pass as follows:
-//
-// {@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in
-// {@code C} holding {@code s} and {@code ctx} fixed.
-//
-// Or in pseudo-code, for each configuration {@code c} in {@code C}:
-//
-//
-// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
-// alt and not pred
-//
-//
-// The values in {@code map} are the set of {@code A_s,ctx} sets.
-//
-// If {@code |A_s,ctx|=1} then there is no conflict associated with
-// {@code s} and {@code ctx}.
-//
-// Reduce the subsets to singletons by choosing a minimum of each subset. If
-// the union of these alternative subsets is a singleton, then no amount of
-// more lookahead will help us. We will always pick that alternative. If,
-// however, there is more than one alternative, then we are uncertain which
-// alternative to predict and must continue looking for resolution. We may
-// or may not discover an ambiguity in the future, even if there are no
-// conflicting subsets this round.
-//
-// The biggest sin is to terminate early because it means we've made a
-// decision but were uncertain as to the eventual outcome. We haven't used
-// enough lookahead. On the other hand, announcing a conflict too late is no
-// big deal; you will still have the conflict. It's just inefficient. It
-// might even scan ahead to the end of the file.
-//
-// No special consideration for semantic predicates is required because
-// predicates are evaluated on-the-fly for full LL prediction, ensuring that
-// no configuration contains a semantic context during the termination
-// check.
-//
-// CONFLICTING CONFIGS
-//
-// Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict
-// when {@code i!=j} but {@code x=x'}. Because we merge all
-// {@code (s, i, _)} configurations together, that means that there are at
-// most {@code n} configurations associated with state {@code s} for
-// {@code n} possible alternatives in the decision. The merged stacks
-// complicate the comparison of configuration contexts {@code x} and
-// {@code x'}. Sam checks to see if one is a subset of the other by calling
-// merge and checking to see if the merged result is either {@code x} or
-// {@code x'}. If the {@code x} associated with lowest alternative {@code i}
-// is the superset, then {@code i} is the only possible prediction since the
-// others resolve to {@code min(i)} as well. However, if {@code x} is
-// associated with {@code j>i} then at least one stack configuration for
-// {@code j} is not in conflict with alternative {@code i}. The algorithm
-// should keep going, looking for more lookahead due to the uncertainty.
-//
-// For simplicity, I'm doing an equality check between {@code x} and
-// {@code x'} that lets the algorithm continue to consume lookahead longer
-// than necessary. The reason I like the equality is of course the
-// simplicity but also because that is the test you need to detect the
-// alternatives that are actually in conflict.
-//
-// CONTINUE/STOP RULE
-//
-// Continue if union of resolved alternative sets from non-conflicting and
-// conflicting alternative subsets has more than one alternative. We are
-// uncertain about which alternative to predict.
-//
-// The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which
-// alternatives are still in the running for the amount of input we've
-// consumed at this point. The conflicting sets let us strip away
-// configurations that won't lead to more states because we resolve
-// conflicts to the configuration with a minimum alternate for the
-// conflicting set.
-//
-// CASES
-//
-//
-//
-// - no conflicts and more than 1 alternative in set => continue
-//
-// - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)},
-// {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set
-// {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
-// {@code {1,3}} => continue
-//
-//
-// - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
-// {@code (s', 2, y)}, {@code (s'', 1, z)} yields non-conflicting set
-// {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
-// {@code {1}} => stop and predict 1
-//
-// - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
-// {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U
-// {@code {1}} = {@code {1}} => stop and predict 1, can announce
-// ambiguity {@code {1,2}}
-//
-// - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)},
-// {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U
-// {@code {2}} = {@code {1,2}} => continue
-//
-// - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)},
-// {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U
-// {@code {3}} = {@code {1,3}} => continue
-//
-//
-//
-// EXACT AMBIGUITY DETECTION
-//
-// If all states report the same conflicting set of alternatives, then we
-// know we have the exact ambiguity set.
-//
-// |A_i|>1 and
-// A_i = A_j for all i, j.
-//
-// In other words, we continue examining lookahead until all {@code A_i}
-// have more than one alternative and all {@code A_i} are the same. If
-// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate
-// because the resolved set is {@code {1}}. To determine what the real
-// ambiguity is, we have to know whether the ambiguity is between one and
-// two or one and three so we keep going. We can only stop prediction when
-// we need exact ambiguity detection when the sets look like
-// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...
-func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int {
- return PredictionModegetSingleViableAlt(altsets)
-}
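
A worked restatement of the continue/stop rule, self-contained and independent of the runtime's BitSet: each subset is reduced to its minimum, and prediction may stop only when every minimum agrees. {{1,2},{1,3}} resolves to 1; {{1,2},{2,3}} does not resolve.

package main

import "fmt"

const invalidAlt = 0 // stand-in for ATNInvalidAltNumber

// singleViableAlt mirrors PredictionModegetSingleViableAlt over plain
// slices: the shared minimum of all subsets, or invalidAlt on disagreement.
func singleViableAlt(altsets [][]int) int {
	result := invalidAlt
	for _, alts := range altsets {
		minAlt := alts[0]
		for _, a := range alts[1:] {
			if a < minAlt {
				minAlt = a
			}
		}
		if result == invalidAlt {
			result = minAlt
		} else if result != minAlt { // minima disagree: still ambiguous
			return invalidAlt
		}
	}
	return result
}

func main() {
	fmt.Println(singleViableAlt([][]int{{1, 2}, {1, 3}})) // 1: stop, predict 1
	fmt.Println(singleViableAlt([][]int{{1, 2}, {2, 3}})) // 0: keep consuming
}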
-
-// Determines if every alternative subset in {@code altsets} contains more
-// than one alternative.
-//
-// @param altsets a collection of alternative subsets
-// @return {@code true} if every {@link BitSet} in {@code altsets} has
-// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
-func PredictionModeallSubsetsConflict(altsets []*BitSet) bool {
- return !PredictionModehasNonConflictingAltSet(altsets)
-}
-
-// Determines if any single alternative subset in {@code altsets} contains
-// exactly one alternative.
-//
-// @param altsets a collection of alternative subsets
-// @return {@code true} if {@code altsets} contains a {@link BitSet} with
-// {@link BitSet//cardinality cardinality} 1, otherwise {@code false}
-func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool {
- for i := 0; i < len(altsets); i++ {
- alts := altsets[i]
- if alts.length() == 1 {
- return true
- }
- }
- return false
-}
-
-// Determines if any single alternative subset in {@code altsets} contains
-// more than one alternative.
-//
-// @param altsets a collection of alternative subsets
-// @return {@code true} if {@code altsets} contains a {@link BitSet} with
-// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
-func PredictionModehasConflictingAltSet(altsets []*BitSet) bool {
- for i := 0; i < len(altsets); i++ {
- alts := altsets[i]
- if alts.length() > 1 {
- return true
- }
- }
- return false
-}
-
-// Determines if every alternative subset in {@code altsets} is equivalent.
-//
-// @param altsets a collection of alternative subsets
-// @return {@code true} if every member of {@code altsets} is equal to the
-// others, otherwise {@code false}
-func PredictionModeallSubsetsEqual(altsets []*BitSet) bool {
- var first *BitSet
-
- for i := 0; i < len(altsets); i++ {
- alts := altsets[i]
- if first == nil {
- first = alts
-		} else if alts != first { // NOTE: pointer comparison; the Java runtime compares set contents
- return false
- }
- }
-
- return true
-}
-
-// Returns the unique alternative predicted by all alternative subsets in
-// {@code altsets}. If no such alternative exists, this method returns
-// {@link ATN//INVALID_ALT_NUMBER}.
-//
-// @param altsets a collection of alternative subsets
-func PredictionModegetUniqueAlt(altsets []*BitSet) int {
- all := PredictionModeGetAlts(altsets)
- if all.length() == 1 {
- return all.minValue()
- }
-
- return ATNInvalidAltNumber
-}
-
-// Gets the complete set of represented alternatives for a collection of
-// alternative subsets. This method returns the union of each {@link BitSet}
-// in {@code altsets}.
-//
-// @param altsets a collection of alternative subsets
-// @return the set of represented alternatives in {@code altsets}
-func PredictionModeGetAlts(altsets []*BitSet) *BitSet {
- all := NewBitSet()
- for _, alts := range altsets {
- all.or(alts)
- }
- return all
-}
-
-// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set.
-// For each configuration {@code c} in {@code configs}:
-//
-//
-// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
-// alt and not pred
-//
-func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet {
- configToAlts := NewJMap[ATNConfig, *BitSet, *ATNAltConfigComparator[ATNConfig]](atnAltCfgEqInst)
-
- for _, c := range configs.GetItems() {
-
- alts, ok := configToAlts.Get(c)
- if !ok {
- alts = NewBitSet()
- configToAlts.Put(c, alts)
- }
- alts.add(c.GetAlt())
- }
-
- return configToAlts.Values()
-}
-
-// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set. For each
-// configuration {@code c} in {@code configs}:
-//
-//
-// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
-//
-func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict {
- m := NewAltDict()
-
- for _, c := range configs.GetItems() {
- alts := m.Get(c.GetState().String())
- if alts == nil {
- alts = NewBitSet()
- m.put(c.GetState().String(), alts)
- }
- alts.(*BitSet).add(c.GetAlt())
- }
- return m
-}
-
-func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool {
- values := PredictionModeGetStateToAltMap(configs).values()
- for i := 0; i < len(values); i++ {
- if values[i].(*BitSet).length() == 1 {
- return true
- }
- }
- return false
-}
-
-func PredictionModegetSingleViableAlt(altsets []*BitSet) int {
- result := ATNInvalidAltNumber
-
- for i := 0; i < len(altsets); i++ {
- alts := altsets[i]
- minAlt := alts.minValue()
- if result == ATNInvalidAltNumber {
- result = minAlt
- } else if result != minAlt { // more than 1 viable alt
- return ATNInvalidAltNumber
- }
- }
- return result
-}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go
deleted file mode 100644
index bfe542d09..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go
+++ /dev/null
@@ -1,216 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
- "strings"
-
- "strconv"
-)
-
-type Recognizer interface {
- GetLiteralNames() []string
- GetSymbolicNames() []string
- GetRuleNames() []string
-
- Sempred(RuleContext, int, int) bool
- Precpred(RuleContext, int) bool
-
- GetState() int
- SetState(int)
- Action(RuleContext, int, int)
- AddErrorListener(ErrorListener)
- RemoveErrorListeners()
- GetATN() *ATN
- GetErrorListenerDispatch() ErrorListener
-}
-
-type BaseRecognizer struct {
- listeners []ErrorListener
- state int
-
- RuleNames []string
- LiteralNames []string
- SymbolicNames []string
- GrammarFileName string
-}
-
-func NewBaseRecognizer() *BaseRecognizer {
- rec := new(BaseRecognizer)
- rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE}
- rec.state = -1
- return rec
-}
-
-var tokenTypeMapCache = make(map[string]int)
-var ruleIndexMapCache = make(map[string]int)
-
-func (b *BaseRecognizer) checkVersion(toolVersion string) {
- runtimeVersion := "4.12.0"
- if runtimeVersion != toolVersion {
- fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion)
- }
-}
-
-func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int) {
- panic("action not implemented on Recognizer!")
-}
-
-func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) {
- b.listeners = append(b.listeners, listener)
-}
-
-func (b *BaseRecognizer) RemoveErrorListeners() {
- b.listeners = make([]ErrorListener, 0)
-}
-
-func (b *BaseRecognizer) GetRuleNames() []string {
- return b.RuleNames
-}
-
-func (b *BaseRecognizer) GetTokenNames() []string {
- return b.LiteralNames
-}
-
-func (b *BaseRecognizer) GetSymbolicNames() []string {
- return b.SymbolicNames
-}
-
-func (b *BaseRecognizer) GetLiteralNames() []string {
- return b.LiteralNames
-}
-
-func (b *BaseRecognizer) GetState() int {
- return b.state
-}
-
-func (b *BaseRecognizer) SetState(v int) {
- b.state = v
-}
-
-//func (b *Recognizer) GetTokenTypeMap() {
-// var tokenNames = b.GetTokenNames()
-// if (tokenNames==nil) {
-// panic("The current recognizer does not provide a list of token names.")
-// }
-// var result = tokenTypeMapCache[tokenNames]
-// if(result==nil) {
-// result = tokenNames.reduce(function(o, k, i) { o[k] = i })
-// result.EOF = TokenEOF
-// tokenTypeMapCache[tokenNames] = result
-// }
-// return result
-//}
-
-// Get a map from rule names to rule indexes.
-//
-// Used for XPath and tree pattern compilation.
-func (b *BaseRecognizer) GetRuleIndexMap() map[string]int {
-
- panic("Method not defined!")
- // var ruleNames = b.GetRuleNames()
- // if (ruleNames==nil) {
- // panic("The current recognizer does not provide a list of rule names.")
- // }
- //
- // var result = ruleIndexMapCache[ruleNames]
- // if(result==nil) {
- // result = ruleNames.reduce(function(o, k, i) { o[k] = i })
- // ruleIndexMapCache[ruleNames] = result
- // }
- // return result
-}
-
-func (b *BaseRecognizer) GetTokenType(tokenName string) int {
- panic("Method not defined!")
- // var ttype = b.GetTokenTypeMap()[tokenName]
- // if (ttype !=nil) {
- // return ttype
- // } else {
- // return TokenInvalidType
- // }
-}
-
-//func (b *Recognizer) GetTokenTypeMap() map[string]int {
-// Vocabulary vocabulary = getVocabulary()
-//
-// Synchronized (tokenTypeMapCache) {
-// Map result = tokenTypeMapCache.Get(vocabulary)
-// if (result == null) {
-// result = new HashMap()
-// for (int i = 0; i < GetATN().maxTokenType; i++) {
-// String literalName = vocabulary.getLiteralName(i)
-// if (literalName != null) {
-// result.put(literalName, i)
-// }
-//
-// String symbolicName = vocabulary.GetSymbolicName(i)
-// if (symbolicName != null) {
-// result.put(symbolicName, i)
-// }
-// }
-//
-// result.put("EOF", Token.EOF)
-// result = Collections.unmodifiableMap(result)
-// tokenTypeMapCache.put(vocabulary, result)
-// }
-//
-// return result
-// }
-//}
-
-// What is the error header, normally line/character position information?
-func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string {
- line := e.GetOffendingToken().GetLine()
- column := e.GetOffendingToken().GetColumn()
- return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column)
-}
-
-// How should a token be displayed in an error message? The default
-//
-// is to display just the text, but during development you might
-// want to have a lot of information spit out. Override in that case
-// to use t.String() (which, for CommonToken, dumps everything about
-// the token). This is better than forcing you to override a method in
-// your token objects because you don't have to go modify your lexer
-// so that it creates a new Java type.
-//
-// @deprecated This method is not called by the ANTLR 4 Runtime. Specific
-// implementations of {@link ANTLRErrorStrategy} may provide a similar
-// feature when necessary. For example, see
-// {@link DefaultErrorStrategy//GetTokenErrorDisplay}.
-func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string {
- if t == nil {
- return ""
- }
- s := t.GetText()
- if s == "" {
-		if t.GetTokenType() == TokenEOF {
-			s = "<EOF>"
- } else {
- s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
- }
- }
- s = strings.Replace(s, "\t", "\\t", -1)
- s = strings.Replace(s, "\n", "\\n", -1)
- s = strings.Replace(s, "\r", "\\r", -1)
-
- return "'" + s + "'"
-}
-
-func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener {
- return NewProxyErrorListener(b.listeners)
-}
-
-// subclass needs to override these if there are sempreds or actions
-// that the ATN interp needs to execute
-func (b *BaseRecognizer) Sempred(localctx RuleContext, ruleIndex int, actionIndex int) bool {
- return true
-}
-
-func (b *BaseRecognizer) Precpred(localctx RuleContext, precedence int) bool {
- return true
-}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go
deleted file mode 100644
index 210699ba2..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-// A rule context is a record of a single rule invocation. It knows
-// which context invoked it, if any. If there is no parent context, then
-// naturally the invoking state is not valid. The parent link
-// provides a chain upwards from the current rule invocation to the root
-// of the invocation tree, forming a stack. We actually carry no
-// information about the rule associated with this context (except
-// when parsing). We keep only the state number of the invoking state from
-// the ATN submachine that invoked this context. Contrast this with the s
-// pointer inside ParserRuleContext that tracks the current state
-// being "executed" for the current rule.
-//
-// The parent contexts are useful for computing lookahead sets and
-// getting error information.
-//
-// These objects are used during parsing and prediction.
-// For the special case of parsers, we use the subclass
-// ParserRuleContext.
-//
-// @see ParserRuleContext
-//
-
-type RuleContext interface {
- RuleNode
-
- GetInvokingState() int
- SetInvokingState(int)
-
- GetRuleIndex() int
- IsEmpty() bool
-
- GetAltNumber() int
- SetAltNumber(altNumber int)
-
- String([]string, RuleContext) string
-}
-
-type BaseRuleContext struct {
- parentCtx RuleContext
- invokingState int
- RuleIndex int
-}
-
-func NewBaseRuleContext(parent RuleContext, invokingState int) *BaseRuleContext {
-
- rn := new(BaseRuleContext)
-
-	// What context invoked this rule?
-	rn.parentCtx = parent
-
-	// What state invoked the rule associated with this context?
-	// The "return address" is the followState of invokingState.
-	// If parent is nil, invokingState should be -1.
- if parent == nil {
- rn.invokingState = -1
- } else {
- rn.invokingState = invokingState
- }
-
- return rn
-}
-
-func (b *BaseRuleContext) GetBaseRuleContext() *BaseRuleContext {
- return b
-}
-
-func (b *BaseRuleContext) SetParent(v Tree) {
- if v == nil {
- b.parentCtx = nil
- } else {
- b.parentCtx = v.(RuleContext)
- }
-}
-
-func (b *BaseRuleContext) GetInvokingState() int {
- return b.invokingState
-}
-
-func (b *BaseRuleContext) SetInvokingState(t int) {
- b.invokingState = t
-}
-
-func (b *BaseRuleContext) GetRuleIndex() int {
- return b.RuleIndex
-}
-
-func (b *BaseRuleContext) GetAltNumber() int {
- return ATNInvalidAltNumber
-}
-
-func (b *BaseRuleContext) SetAltNumber(altNumber int) {}
-
-// A context is empty if there is no invoking state, meaning nobody has
-// called the current context.
-func (b *BaseRuleContext) IsEmpty() bool {
- return b.invokingState == -1
-}
-
-// Return the combined text of all child nodes. This method only considers
-// tokens which have been added to the parse tree.
-//
-// Since tokens on hidden channels (e.g. whitespace or comments) are not
-// added to the parse trees, they will not appear in the output of this
-// method.
-//
-
-func (b *BaseRuleContext) GetParent() Tree {
- return b.parentCtx
-}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go
deleted file mode 100644
index b3e38af34..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go
+++ /dev/null
@@ -1,659 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "bytes"
- "fmt"
-)
-
-//
-// Useful for rewriting out a buffered input token stream after doing some
-// augmentation or other manipulations on it.
-
-//
-// You can insert stuff, replace, and delete chunks. Note that the operations
-// are done lazily--only if you convert the buffer to a {@link String} with
-// {@link TokenStream#getText()}. This is very efficient because you are not
-// moving data around all the time. As the buffer of tokens is converted to
-// strings, the {@link #getText()} method(s) scan the input token stream and
-// check to see if there is an operation at the current index. If so, the
-// operation is done and then normal {@link String} rendering continues on the
-// buffer. This is like having multiple Turing machine instruction streams
-// (programs) operating on a single input tape. :)
-//
-
-// This rewriter makes no modifications to the token stream. It does not ask the
-// stream to fill itself up nor does it advance the input cursor. The token
-// stream {@link TokenStream#index()} will return the same value before and
-// after any {@link #getText()} call.
-
-//
-// The rewriter only works on tokens that you have in the buffer and ignores the
-// current input cursor. If you are buffering tokens on-demand, calling
-// {@link #getText()} halfway through the input will only do rewrites for those
-// tokens in the first half of the file.
-
-//
-// Since the operations are done lazily at {@link #getText}-time, operations do
-// not screw up the token index values. That is, an insert operation at token
-// index {@code i} does not change the index values for tokens
-// {@code i}+1..n-1.
-
-//
-// Because operations never actually alter the buffer, you may always get the
-// original token stream back without undoing anything. Since the instructions
-// are queued up, you can easily simulate transactions and roll back any changes
-// if there is an error just by removing instructions. For example,
-
-//
-// CharStream input = new ANTLRFileStream("input");
-// TLexer lex = new TLexer(input);
-// CommonTokenStream tokens = new CommonTokenStream(lex);
-// T parser = new T(tokens);
-// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
-// parser.startRule();
-//
-
-//
-// Then in the rules, you can execute (assuming rewriter is visible):
-
-//
-// Token t,u;
-// ...
-// rewriter.insertAfter(t, "text to put after t");
-// rewriter.insertAfter(u, "text after u");
-// System.out.println(rewriter.getText());
-//
-
-//
-// You can also have multiple "instruction streams" and get multiple rewrites
-// from a single pass over the input. Just name the instruction streams and use
-// that name again when printing the buffer. This could be useful for generating
-// a C file and also its header file--all from the same buffer:
-
-//
-// rewriter.insertAfter("pass1", t, "text to put after t");}
-// rewriter.insertAfter("pass2", u, "text after u");}
-// System.out.println(rewriter.getText("pass1"));
-// System.out.println(rewriter.getText("pass2"));
-//
-
-//
-// If you don't use named rewrite streams, a "default" stream is used as the
-// first example shows.
-
-const (
- Default_Program_Name = "default"
- Program_Init_Size = 100
- Min_Token_Index = 0
-)
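
The usage snippets in the comment above are Java; a rough Go translation follows. It is a sketch only: TLexer, TParser, and StartRule are hypothetical generated names, while the stream and rewriter calls are this package's API.

// Sketch only: TLexer, TParser, and StartRule are hypothetical generated
// names; the stream and rewriter calls are this package's real API.
func rewriteExample() {
	input := antlr.NewInputStream("int x = 1 ;")
	lex := NewTLexer(input) // hypothetical generated lexer
	tokens := antlr.NewCommonTokenStream(lex, antlr.TokenDefaultChannel)
	parser := NewTParser(tokens) // hypothetical generated parser
	rewriter := antlr.NewTokenStreamRewriter(tokens)
	parser.StartRule() // hypothetical entry rule

	// Edits are queued lazily; the token stream itself is never modified.
	rewriter.InsertAfterDefault(2, " /* rewritten */")
	fmt.Println(rewriter.GetTextDefault()) // text is materialized only here
}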
-
-// Define the rewrite operation hierarchy
-
-type RewriteOperation interface {
- // Execute the rewrite operation by possibly adding to the buffer.
- // Return the index of the next token to operate on.
- Execute(buffer *bytes.Buffer) int
- String() string
- GetInstructionIndex() int
- GetIndex() int
- GetText() string
- GetOpName() string
- GetTokens() TokenStream
- SetInstructionIndex(val int)
- SetIndex(int)
- SetText(string)
- SetOpName(string)
- SetTokens(TokenStream)
-}
-
-type BaseRewriteOperation struct {
- //Current index of rewrites list
- instruction_index int
- //Token buffer index
- index int
- //Substitution text
- text string
- //Actual operation name
- op_name string
- //Pointer to token steam
- tokens TokenStream
-}
-
-func (op *BaseRewriteOperation) GetInstructionIndex() int {
- return op.instruction_index
-}
-
-func (op *BaseRewriteOperation) GetIndex() int {
- return op.index
-}
-
-func (op *BaseRewriteOperation) GetText() string {
- return op.text
-}
-
-func (op *BaseRewriteOperation) GetOpName() string {
- return op.op_name
-}
-
-func (op *BaseRewriteOperation) GetTokens() TokenStream {
- return op.tokens
-}
-
-func (op *BaseRewriteOperation) SetInstructionIndex(val int) {
- op.instruction_index = val
-}
-
-func (op *BaseRewriteOperation) SetIndex(val int) {
- op.index = val
-}
-
-func (op *BaseRewriteOperation) SetText(val string) {
- op.text = val
-}
-
-func (op *BaseRewriteOperation) SetOpName(val string) {
- op.op_name = val
-}
-
-func (op *BaseRewriteOperation) SetTokens(val TokenStream) {
- op.tokens = val
-}
-
-func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int {
- return op.index
-}
-
-func (op *BaseRewriteOperation) String() string {
-	return fmt.Sprintf("<%s@%v:\"%s\">", // %v: Get returns a Token, not an int
- op.op_name,
- op.tokens.Get(op.GetIndex()),
- op.text,
- )
-
-}
-
-type InsertBeforeOp struct {
- BaseRewriteOperation
-}
-
-func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp {
- return &InsertBeforeOp{BaseRewriteOperation: BaseRewriteOperation{
- index: index,
- text: text,
- op_name: "InsertBeforeOp",
- tokens: stream,
- }}
-}
-
-func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int {
- buffer.WriteString(op.text)
- if op.tokens.Get(op.index).GetTokenType() != TokenEOF {
- buffer.WriteString(op.tokens.Get(op.index).GetText())
- }
- return op.index + 1
-}
-
-func (op *InsertBeforeOp) String() string {
- return op.BaseRewriteOperation.String()
-}
-
-// Distinguish between insert after/before to do the "insert afters"
-// first and then the "insert befores" at same index. Implementation
-// of "insert after" is "insert before index+1".
-
-type InsertAfterOp struct {
- BaseRewriteOperation
-}
-
-func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp {
- return &InsertAfterOp{BaseRewriteOperation: BaseRewriteOperation{
- index: index + 1,
- text: text,
- tokens: stream,
- }}
-}
-
-func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int {
- buffer.WriteString(op.text)
- if op.tokens.Get(op.index).GetTokenType() != TokenEOF {
- buffer.WriteString(op.tokens.Get(op.index).GetText())
- }
- return op.index + 1
-}
-
-func (op *InsertAfterOp) String() string {
- return op.BaseRewriteOperation.String()
-}
-
-// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
-// instructions.
-type ReplaceOp struct {
- BaseRewriteOperation
- LastIndex int
-}
-
-func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp {
- return &ReplaceOp{
- BaseRewriteOperation: BaseRewriteOperation{
- index: from,
- text: text,
- op_name: "ReplaceOp",
- tokens: stream,
- },
- LastIndex: to,
- }
-}
-
-func (op *ReplaceOp) Execute(buffer *bytes.Buffer) int {
- if op.text != "" {
- buffer.WriteString(op.text)
- }
- return op.LastIndex + 1
-}
-
-func (op *ReplaceOp) String() string {
-	if op.text == "" {
-		return fmt.Sprintf("<DeleteOp@%v..%v>",
-			op.tokens.Get(op.index), op.tokens.Get(op.LastIndex))
-	}
-	return fmt.Sprintf("<ReplaceOp@%v..%v:\"%s\">",
-		op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text)
-}
-
-type TokenStreamRewriter struct {
- //Our source stream
- tokens TokenStream
- // You may have multiple, named streams of rewrite operations.
- // I'm calling these things "programs."
- // Maps String (name) → rewrite (List)
- programs map[string][]RewriteOperation
- last_rewrite_token_indexes map[string]int
-}
-
-func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter {
- return &TokenStreamRewriter{
- tokens: tokens,
- programs: map[string][]RewriteOperation{
- Default_Program_Name: make([]RewriteOperation, 0, Program_Init_Size),
- },
- last_rewrite_token_indexes: map[string]int{},
- }
-}
-
-func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream {
- return tsr.tokens
-}
-
-// Rollback the instruction stream for a program so that
-// the indicated instruction (via instructionIndex) is no
-// longer in the stream. UNTESTED!
-func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int) {
- is, ok := tsr.programs[program_name]
- if ok {
- tsr.programs[program_name] = is[Min_Token_Index:instruction_index]
- }
-}
-
-func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int) {
- tsr.Rollback(Default_Program_Name, instruction_index)
-}
-
-// Reset the program so that no instructions exist
-func (tsr *TokenStreamRewriter) DeleteProgram(program_name string) {
-	tsr.Rollback(program_name, Min_Token_Index) // TODO: double-check; the lower bound is not included
-}
-
-func (tsr *TokenStreamRewriter) DeleteProgramDefault() {
- tsr.DeleteProgram(Default_Program_Name)
-}
-
-func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string) {
- // to insert after, just insert before next index (even if past end)
- var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens)
- rewrites := tsr.GetProgram(program_name)
- op.SetInstructionIndex(len(rewrites))
- tsr.AddToProgram(program_name, op)
-}
-
-func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string) {
- tsr.InsertAfter(Default_Program_Name, index, text)
-}
-
-func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string) {
- tsr.InsertAfter(program_name, token.GetTokenIndex(), text)
-}
-
-func (tsr *TokenStreamRewriter) InsertBefore(program_name string, index int, text string) {
- var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens)
- rewrites := tsr.GetProgram(program_name)
- op.SetInstructionIndex(len(rewrites))
- tsr.AddToProgram(program_name, op)
-}
-
-func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string) {
- tsr.InsertBefore(Default_Program_Name, index, text)
-}
-
-func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string, token Token, text string) {
- tsr.InsertBefore(program_name, token.GetTokenIndex(), text)
-}
-
-func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text string) {
- if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size() {
- panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)",
- from, to, tsr.tokens.Size()))
- }
- var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens)
- rewrites := tsr.GetProgram(program_name)
- op.SetInstructionIndex(len(rewrites))
- tsr.AddToProgram(program_name, op)
-}
-
-func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string) {
- tsr.Replace(Default_Program_Name, from, to, text)
-}
-
-func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string) {
- tsr.ReplaceDefault(index, index, text)
-}
-
-func (tsr *TokenStreamRewriter) ReplaceToken(program_name string, from, to Token, text string) {
- tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text)
-}
-
-func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string) {
- tsr.ReplaceToken(Default_Program_Name, from, to, text)
-}
-
-func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string) {
- tsr.ReplaceTokenDefault(index, index, text)
-}
-
-func (tsr *TokenStreamRewriter) Delete(program_name string, from, to int) {
- tsr.Replace(program_name, from, to, "")
-}
-
-func (tsr *TokenStreamRewriter) DeleteDefault(from, to int) {
- tsr.Delete(Default_Program_Name, from, to)
-}
-
-func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int) {
- tsr.DeleteDefault(index, index)
-}
-
-func (tsr *TokenStreamRewriter) DeleteToken(program_name string, from, to Token) {
- tsr.ReplaceToken(program_name, from, to, "")
-}
-
-func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token) {
- tsr.DeleteToken(Default_Program_Name, from, to)
-}
-
-func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(program_name string) int {
- i, ok := tsr.last_rewrite_token_indexes[program_name]
- if !ok {
- return -1
- }
- return i
-}
-
-func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int {
- return tsr.GetLastRewriteTokenIndex(Default_Program_Name)
-}
-
-func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(program_name string, i int) {
- tsr.last_rewrite_token_indexes[program_name] = i
-}
-
-func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation {
- is := make([]RewriteOperation, 0, Program_Init_Size)
- tsr.programs[name] = is
- return is
-}
-
-func (tsr *TokenStreamRewriter) AddToProgram(name string, op RewriteOperation) {
- is := tsr.GetProgram(name)
- is = append(is, op)
- tsr.programs[name] = is
-}
-
-func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation {
- is, ok := tsr.programs[name]
- if !ok {
- is = tsr.InitializeProgram(name)
- }
- return is
-}
-
-// Return the text from the original tokens altered per the
-// instructions given to this rewriter.
-func (tsr *TokenStreamRewriter) GetTextDefault() string {
- return tsr.GetText(
- Default_Program_Name,
- NewInterval(0, tsr.tokens.Size()-1))
-}
-
-// Return the text from the original tokens altered per the
-// instructions given to this rewriter.
-func (tsr *TokenStreamRewriter) GetText(program_name string, interval *Interval) string {
- rewrites := tsr.programs[program_name]
- start := interval.Start
- stop := interval.Stop
- // ensure start/end are in range
- stop = min(stop, tsr.tokens.Size()-1)
- start = max(start, 0)
-	if len(rewrites) == 0 { // a nil slice has length 0
- return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute
- }
- buf := bytes.Buffer{}
- // First, optimize instruction stream
- indexToOp := reduceToSingleOperationPerIndex(rewrites)
- // Walk buffer, executing instructions and emitting tokens
- for i := start; i <= stop && i < tsr.tokens.Size(); {
- op := indexToOp[i]
- delete(indexToOp, i) // remove so any left have index size-1
- t := tsr.tokens.Get(i)
- if op == nil {
- // no operation at that index, just dump token
- if t.GetTokenType() != TokenEOF {
- buf.WriteString(t.GetText())
- }
- i++ // move to next token
- } else {
- i = op.Execute(&buf) // execute operation and skip
- }
- }
- // include stuff after end if it's last index in buffer
- // So, if they did an insertAfter(lastValidIndex, "foo"), include
- // foo if end==lastValidIndex.
- if stop == tsr.tokens.Size()-1 {
- // Scan any remaining operations after last token
- // should be included (they will be inserts).
- for _, op := range indexToOp {
- if op.GetIndex() >= tsr.tokens.Size()-1 {
- buf.WriteString(op.GetText())
- }
- }
- }
- return buf.String()
-}
-
-// We need to combine operations and report invalid operations (like
-// overlapping replaces that are not completely nested). Inserts to
-// same index need to be combined etc... Here are the cases:
-//
-// I.i.u I.j.v leave alone, nonoverlapping
-// I.i.u I.i.v combine: Iivu
-//
-// R.i-j.u R.x-y.v | i-j in x-y delete first R
-// R.i-j.u R.i-j.v delete first R
-// R.i-j.u R.x-y.v | x-y in i-j ERROR
-// R.i-j.u R.x-y.v | boundaries overlap ERROR
-//
-// Delete special case of replace (text==null):
-// D.i-j.u D.x-y.v | boundaries overlap combine to min(left)..max(right)
-//
-// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before
-// we're not deleting i)
-// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping
-// R.x-y.v I.i.u | i in x-y ERROR
-// R.x-y.v I.x.u R.x-y.uv (combine, delete I)
-// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping
-//
-// I.i.u = insert u before op @ index i
-// R.x-y.u = replace x-y indexed tokens with u
-//
-// First we need to examine replaces. For any replace op:
-//
-// 1. wipe out any insertions before op within that range.
-// 2. Drop any replace op before that is contained completely within
-// that range.
-// 3. Throw exception upon boundary overlap with any previous replace.
-//
-// Then we can deal with inserts:
-//
-// 1. for any inserts to same index, combine even if not adjacent.
-// 2. for any prior replace with same left boundary, combine this
-// insert with replace and delete this replace.
-// 3. throw exception if index in same range as previous replace
-//
-// Don't actually delete; make op null in list. Easier to walk list.
-// Later we can throw as we add to index → op map.
-//
-// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
-// inserted stuff would be before the replace range. But, if you
-// add tokens in front of a method body '{' and then delete the method
-// body, I think the stuff before the '{' you added should disappear too.
-//
-// Return a map from token index to operation.
-func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation {
- // WALK REPLACES
- for i := 0; i < len(rewrites); i++ {
- op := rewrites[i]
- if op == nil {
- continue
- }
- rop, ok := op.(*ReplaceOp)
- if !ok {
- continue
- }
- // Wipe prior inserts within range
- for j := 0; j < i && j < len(rewrites); j++ {
- if iop, ok := rewrites[j].(*InsertBeforeOp); ok {
- if iop.index == rop.index {
- // E.g., insert before 2, delete 2..2; update replace
- // text to include insert before, kill insert
- rewrites[iop.instruction_index] = nil
- if rop.text != "" {
- rop.text = iop.text + rop.text
- } else {
- rop.text = iop.text
- }
- } else if iop.index > rop.index && iop.index <= rop.LastIndex {
- // delete insert as it's a no-op.
- rewrites[iop.instruction_index] = nil
- }
- }
- }
- // Drop any prior replaces contained within
- for j := 0; j < i && j < len(rewrites); j++ {
- if prevop, ok := rewrites[j].(*ReplaceOp); ok {
- if prevop.index >= rop.index && prevop.LastIndex <= rop.LastIndex {
- // delete replace as it's a no-op.
- rewrites[prevop.instruction_index] = nil
- continue
- }
- // throw exception unless disjoint or identical
- disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex
- // Delete special case of replace (text==null):
-			// D.i-j.u D.x-y.v | boundaries overlap combine to min(left)..max(right)
- if prevop.text == "" && rop.text == "" && !disjoint {
- rewrites[prevop.instruction_index] = nil
- rop.index = min(prevop.index, rop.index)
- rop.LastIndex = max(prevop.LastIndex, rop.LastIndex)
- println("new rop" + rop.String()) //TODO: remove console write, taken from Java version
- } else if !disjoint {
- panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String())
- }
- }
- }
- }
- // WALK INSERTS
- for i := 0; i < len(rewrites); i++ {
- op := rewrites[i]
- if op == nil {
- continue
- }
- //hack to replicate inheritance in composition
- _, iok := rewrites[i].(*InsertBeforeOp)
- _, aok := rewrites[i].(*InsertAfterOp)
- if !iok && !aok {
- continue
- }
- iop := rewrites[i]
- // combine current insert with prior if any at same index
- // deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic
- for j := 0; j < i && j < len(rewrites); j++ {
- if nextIop, ok := rewrites[j].(*InsertAfterOp); ok {
- if nextIop.index == iop.GetIndex() {
- iop.SetText(nextIop.text + iop.GetText())
- rewrites[j] = nil
- }
- }
- if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok {
- if prevIop.index == iop.GetIndex() {
- iop.SetText(iop.GetText() + prevIop.text)
- rewrites[prevIop.instruction_index] = nil
- }
- }
- }
- // look for replaces where iop.index is in range; error
- for j := 0; j < i && j < len(rewrites); j++ {
- if rop, ok := rewrites[j].(*ReplaceOp); ok {
- if iop.GetIndex() == rop.index {
- rop.text = iop.GetText() + rop.text
- rewrites[i] = nil
- continue
- }
- if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex {
- panic("insert op " + iop.String() + " within boundaries of previous " + rop.String())
- }
- }
- }
- }
- m := map[int]RewriteOperation{}
- for i := 0; i < len(rewrites); i++ {
- op := rewrites[i]
- if op == nil {
- continue
- }
- if _, ok := m[op.GetIndex()]; ok {
- panic("should only be one op per index")
- }
- m[op.GetIndex()] = op
- }
- return m
-}
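
To make the folding rules concrete, the self-contained sketch below replays the "R.x-y.v I.x.u => R.x-y.uv" case with simplified op structs: an insert-before at a replace's left boundary is folded into the replace, and inserts strictly inside the replaced range are dropped.

package main

import "fmt"

// op is a simplified stand-in for InsertBeforeOp ("I") and ReplaceOp ("R").
type op struct {
	kind        string
	index, last int
	text        string
}

// reduce applies two of the rules above: fold I at R's left boundary,
// drop I strictly inside R's range. The real code also reconciles replaces.
func reduce(ops []*op) map[int]*op {
	for i, o := range ops {
		if o == nil || o.kind != "R" {
			continue
		}
		for j := 0; j < i; j++ {
			p := ops[j]
			if p == nil || p.kind != "I" {
				continue
			}
			if p.index == o.index {
				o.text = p.text + o.text // insert-before lands in front
				ops[j] = nil
			} else if p.index > o.index && p.index <= o.last {
				ops[j] = nil // covered by the replace: a no-op
			}
		}
	}
	m := map[int]*op{}
	for _, o := range ops {
		if o != nil {
			m[o.index] = o
		}
	}
	return m
}

func main() {
	ops := []*op{
		{kind: "I", index: 2, text: "u"},
		{kind: "R", index: 2, last: 4, text: "v"},
	}
	for idx, o := range reduce(ops) {
		fmt.Printf("%d: %s %q\n", idx, o.kind, o.text) // 2: R "uv"
	}
}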
-
-/*
- Quick fixing Go lack of overloads
-*/
-
-func max(a, b int) int {
- if a > b {
- return a
- } else {
- return b
- }
-}
-func min(a, b int) int {
- if a < b {
- return a
- } else {
- return b
- }
-}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/transition.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/transition.go
deleted file mode 100644
index 36be4f733..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/transition.go
+++ /dev/null
@@ -1,428 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
- "strconv"
- "strings"
-)
-
-// atom, set, epsilon, action, predicate, rule transitions.
-//
-// This is a one way link. It emanates from a state (usually via a list of
-// transitions) and has a target state.
-//
-// Since we never have to change the ATN transitions once we construct it,
-// the transition classes can be fixed. The DFA transitions, on the other
-// hand, need to update their labels as edges are added to DFA states. We'll
-// use the term Edge for the DFA to distinguish them from ATN transitions.
-
-type Transition interface {
- getTarget() ATNState
- setTarget(ATNState)
- getIsEpsilon() bool
- getLabel() *IntervalSet
- getSerializationType() int
- Matches(int, int, int) bool
-}
-
-type BaseTransition struct {
- target ATNState
- isEpsilon bool
- label int
- intervalSet *IntervalSet
- serializationType int
-}
-
-func NewBaseTransition(target ATNState) *BaseTransition {
-
- if target == nil {
- panic("target cannot be nil.")
- }
-
- t := new(BaseTransition)
-
- t.target = target
- // Are we epsilon, action, sempred?
- t.isEpsilon = false
- t.intervalSet = nil
-
- return t
-}
-
-func (t *BaseTransition) getTarget() ATNState {
- return t.target
-}
-
-func (t *BaseTransition) setTarget(s ATNState) {
- t.target = s
-}
-
-func (t *BaseTransition) getIsEpsilon() bool {
- return t.isEpsilon
-}
-
-func (t *BaseTransition) getLabel() *IntervalSet {
- return t.intervalSet
-}
-
-func (t *BaseTransition) getSerializationType() int {
- return t.serializationType
-}
-
-func (t *BaseTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
- panic("Not implemented")
-}
-
-const (
- TransitionEPSILON = 1
- TransitionRANGE = 2
- TransitionRULE = 3
- TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}?
- TransitionATOM = 5
- TransitionACTION = 6
- TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2
- TransitionNOTSET = 8
- TransitionWILDCARD = 9
- TransitionPRECEDENCE = 10
-)
-
-var TransitionserializationNames = []string{
- "INVALID",
- "EPSILON",
- "RANGE",
- "RULE",
- "PREDICATE",
- "ATOM",
- "ACTION",
- "SET",
- "NOT_SET",
- "WILDCARD",
- "PRECEDENCE",
-}
-
-//var TransitionserializationTypes struct {
-// EpsilonTransition int
-// RangeTransition int
-// RuleTransition int
-// PredicateTransition int
-// AtomTransition int
-// ActionTransition int
-// SetTransition int
-// NotSetTransition int
-// WildcardTransition int
-// PrecedencePredicateTransition int
-//}{
-// TransitionEPSILON,
-// TransitionRANGE,
-// TransitionRULE,
-// TransitionPREDICATE,
-// TransitionATOM,
-// TransitionACTION,
-// TransitionSET,
-// TransitionNOTSET,
-// TransitionWILDCARD,
-// TransitionPRECEDENCE
-//}
-
-// TODO: make all transitions sets? no, should remove set edges
-type AtomTransition struct {
- *BaseTransition
-}
-
-func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition {
-
- t := new(AtomTransition)
- t.BaseTransition = NewBaseTransition(target)
-
-	t.label = intervalSet // The token type or character value; or, signifies special label.
- t.intervalSet = t.makeLabel()
- t.serializationType = TransitionATOM
-
- return t
-}
-
-func (t *AtomTransition) makeLabel() *IntervalSet {
- s := NewIntervalSet()
- s.addOne(t.label)
- return s
-}
-
-func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
- return t.label == symbol
-}
-
-func (t *AtomTransition) String() string {
- return strconv.Itoa(t.label)
-}
-
-type RuleTransition struct {
- *BaseTransition
-
- followState ATNState
- ruleIndex, precedence int
-}
-
-func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition {
-
- t := new(RuleTransition)
- t.BaseTransition = NewBaseTransition(ruleStart)
-
- t.ruleIndex = ruleIndex
- t.precedence = precedence
- t.followState = followState
- t.serializationType = TransitionRULE
- t.isEpsilon = true
-
- return t
-}
-
-func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
- return false
-}
-
-type EpsilonTransition struct {
- *BaseTransition
-
- outermostPrecedenceReturn int
-}
-
-func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition {
-
- t := new(EpsilonTransition)
- t.BaseTransition = NewBaseTransition(target)
-
- t.serializationType = TransitionEPSILON
- t.isEpsilon = true
- t.outermostPrecedenceReturn = outermostPrecedenceReturn
- return t
-}
-
-func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
- return false
-}
-
-func (t *EpsilonTransition) String() string {
- return "epsilon"
-}
-
-type RangeTransition struct {
- *BaseTransition
-
- start, stop int
-}
-
-func NewRangeTransition(target ATNState, start, stop int) *RangeTransition {
-
- t := new(RangeTransition)
- t.BaseTransition = NewBaseTransition(target)
-
- t.serializationType = TransitionRANGE
- t.start = start
- t.stop = stop
- t.intervalSet = t.makeLabel()
- return t
-}
-
-func (t *RangeTransition) makeLabel() *IntervalSet {
- s := NewIntervalSet()
- s.addRange(t.start, t.stop)
- return s
-}
-
-func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
- return symbol >= t.start && symbol <= t.stop
-}
-
-func (t *RangeTransition) String() string {
- var sb strings.Builder
- sb.WriteByte('\'')
- sb.WriteRune(rune(t.start))
- sb.WriteString("'..'")
- sb.WriteRune(rune(t.stop))
- sb.WriteByte('\'')
- return sb.String()
-}
-
-type AbstractPredicateTransition interface {
- Transition
- IAbstractPredicateTransitionFoo()
-}
-
-type BaseAbstractPredicateTransition struct {
- *BaseTransition
-}
-
-func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition {
-
- t := new(BaseAbstractPredicateTransition)
- t.BaseTransition = NewBaseTransition(target)
-
- return t
-}
-
-func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {}
-
-type PredicateTransition struct {
- *BaseAbstractPredicateTransition
-
- isCtxDependent bool
- ruleIndex, predIndex int
-}
-
-func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition {
-
- t := new(PredicateTransition)
- t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target)
-
- t.serializationType = TransitionPREDICATE
- t.ruleIndex = ruleIndex
- t.predIndex = predIndex
- t.isCtxDependent = isCtxDependent // e.g., $i ref in pred
- t.isEpsilon = true
- return t
-}
-
-func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
- return false
-}
-
-func (t *PredicateTransition) getPredicate() *Predicate {
- return NewPredicate(t.ruleIndex, t.predIndex, t.isCtxDependent)
-}
-
-func (t *PredicateTransition) String() string {
- return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex)
-}
-
-type ActionTransition struct {
- *BaseTransition
-
- isCtxDependent bool
- ruleIndex, actionIndex, predIndex int
-}
-
-func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition {
-
- t := new(ActionTransition)
- t.BaseTransition = NewBaseTransition(target)
-
- t.serializationType = TransitionACTION
- t.ruleIndex = ruleIndex
- t.actionIndex = actionIndex
- t.isCtxDependent = isCtxDependent // e.g., $i ref in pred
- t.isEpsilon = true
- return t
-}
-
-func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
- return false
-}
-
-func (t *ActionTransition) String() string {
- return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)
-}
-
-type SetTransition struct {
- *BaseTransition
-}
-
-func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition {
-
- t := new(SetTransition)
- t.BaseTransition = NewBaseTransition(target)
-
- t.serializationType = TransitionSET
- if set != nil {
- t.intervalSet = set
- } else {
- t.intervalSet = NewIntervalSet()
- t.intervalSet.addOne(TokenInvalidType)
- }
-
- return t
-}
-
-func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
- return t.intervalSet.contains(symbol)
-}
-
-func (t *SetTransition) String() string {
- return t.intervalSet.String()
-}
-
-type NotSetTransition struct {
- *SetTransition
-}
-
-func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition {
-
- t := new(NotSetTransition)
-
- t.SetTransition = NewSetTransition(target, set)
-
- t.serializationType = TransitionNOTSET
-
- return t
-}
-
-func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
- return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol)
-}
-
-func (t *NotSetTransition) String() string {
- return "~" + t.intervalSet.String()
-}
-
-type WildcardTransition struct {
- *BaseTransition
-}
-
-func NewWildcardTransition(target ATNState) *WildcardTransition {
-
- t := new(WildcardTransition)
- t.BaseTransition = NewBaseTransition(target)
-
- t.serializationType = TransitionWILDCARD
- return t
-}
-
-func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
- return symbol >= minVocabSymbol && symbol <= maxVocabSymbol
-}
-
-func (t *WildcardTransition) String() string {
- return "."
-}
-
-type PrecedencePredicateTransition struct {
- *BaseAbstractPredicateTransition
-
- precedence int
-}
-
-func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition {
-
- t := new(PrecedencePredicateTransition)
- t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target)
-
- t.serializationType = TransitionPRECEDENCE
- t.precedence = precedence
- t.isEpsilon = true
-
- return t
-}
-
-func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
- return false
-}
-
-func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate {
- return NewPrecedencePredicate(t.precedence)
-}
-
-func (t *PrecedencePredicateTransition) String() string {
- return fmt.Sprint(t.precedence) + " >= _p"
-}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go
deleted file mode 100644
index 85b4f137b..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go
+++ /dev/null
@@ -1,253 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-// The basic notion of a tree has a parent, a payload, and a list of children.
-// It is the most abstract interface for all the trees used by ANTLR.
-///
-
-var TreeInvalidInterval = NewInterval(-1, -2)
-
-type Tree interface {
- GetParent() Tree
- SetParent(Tree)
- GetPayload() interface{}
- GetChild(i int) Tree
- GetChildCount() int
- GetChildren() []Tree
-}
-
-type SyntaxTree interface {
- Tree
-
- GetSourceInterval() *Interval
-}
-
-type ParseTree interface {
- SyntaxTree
-
- Accept(Visitor ParseTreeVisitor) interface{}
- GetText() string
-
- ToStringTree([]string, Recognizer) string
-}
-
-type RuleNode interface {
- ParseTree
-
- GetRuleContext() RuleContext
- GetBaseRuleContext() *BaseRuleContext
-}
-
-type TerminalNode interface {
- ParseTree
-
- GetSymbol() Token
-}
-
-type ErrorNode interface {
- TerminalNode
-
- errorNode()
-}
-
-type ParseTreeVisitor interface {
- Visit(tree ParseTree) interface{}
- VisitChildren(node RuleNode) interface{}
- VisitTerminal(node TerminalNode) interface{}
- VisitErrorNode(node ErrorNode) interface{}
-}
-
-type BaseParseTreeVisitor struct{}
-
-var _ ParseTreeVisitor = &BaseParseTreeVisitor{}
-
-func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return tree.Accept(v) }
-func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{} { return nil }
-func (v *BaseParseTreeVisitor) VisitTerminal(node TerminalNode) interface{} { return nil }
-func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{} { return nil }
-
-// TODO
-//func (this ParseTreeVisitor) Visit(ctx) {
-// if (Utils.isArray(ctx)) {
-// self := this
-// return ctx.map(function(child) { return VisitAtom(self, child)})
-// } else {
-// return VisitAtom(this, ctx)
-// }
-//}
-//
-//func VisitAtom(Visitor, ctx) {
-// if (ctx.parser == nil) { //is terminal
-// return
-// }
-//
-// name := ctx.parser.ruleNames[ctx.ruleIndex]
-// funcName := "Visit" + Utils.titleCase(name)
-//
-// return Visitor[funcName](ctx)
-//}
-
-type ParseTreeListener interface {
- VisitTerminal(node TerminalNode)
- VisitErrorNode(node ErrorNode)
- EnterEveryRule(ctx ParserRuleContext)
- ExitEveryRule(ctx ParserRuleContext)
-}
-
-type BaseParseTreeListener struct{}
-
-var _ ParseTreeListener = &BaseParseTreeListener{}
-
-func (l *BaseParseTreeListener) VisitTerminal(node TerminalNode) {}
-func (l *BaseParseTreeListener) VisitErrorNode(node ErrorNode) {}
-func (l *BaseParseTreeListener) EnterEveryRule(ctx ParserRuleContext) {}
-func (l *BaseParseTreeListener) ExitEveryRule(ctx ParserRuleContext) {}
-
-type TerminalNodeImpl struct {
- parentCtx RuleContext
-
- symbol Token
-}
-
-var _ TerminalNode = &TerminalNodeImpl{}
-
-func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl {
- tn := new(TerminalNodeImpl)
-
- tn.parentCtx = nil
- tn.symbol = symbol
-
- return tn
-}
-
-func (t *TerminalNodeImpl) GetChild(i int) Tree {
- return nil
-}
-
-func (t *TerminalNodeImpl) GetChildren() []Tree {
- return nil
-}
-
-func (t *TerminalNodeImpl) SetChildren(tree []Tree) {
- panic("Cannot set children on terminal node")
-}
-
-func (t *TerminalNodeImpl) GetSymbol() Token {
- return t.symbol
-}
-
-func (t *TerminalNodeImpl) GetParent() Tree {
- return t.parentCtx
-}
-
-func (t *TerminalNodeImpl) SetParent(tree Tree) {
- t.parentCtx = tree.(RuleContext)
-}
-
-func (t *TerminalNodeImpl) GetPayload() interface{} {
- return t.symbol
-}
-
-func (t *TerminalNodeImpl) GetSourceInterval() *Interval {
- if t.symbol == nil {
- return TreeInvalidInterval
- }
- tokenIndex := t.symbol.GetTokenIndex()
- return NewInterval(tokenIndex, tokenIndex)
-}
-
-func (t *TerminalNodeImpl) GetChildCount() int {
- return 0
-}
-
-func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} {
- return v.VisitTerminal(t)
-}
-
-func (t *TerminalNodeImpl) GetText() string {
- return t.symbol.GetText()
-}
-
-func (t *TerminalNodeImpl) String() string {
- if t.symbol.GetTokenType() == TokenEOF {
- return ""
- }
-
- return t.symbol.GetText()
-}
-
-func (t *TerminalNodeImpl) ToStringTree(s []string, r Recognizer) string {
- return t.String()
-}
-
-// Represents a token that was consumed during reSynchronization
-// rather than during a valid Match operation. For example,
-// we will create this kind of a node during single token insertion
-// and deletion as well as during "consume until error recovery set"
-// upon no viable alternative exceptions.
-
-type ErrorNodeImpl struct {
- *TerminalNodeImpl
-}
-
-var _ ErrorNode = &ErrorNodeImpl{}
-
-func NewErrorNodeImpl(token Token) *ErrorNodeImpl {
- en := new(ErrorNodeImpl)
- en.TerminalNodeImpl = NewTerminalNodeImpl(token)
- return en
-}
-
-func (e *ErrorNodeImpl) errorNode() {}
-
-func (e *ErrorNodeImpl) Accept(v ParseTreeVisitor) interface{} {
- return v.VisitErrorNode(e)
-}
-
-type ParseTreeWalker struct {
-}
-
-func NewParseTreeWalker() *ParseTreeWalker {
- return new(ParseTreeWalker)
-}
-
-// Performs a walk on the given parse tree starting at the root and going down recursively
-// with depth-first search. On each node, EnterRule is called before
-// recursively walking down into child nodes, then
-// ExitRule is called after the recursive call to wind up.
-func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) {
- switch tt := t.(type) {
- case ErrorNode:
- listener.VisitErrorNode(tt)
- case TerminalNode:
- listener.VisitTerminal(tt)
- default:
- p.EnterRule(listener, t.(RuleNode))
- for i := 0; i < t.GetChildCount(); i++ {
- child := t.GetChild(i)
- p.Walk(listener, child)
- }
- p.ExitRule(listener, t.(RuleNode))
- }
-}
-
-// Enters a grammar rule by first triggering the generic event {@link ParseTreeListener//EnterEveryRule}
-// then by triggering the event specific to the given parse tree node
-func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) {
- ctx := r.GetRuleContext().(ParserRuleContext)
- listener.EnterEveryRule(ctx)
- ctx.EnterRule(listener)
-}
-
-// Exits a grammar rule by first triggering the event specific to the given parse tree node
-// then by triggering the generic event {@link ParseTreeListener//ExitEveryRule}
-func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) {
- ctx := r.GetRuleContext().(ParserRuleContext)
- ctx.ExitRule(listener)
- listener.ExitEveryRule(ctx)
-}
-
-var ParseTreeWalkerDefault = NewParseTreeWalker()
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils.go
deleted file mode 100644
index 9fad5d916..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils.go
+++ /dev/null
@@ -1,352 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "bytes"
- "errors"
- "fmt"
- "math/bits"
- "strconv"
- "strings"
-)
-
-func intMin(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
-
-func intMax(a, b int) int {
- if a > b {
- return a
- }
- return b
-}
-
-// A simple integer stack
-
-type IntStack []int
-
-var ErrEmptyStack = errors.New("Stack is empty")
-
-func (s *IntStack) Pop() (int, error) {
- l := len(*s) - 1
- if l < 0 {
- return 0, ErrEmptyStack
- }
- v := (*s)[l]
- *s = (*s)[0:l]
- return v, nil
-}
-
-func (s *IntStack) Push(e int) {
- *s = append(*s, e)
-}
-
-type comparable interface {
- Equals(other Collectable[any]) bool
-}
-
-func standardEqualsFunction(a Collectable[any], b Collectable[any]) bool {
-
- return a.Equals(b)
-}
-
-func standardHashFunction(a interface{}) int {
- if h, ok := a.(hasher); ok {
- return h.Hash()
- }
-
- panic("Not Hasher")
-}
-
-type hasher interface {
- Hash() int
-}
-
-const bitsPerWord = 64
-
-func indexForBit(bit int) int {
- return bit / bitsPerWord
-}
-
-func wordForBit(data []uint64, bit int) uint64 {
- idx := indexForBit(bit)
- if idx >= len(data) {
- return 0
- }
- return data[idx]
-}
-
-func maskForBit(bit int) uint64 {
- return uint64(1) << (bit % bitsPerWord)
-}
-
-func wordsNeeded(bit int) int {
- return indexForBit(bit) + 1
-}
-
-type BitSet struct {
- data []uint64
-}
-
-func NewBitSet() *BitSet {
- return &BitSet{}
-}
-
-func (b *BitSet) add(value int) {
- idx := indexForBit(value)
- if idx >= len(b.data) {
- size := wordsNeeded(value)
- data := make([]uint64, size)
- copy(data, b.data)
- b.data = data
- }
- b.data[idx] |= maskForBit(value)
-}
-
-func (b *BitSet) clear(index int) {
- idx := indexForBit(index)
- if idx >= len(b.data) {
- return
- }
- b.data[idx] &= ^maskForBit(index)
-}
-
-func (b *BitSet) or(set *BitSet) {
- // Get min size necessary to represent the bits in both sets.
- bLen := b.minLen()
- setLen := set.minLen()
- maxLen := intMax(bLen, setLen)
- if maxLen > len(b.data) {
- // Increase the size of len(b.data) to repesent the bits in both sets.
- data := make([]uint64, maxLen)
- copy(data, b.data)
- b.data = data
- }
- // len(b.data) is at least setLen.
- for i := 0; i < setLen; i++ {
- b.data[i] |= set.data[i]
- }
-}
-
-func (b *BitSet) remove(value int) {
- b.clear(value)
-}
-
-func (b *BitSet) contains(value int) bool {
- idx := indexForBit(value)
- if idx >= len(b.data) {
- return false
- }
- return (b.data[idx] & maskForBit(value)) != 0
-}
-
-func (b *BitSet) minValue() int {
- for i, v := range b.data {
- if v == 0 {
- continue
- }
- return i*bitsPerWord + bits.TrailingZeros64(v)
- }
- return 2147483647
-}
-
-func (b *BitSet) equals(other interface{}) bool {
- otherBitSet, ok := other.(*BitSet)
- if !ok {
- return false
- }
-
- if b == otherBitSet {
- return true
- }
-
- // We only compare set bits, so we cannot rely on the two slices having the same size. Its
- // possible for two BitSets to have different slice lengths but the same set bits. So we only
- // compare the relevant words and ignore the trailing zeros.
- bLen := b.minLen()
- otherLen := otherBitSet.minLen()
-
- if bLen != otherLen {
- return false
- }
-
- for i := 0; i < bLen; i++ {
- if b.data[i] != otherBitSet.data[i] {
- return false
- }
- }
-
- return true
-}
-
-func (b *BitSet) minLen() int {
- for i := len(b.data); i > 0; i-- {
- if b.data[i-1] != 0 {
- return i
- }
- }
- return 0
-}
-
-func (b *BitSet) length() int {
- cnt := 0
- for _, val := range b.data {
- cnt += bits.OnesCount64(val)
- }
- return cnt
-}
-
-func (b *BitSet) String() string {
- vals := make([]string, 0, b.length())
-
- for i, v := range b.data {
- for v != 0 {
- n := bits.TrailingZeros64(v)
- vals = append(vals, strconv.Itoa(i*bitsPerWord+n))
- v &= ^(uint64(1) << n)
- }
- }
-
- return "{" + strings.Join(vals, ", ") + "}"
-}
-
-type AltDict struct {
- data map[string]interface{}
-}
-
-func NewAltDict() *AltDict {
- d := new(AltDict)
- d.data = make(map[string]interface{})
- return d
-}
-
-func (a *AltDict) Get(key string) interface{} {
- key = "k-" + key
- return a.data[key]
-}
-
-func (a *AltDict) put(key string, value interface{}) {
- key = "k-" + key
- a.data[key] = value
-}
-
-func (a *AltDict) values() []interface{} {
- vs := make([]interface{}, len(a.data))
- i := 0
- for _, v := range a.data {
- vs[i] = v
- i++
- }
- return vs
-}
-
-type DoubleDict struct {
- data map[int]map[int]interface{}
-}
-
-func NewDoubleDict() *DoubleDict {
- dd := new(DoubleDict)
- dd.data = make(map[int]map[int]interface{})
- return dd
-}
-
-func (d *DoubleDict) Get(a, b int) interface{} {
- data := d.data[a]
-
- if data == nil {
- return nil
- }
-
- return data[b]
-}
-
-func (d *DoubleDict) set(a, b int, o interface{}) {
- data := d.data[a]
-
- if data == nil {
- data = make(map[int]interface{})
- d.data[a] = data
- }
-
- data[b] = o
-}
-
-func EscapeWhitespace(s string, escapeSpaces bool) string {
-
- s = strings.Replace(s, "\t", "\\t", -1)
- s = strings.Replace(s, "\n", "\\n", -1)
- s = strings.Replace(s, "\r", "\\r", -1)
- if escapeSpaces {
- s = strings.Replace(s, " ", "\u00B7", -1)
- }
- return s
-}
-
-func TerminalNodeToStringArray(sa []TerminalNode) []string {
- st := make([]string, len(sa))
-
- for i, s := range sa {
- st[i] = fmt.Sprintf("%v", s)
- }
-
- return st
-}
-
-func PrintArrayJavaStyle(sa []string) string {
- var buffer bytes.Buffer
-
- buffer.WriteString("[")
-
- for i, s := range sa {
- buffer.WriteString(s)
- if i != len(sa)-1 {
- buffer.WriteString(", ")
- }
- }
-
- buffer.WriteString("]")
-
- return buffer.String()
-}
-
-// murmur hash
-func murmurInit(seed int) int {
- return seed
-}
-
-func murmurUpdate(h int, value int) int {
- const c1 uint32 = 0xCC9E2D51
- const c2 uint32 = 0x1B873593
- const r1 uint32 = 15
- const r2 uint32 = 13
- const m uint32 = 5
- const n uint32 = 0xE6546B64
-
- k := uint32(value)
- k *= c1
- k = (k << r1) | (k >> (32 - r1))
- k *= c2
-
- hash := uint32(h) ^ k
- hash = (hash << r2) | (hash >> (32 - r2))
- hash = hash*m + n
- return int(hash)
-}
-
-func murmurFinish(h int, numberOfWords int) int {
- var hash = uint32(h)
- hash ^= uint32(numberOfWords) << 2
- hash ^= hash >> 16
- hash *= 0x85ebca6b
- hash ^= hash >> 13
- hash *= 0xc2b2ae35
- hash ^= hash >> 16
-
- return int(hash)
-}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go
deleted file mode 100644
index c9bd6751e..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go
+++ /dev/null
@@ -1,235 +0,0 @@
-package antlr
-
-import "math"
-
-const (
- _initalCapacity = 16
- _initalBucketCapacity = 8
- _loadFactor = 0.75
-)
-
-type Set interface {
- Add(value interface{}) (added interface{})
- Len() int
- Get(value interface{}) (found interface{})
- Contains(value interface{}) bool
- Values() []interface{}
- Each(f func(interface{}) bool)
-}
-
-type array2DHashSet struct {
- buckets [][]Collectable[any]
- hashcodeFunction func(interface{}) int
- equalsFunction func(Collectable[any], Collectable[any]) bool
-
- n int // How many elements in set
- threshold int // when to expand
-
- currentPrime int // jump by 4 primes each expand or whatever
- initialBucketCapacity int
-}
-
-func (as *array2DHashSet) Each(f func(interface{}) bool) {
- if as.Len() < 1 {
- return
- }
-
- for _, bucket := range as.buckets {
- for _, o := range bucket {
- if o == nil {
- break
- }
- if !f(o) {
- return
- }
- }
- }
-}
-
-func (as *array2DHashSet) Values() []interface{} {
- if as.Len() < 1 {
- return nil
- }
-
- values := make([]interface{}, 0, as.Len())
- as.Each(func(i interface{}) bool {
- values = append(values, i)
- return true
- })
- return values
-}
-
-func (as *array2DHashSet) Contains(value Collectable[any]) bool {
- return as.Get(value) != nil
-}
-
-func (as *array2DHashSet) Add(value Collectable[any]) interface{} {
- if as.n > as.threshold {
- as.expand()
- }
- return as.innerAdd(value)
-}
-
-func (as *array2DHashSet) expand() {
- old := as.buckets
-
- as.currentPrime += 4
-
- var (
- newCapacity = len(as.buckets) << 1
- newTable = as.createBuckets(newCapacity)
- newBucketLengths = make([]int, len(newTable))
- )
-
- as.buckets = newTable
- as.threshold = int(float64(newCapacity) * _loadFactor)
-
- for _, bucket := range old {
- if bucket == nil {
- continue
- }
-
- for _, o := range bucket {
- if o == nil {
- break
- }
-
- b := as.getBuckets(o)
- bucketLength := newBucketLengths[b]
- var newBucket []Collectable[any]
- if bucketLength == 0 {
- // new bucket
- newBucket = as.createBucket(as.initialBucketCapacity)
- newTable[b] = newBucket
- } else {
- newBucket = newTable[b]
- if bucketLength == len(newBucket) {
- // expand
- newBucketCopy := make([]Collectable[any], len(newBucket)<<1)
- copy(newBucketCopy[:bucketLength], newBucket)
- newBucket = newBucketCopy
- newTable[b] = newBucket
- }
- }
-
- newBucket[bucketLength] = o
- newBucketLengths[b]++
- }
- }
-}
-
-func (as *array2DHashSet) Len() int {
- return as.n
-}
-
-func (as *array2DHashSet) Get(o Collectable[any]) interface{} {
- if o == nil {
- return nil
- }
-
- b := as.getBuckets(o)
- bucket := as.buckets[b]
- if bucket == nil { // no bucket
- return nil
- }
-
- for _, e := range bucket {
- if e == nil {
- return nil // empty slot; not there
- }
- if as.equalsFunction(e, o) {
- return e
- }
- }
-
- return nil
-}
-
-func (as *array2DHashSet) innerAdd(o Collectable[any]) interface{} {
- b := as.getBuckets(o)
-
- bucket := as.buckets[b]
-
- // new bucket
- if bucket == nil {
- bucket = as.createBucket(as.initialBucketCapacity)
- bucket[0] = o
-
- as.buckets[b] = bucket
- as.n++
- return o
- }
-
- // look for it in bucket
- for i := 0; i < len(bucket); i++ {
- existing := bucket[i]
- if existing == nil { // empty slot; not there, add.
- bucket[i] = o
- as.n++
- return o
- }
-
- if as.equalsFunction(existing, o) { // found existing, quit
- return existing
- }
- }
-
- // full bucket, expand and add to end
- oldLength := len(bucket)
- bucketCopy := make([]Collectable[any], oldLength<<1)
- copy(bucketCopy[:oldLength], bucket)
- bucket = bucketCopy
- as.buckets[b] = bucket
- bucket[oldLength] = o
- as.n++
- return o
-}
-
-func (as *array2DHashSet) getBuckets(value Collectable[any]) int {
- hash := as.hashcodeFunction(value)
- return hash & (len(as.buckets) - 1)
-}
-
-func (as *array2DHashSet) createBuckets(cap int) [][]Collectable[any] {
- return make([][]Collectable[any], cap)
-}
-
-func (as *array2DHashSet) createBucket(cap int) []Collectable[any] {
- return make([]Collectable[any], cap)
-}
-
-func newArray2DHashSetWithCap(
- hashcodeFunction func(interface{}) int,
- equalsFunction func(Collectable[any], Collectable[any]) bool,
- initCap int,
- initBucketCap int,
-) *array2DHashSet {
- if hashcodeFunction == nil {
- hashcodeFunction = standardHashFunction
- }
-
- if equalsFunction == nil {
- equalsFunction = standardEqualsFunction
- }
-
- ret := &array2DHashSet{
- hashcodeFunction: hashcodeFunction,
- equalsFunction: equalsFunction,
-
- n: 0,
- threshold: int(math.Floor(_initalCapacity * _loadFactor)),
-
- currentPrime: 1,
- initialBucketCapacity: initBucketCap,
- }
-
- ret.buckets = ret.createBuckets(initCap)
- return ret
-}
-
-func newArray2DHashSet(
- hashcodeFunction func(interface{}) int,
- equalsFunction func(Collectable[any], Collectable[any]) bool,
-) *array2DHashSet {
- return newArray2DHashSetWithCap(hashcodeFunction, equalsFunction, _initalCapacity, _initalBucketCapacity)
-}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/.gitignore b/vendor/github.com/antlr4-go/antlr/v4/.gitignore
new file mode 100644
index 000000000..38ea34ff5
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/.gitignore
@@ -0,0 +1,18 @@
+### Go template
+
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+
+# Go workspace file
+go.work
+
+# No Goland stuff in this repo
+.idea
diff --git a/vendor/github.com/antlr4-go/antlr/v4/LICENSE b/vendor/github.com/antlr4-go/antlr/v4/LICENSE
new file mode 100644
index 000000000..a22292eb5
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012-2023 The ANTLR Project. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+3. Neither name of copyright holders nor the names of its contributors
+may be used to endorse or promote products derived from this software
+without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/antlr4-go/antlr/v4/README.md b/vendor/github.com/antlr4-go/antlr/v4/README.md
new file mode 100644
index 000000000..03e5b83eb
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/README.md
@@ -0,0 +1,54 @@
+[Go Report Card](https://goreportcard.com/report/github.com/antlr4-go/antlr)
+[Go Reference](https://pkg.go.dev/github.com/antlr4-go/antlr)
+[Latest release](https://github.com/antlr4-go/antlr/releases/latest)
+[Commit activity](https://github.com/antlr4-go/antlr/commit-activity)
+[License: BSD 3-Clause](https://opensource.org/licenses/BSD-3-Clause)
+# ANTLR4 Go Runtime Module Repo
+
+IMPORTANT: Please submit PRs via a clone of the https://github.com/antlr/antlr4 repo, and not here.
+
+ - Do not submit PRs or any change requests to this repo
+ - This repo is read only and is updated by the ANTLR team to create a new release of the Go Runtime for ANTLR
+ - This repo contains the Go runtime that your generated projects should import
+
+## Introduction
+
+This repo contains the official modules for the Go Runtime for ANTLR. It is a copy of the runtime maintained
+at: https://github.com/antlr/antlr4/tree/master/runtime/Go/antlr and is automatically updated by the ANTLR team to create
+the official Go runtime release only. No development work is carried out in this repo and PRs are not accepted here.
+
+The dev branch of this repo is kept in sync with the dev branch of the main ANTLR repo and is updated periodically.
+
+### Why?
+
+The `go get` command is unable to retrieve the Go runtime when it is embedded so
+deeply in the main repo. A `go get` against the `antlr/antlr4` repo, while retrieving the correct source code for the runtime,
+does not correctly resolve tags and will create a reference in your `go.mod` file that is unclear, does not
+upgrade smoothly, and causes confusion.
+
+For instance, the current Go runtime release, which is tagged with v4.13.0 in `antlr/antlr4`, is retrieved by `go get` as:
+
+```go
+require (
+ github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230219212500-1f9a474cc2dc
+)
+```
+
+Where you would expect to see:
+
+```go
+require (
+ github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.13.0
+)
+```
+
+The decision was therefore taken to create a separate org and repo to hold the official Go runtime for ANTLR,
+from which users can expect `go get` to behave as expected.
+
+
+# Documentation
+Please read the official documentation at: https://github.com/antlr/antlr4/blob/master/doc/index.md for tips on
+migrating existing projects to use the new module location and for information on how to use the Go runtime in
+general.
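+
+As a quick illustration (a sketch only; go will resolve whatever release is current), pulling the runtime into
+a project is the standard two-step flow. First record the requirement:
+
+```sh
+go get github.com/antlr4-go/antlr/v4
+```
+
+Then import it from your Go source as usual:
+
+```go
+import "github.com/antlr4-go/antlr/v4"
+```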
diff --git a/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go b/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go
new file mode 100644
index 000000000..3bb4fd7c4
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go
@@ -0,0 +1,102 @@
+/*
+Package antlr implements the Go version of the ANTLR 4 runtime.
+
+# The ANTLR Tool
+
+ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing,
+or translating structured text or binary files. It's widely used to build languages, tools, and frameworks.
+From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface
+(or visitor) that makes it easy to respond to the recognition of phrases of interest.
+
+# Go Runtime
+
+In versions 4.11.x and prior, the Go runtime was not properly versioned for go modules. After this point, the runtime
+source code to be imported was held in the `runtime/Go/antlr/v4` directory, and the go.mod file was updated to reflect the version of
+ANTLR4 that it is compatible with (i.e. it uses the /v4 path).
+
+However, this was found to be problematic, as it meant that with the runtime embedded so far underneath the root
+of the repo, the `go get` and related commands could not properly resolve the location of the go runtime source code.
+This meant that the reference to the runtime in your `go.mod` file would refer to the correct source code, but would not
+list the release tag such as @4.12.0 - this was confusing, to say the least.
+
+As of 4.12.1, the runtime is now available as a go module in its own repo, and can be imported as `github.com/antlr4-go/antlr`
+(the go get command should also be used with this path). See the main documentation for the ANTLR4 project for more information,
+which is available at [ANTLR docs]. The documentation for using the Go runtime is available at [Go runtime docs].
+
+This means that if you are using the source code without modules, you should also use the source code in the [new repo],
+though we highly recommend that you use go modules, as they are now idiomatic for Go.
+
+I am aware that this change will prove Hyrum's Law, but am prepared to live with it for the common good.
+
+Go runtime author: [Jim Idle] jimi@idle.ws
+
+# Code Generation
+
+ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a
+runtime library, written specifically to support the generated code in the target language. This library is the
+runtime for the Go target.
+
+To generate code for the Go target, it is generally recommended to place the source grammar files in a package of
+their own and to use the `.sh` script method of generating code via the go generate directive. In that same directory
+it is usual, though not required, to place the antlr tool that should be used to generate the code. That does mean
+that the antlr tool JAR file will be checked in to your source code control though, so you are, of course, free to use any other
+way of specifying the version of the ANTLR tool to use, such as aliasing in `.zshrc` or equivalent, or a profile in
+your IDE, or configuration in your CI system. Checking in the jar does mean that it is easy to reproduce the build as
+it was at any point in its history.
+
+Here is a general/recommended template for an ANTLR based recognizer in Go:
+
+ .
+ ├── parser
+ │ ├── mygrammar.g4
+ │ ├── antlr-4.12.1-complete.jar
+ │ ├── generate.go
+ │ └── generate.sh
+ ├── parsing - generated code goes here
+ │ └── error_listeners.go
+ ├── go.mod
+ ├── go.sum
+ ├── main.go
+ └── main_test.go
+
+Make sure that the package statement in your grammar file(s) reflects the go package the generated code will exist in.
+
+The generate.go file then looks like this:
+
+ package parser
+
+ //go:generate ./generate.sh
+
+And the generate.sh file will look similar to this:
+
+ #!/bin/sh
+
+ alias antlr4='java -Xmx500M -cp "./antlr-4.12.1-complete.jar:$CLASSPATH" org.antlr.v4.Tool'
+ antlr4 -Dlanguage=Go -no-visitor -package parsing *.g4
+
+depending on whether you want visitors or listeners or any other ANTLR options. Note that another option here
+is to generate the code into a package of its own (the parsing package in the layout above), which keeps the
+generated files apart from your hand-written code.
+
+From the command line at the root of your source package (the location of go.mod) you can then simply issue the command:
+
+ go generate ./...
+
+This will generate the code for the parser and place it in the parsing package. You can then use the generated code
+by importing the parsing package.
+
+There are no hard and fast rules on this. It is just a recommendation. You can generate the code in any way and to anywhere you like.
+
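+As an illustration, a minimal main.go that drives the generated recognizer might look like the sketch below.
+This is a sketch only: the parsing package, the MyGrammar lexer/parser names, the Document start rule, and
+MyListener are hypothetical stand-ins for whatever your own grammar generates.
+
+	package main
+
+	import (
+		"github.com/antlr4-go/antlr/v4"
+
+		"myproject/parsing" // hypothetical package holding the generated code
+	)
+
+	func main() {
+		// Standard pipeline: input -> lexer -> token stream -> parser.
+		input := antlr.NewInputStream("text to parse")
+		lexer := parsing.NewMyGrammarLexer(input)
+		tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
+		parser := parsing.NewMyGrammarParser(tokens)
+
+		// Invoke the (hypothetical) start rule, then walk the tree with a listener.
+		tree := parser.Document()
+		antlr.ParseTreeWalkerDefault.Walk(&parsing.MyListener{}, tree)
+	}
+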
+# Copyright Notice
+
+Copyright (c) 2012-2023 The ANTLR Project. All rights reserved.
+
+Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root.
+
+[target languages]: https://github.com/antlr/antlr4/tree/master/runtime
+[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt
+[ANTLR docs]: https://github.com/antlr/antlr4/blob/master/doc/index.md
+[new repo]: https://github.com/antlr4-go/antlr
+[Jim Idle]: https://github.com/jimidle
+[Go runtime docs]: https://github.com/antlr/antlr4/blob/master/doc/go-target.md
+*/
+package antlr
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go b/vendor/github.com/antlr4-go/antlr/v4/atn.go
similarity index 94%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go
rename to vendor/github.com/antlr4-go/antlr/v4/atn.go
index 98010d2e6..cdeefed24 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/atn.go
@@ -20,10 +20,11 @@ var ATNInvalidAltNumber int
// [ALL(*)]: https://www.antlr.org/papers/allstar-techreport.pdf
// [Recursive Transition Network]: https://en.wikipedia.org/wiki/Recursive_transition_network
type ATN struct {
- // DecisionToState is the decision points for all rules, subrules, optional
- // blocks, ()+, ()*, etc. Each subrule/rule is a decision point, and we must track them so we
+
+ // DecisionToState is the decision points for all rules, sub-rules, optional
+ // blocks, ()+, ()*, etc. Each sub-rule/rule is a decision point, and we must track them, so we
// can go back later and build DFA predictors for them. This includes
- // all the rules, subrules, optional blocks, ()+, ()* etc...
+ // all the rules, sub-rules, optional blocks, ()+, ()* etc...
DecisionToState []DecisionState
// grammarType is the ATN type and is used for deserializing ATNs from strings.
@@ -51,6 +52,8 @@ type ATN struct {
// specified, and otherwise is nil.
ruleToTokenType []int
+ // states is a list of all states in the ATN, ordered by state number.
+ //
states []ATNState
mu sync.Mutex
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_config.go b/vendor/github.com/antlr4-go/antlr/v4/atn_config.go
new file mode 100644
index 000000000..a83f25d34
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/atn_config.go
@@ -0,0 +1,335 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+)
+
+const (
+ lexerConfig = iota // Indicates that this ATNConfig is for a lexer
+ parserConfig // Indicates that this ATNConfig is for a parser
+)
+
+// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic
+// context). The syntactic context is a graph-structured stack node whose
+// path(s) to the root is the rule invocation(s) chain used to arrive in the
+// state. The semantic context is the tree of semantic predicates encountered
+// before reaching an ATN state.
+type ATNConfig struct {
+ precedenceFilterSuppressed bool
+ state ATNState
+ alt int
+ context *PredictionContext
+ semanticContext SemanticContext
+ reachesIntoOuterContext int
+ cType int // lexerConfig or parserConfig
+ lexerActionExecutor *LexerActionExecutor
+ passedThroughNonGreedyDecision bool
+}
+
+// NewATNConfig6 creates a new ATNConfig instance given a state, alt and context only
+func NewATNConfig6(state ATNState, alt int, context *PredictionContext) *ATNConfig {
+ return NewATNConfig5(state, alt, context, SemanticContextNone)
+}
+
+// NewATNConfig5 creates a new ATNConfig instance given a state, alt, context and semantic context
+func NewATNConfig5(state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) *ATNConfig {
+ if semanticContext == nil {
+ panic("semanticContext cannot be nil") // TODO: Necessary?
+ }
+
+ pac := &ATNConfig{}
+ pac.state = state
+ pac.alt = alt
+ pac.context = context
+ pac.semanticContext = semanticContext
+ pac.cType = parserConfig
+ return pac
+}
+
+// NewATNConfig4 creates a new ATNConfig instance given an existing config, and a state only
+func NewATNConfig4(c *ATNConfig, state ATNState) *ATNConfig {
+ return NewATNConfig(c, state, c.GetContext(), c.GetSemanticContext())
+}
+
+// NewATNConfig3 creates a new ATNConfig instance given an existing config, a state and a semantic context
+func NewATNConfig3(c *ATNConfig, state ATNState, semanticContext SemanticContext) *ATNConfig {
+ return NewATNConfig(c, state, c.GetContext(), semanticContext)
+}
+
+// NewATNConfig2 creates a new ATNConfig instance given an existing config, and a context only
+func NewATNConfig2(c *ATNConfig, semanticContext SemanticContext) *ATNConfig {
+ return NewATNConfig(c, c.GetState(), c.GetContext(), semanticContext)
+}
+
+// NewATNConfig1 creates a new ATNConfig instance given an existing config, a state, and a context only
+func NewATNConfig1(c *ATNConfig, state ATNState, context *PredictionContext) *ATNConfig {
+ return NewATNConfig(c, state, context, c.GetSemanticContext())
+}
+
+// NewATNConfig creates a new ATNConfig instance given an existing config, a state, a context and a semantic context, other 'constructors'
+// are just wrappers around this one.
+func NewATNConfig(c *ATNConfig, state ATNState, context *PredictionContext, semanticContext SemanticContext) *ATNConfig {
+ if semanticContext == nil {
+ panic("semanticContext cannot be nil") // TODO: Remove this - probably put here for some bug that is now fixed
+ }
+ b := &ATNConfig{}
+ b.InitATNConfig(c, state, c.GetAlt(), context, semanticContext)
+ b.cType = parserConfig
+ return b
+}
+
+func (a *ATNConfig) InitATNConfig(c *ATNConfig, state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) {
+
+ a.state = state
+ a.alt = alt
+ a.context = context
+ a.semanticContext = semanticContext
+ a.reachesIntoOuterContext = c.GetReachesIntoOuterContext()
+ a.precedenceFilterSuppressed = c.getPrecedenceFilterSuppressed()
+}
+
+func (a *ATNConfig) getPrecedenceFilterSuppressed() bool {
+ return a.precedenceFilterSuppressed
+}
+
+func (a *ATNConfig) setPrecedenceFilterSuppressed(v bool) {
+ a.precedenceFilterSuppressed = v
+}
+
+// GetState returns the ATN state associated with this configuration
+func (a *ATNConfig) GetState() ATNState {
+ return a.state
+}
+
+// GetAlt returns the alternative associated with this configuration
+func (a *ATNConfig) GetAlt() int {
+ return a.alt
+}
+
+// SetContext sets the rule invocation stack associated with this configuration
+func (a *ATNConfig) SetContext(v *PredictionContext) {
+ a.context = v
+}
+
+// GetContext returns the rule invocation stack associated with this configuration
+func (a *ATNConfig) GetContext() *PredictionContext {
+ return a.context
+}
+
+// GetSemanticContext returns the semantic context associated with this configuration
+func (a *ATNConfig) GetSemanticContext() SemanticContext {
+ return a.semanticContext
+}
+
+// GetReachesIntoOuterContext returns the count of references to an outer context from this configuration
+func (a *ATNConfig) GetReachesIntoOuterContext() int {
+ return a.reachesIntoOuterContext
+}
+
+// SetReachesIntoOuterContext sets the count of references to an outer context from this configuration
+func (a *ATNConfig) SetReachesIntoOuterContext(v int) {
+ a.reachesIntoOuterContext = v
+}
+
+// Equals is the default comparison function for an ATNConfig when no specialist implementation is required
+// for a collection.
+//
+// An ATN configuration is equal to another if both have the same state, they
+// predict the same alternative, and syntactic/semantic contexts are the same.
+func (a *ATNConfig) Equals(o Collectable[*ATNConfig]) bool {
+ switch a.cType {
+ case lexerConfig:
+ return a.LEquals(o)
+ case parserConfig:
+ return a.PEquals(o)
+ default:
+ panic("Invalid ATNConfig type")
+ }
+}
+
+// PEquals is the default comparison function for a Parser ATNConfig when no specialist implementation is required
+// for a collection.
+//
+// An ATN configuration is equal to another if both have the same state, they
+// predict the same alternative, and syntactic/semantic contexts are the same.
+func (a *ATNConfig) PEquals(o Collectable[*ATNConfig]) bool {
+ var other, ok = o.(*ATNConfig)
+
+ if !ok {
+ return false
+ }
+ if a == other {
+ return true
+ } else if other == nil {
+ return false
+ }
+
+ var equal bool
+
+ if a.context == nil {
+ equal = other.context == nil
+ } else {
+ equal = a.context.Equals(other.context)
+ }
+
+ var (
+ nums = a.state.GetStateNumber() == other.state.GetStateNumber()
+ alts = a.alt == other.alt
+ cons = a.semanticContext.Equals(other.semanticContext)
+ sups = a.precedenceFilterSuppressed == other.precedenceFilterSuppressed
+ )
+
+ return nums && alts && cons && sups && equal
+}
+
+// Hash is the default hash function for a parser ATNConfig, when no specialist hash function
+// is required for a collection
+func (a *ATNConfig) Hash() int {
+ switch a.cType {
+ case lexerConfig:
+ return a.LHash()
+ case parserConfig:
+ return a.PHash()
+ default:
+ panic("Invalid ATNConfig type")
+ }
+}
+
+// PHash is the default hash function for a parser ATNConfig, when no specialist hash function
+// is required for a collection
+func (a *ATNConfig) PHash() int {
+ var c int
+ if a.context != nil {
+ c = a.context.Hash()
+ }
+
+ h := murmurInit(7)
+ h = murmurUpdate(h, a.state.GetStateNumber())
+ h = murmurUpdate(h, a.alt)
+ h = murmurUpdate(h, c)
+ h = murmurUpdate(h, a.semanticContext.Hash())
+ return murmurFinish(h, 4)
+}
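+
+// Note: the murmur helpers used above (murmurInit, murmurUpdate, murmurFinish) follow the usual
+// init -> update-per-word -> finish-with-word-count pattern, which is why PHash mixes exactly four
+// words (state number, alt, context hash, semantic-context hash) and finishes with a count of 4.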
+
+// String returns a string representation of the ATNConfig, usually used for debugging purposes
+func (a *ATNConfig) String() string {
+ var s1, s2, s3 string
+
+ if a.context != nil {
+ s1 = ",[" + fmt.Sprint(a.context) + "]"
+ }
+
+ if a.semanticContext != SemanticContextNone {
+ s2 = "," + fmt.Sprint(a.semanticContext)
+ }
+
+ if a.reachesIntoOuterContext > 0 {
+ s3 = ",up=" + fmt.Sprint(a.reachesIntoOuterContext)
+ }
+
+ return fmt.Sprintf("(%v,%v%v%v%v)", a.state, a.alt, s1, s2, s3)
+}
+
+func NewLexerATNConfig6(state ATNState, alt int, context *PredictionContext) *ATNConfig {
+ lac := &ATNConfig{}
+ lac.state = state
+ lac.alt = alt
+ lac.context = context
+ lac.semanticContext = SemanticContextNone
+ lac.cType = lexerConfig
+ return lac
+}
+
+func NewLexerATNConfig4(c *ATNConfig, state ATNState) *ATNConfig {
+ lac := &ATNConfig{}
+ lac.lexerActionExecutor = c.lexerActionExecutor
+ lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state)
+ lac.InitATNConfig(c, state, c.GetAlt(), c.GetContext(), c.GetSemanticContext())
+ lac.cType = lexerConfig
+ return lac
+}
+
+func NewLexerATNConfig3(c *ATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *ATNConfig {
+ lac := &ATNConfig{}
+ lac.lexerActionExecutor = lexerActionExecutor
+ lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state)
+ lac.InitATNConfig(c, state, c.GetAlt(), c.GetContext(), c.GetSemanticContext())
+ lac.cType = lexerConfig
+ return lac
+}
+
+func NewLexerATNConfig2(c *ATNConfig, state ATNState, context *PredictionContext) *ATNConfig {
+ lac := &ATNConfig{}
+ lac.lexerActionExecutor = c.lexerActionExecutor
+ lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state)
+ lac.InitATNConfig(c, state, c.GetAlt(), context, c.GetSemanticContext())
+ lac.cType = lexerConfig
+ return lac
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewLexerATNConfig1(state ATNState, alt int, context *PredictionContext) *ATNConfig {
+ lac := &ATNConfig{}
+ lac.state = state
+ lac.alt = alt
+ lac.context = context
+ lac.semanticContext = SemanticContextNone
+ lac.cType = lexerConfig
+ return lac
+}
+
+// LHash is the default hash function for Lexer ATNConfig objects, it can be used directly or via
+// the default comparator [ObjEqComparator].
+func (a *ATNConfig) LHash() int {
+ var f int
+ if a.passedThroughNonGreedyDecision {
+ f = 1
+ } else {
+ f = 0
+ }
+ h := murmurInit(7)
+ h = murmurUpdate(h, a.state.GetStateNumber())
+ h = murmurUpdate(h, a.alt)
+ h = murmurUpdate(h, a.context.Hash())
+ h = murmurUpdate(h, a.semanticContext.Hash())
+ h = murmurUpdate(h, f)
+ h = murmurUpdate(h, a.lexerActionExecutor.Hash())
+ h = murmurFinish(h, 6)
+ return h
+}
+
+// LEquals is the default comparison function for Lexer ATNConfig objects, it can be used directly or via
+// the default comparator [ObjEqComparator].
+func (a *ATNConfig) LEquals(other Collectable[*ATNConfig]) bool {
+ var otherT, ok = other.(*ATNConfig)
+ if !ok {
+ return false
+ } else if a == otherT {
+ return true
+ } else if a.passedThroughNonGreedyDecision != otherT.passedThroughNonGreedyDecision {
+ return false
+ }
+
+ switch {
+ case a.lexerActionExecutor == nil && otherT.lexerActionExecutor == nil:
+ return true
+ case a.lexerActionExecutor != nil && otherT.lexerActionExecutor != nil:
+ if !a.lexerActionExecutor.Equals(otherT.lexerActionExecutor) {
+ return false
+ }
+ default:
+ return false // One, but not both, is nil
+ }
+
+ return a.PEquals(otherT)
+}
+
+func checkNonGreedyDecision(source *ATNConfig, target ATNState) bool {
+ var ds, ok = target.(DecisionState)
+
+ return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy())
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go b/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go
new file mode 100644
index 000000000..52dbaf806
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go
@@ -0,0 +1,301 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+)
+
+// ATNConfigSet is a specialized set of ATNConfig that tracks information
+// about its elements and can combine similar configurations using a
+// graph-structured stack.
+type ATNConfigSet struct {
+ cachedHash int
+
+ // configLookup is used to determine whether two ATNConfigSets are equal. We
+ // need all configurations with the same (s, i, _, semctx) to be equal. A key
+ // effectively doubles the number of objects associated with ATNConfigs. All
+ // keys are hashed by (s, i, _, pi), not including the context. Wiped out when
+ // read-only because a set becomes a DFA state.
+ configLookup *JStore[*ATNConfig, Comparator[*ATNConfig]]
+
+ // configs is the list of added elements that did not match an existing key in configLookup
+ configs []*ATNConfig
+
+ // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
+ // info together because it saves re-computation. Can we track conflicts as they
+ // are added to save scanning configs later?
+ conflictingAlts *BitSet
+
+ // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates
+ // we hit a pred while computing a closure operation. Do not make a DFA state
+ // from the ATNConfigSet in this case. TODO: How is this used by parsers?
+ dipsIntoOuterContext bool
+
+ // fullCtx is whether it is part of a full context LL prediction. Used to
+ // determine how to merge $. It is a wildcard with SLL, but not for an LL
+ // context merge.
+ fullCtx bool
+
+ // Used in parser and lexer. In lexer, it indicates we hit a pred
+ // while computing a closure operation. Don't make a DFA state from this set.
+ hasSemanticContext bool
+
+ // readOnly is whether it is read-only. Do not
+ // allow any code to manipulate the set if true because DFA states will point at
+ // sets and those must not change. If not, protect other fields; conflictingAlts
+ // in particular, which is assigned after readOnly.
+ readOnly bool
+
+ // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
+ // info together because it saves re-computation. Can we track conflicts as they
+ // are added to save scanning configs later?
+ uniqueAlt int
+}
+
+// Alts returns the combined set of alts for all the configurations in this set.
+func (b *ATNConfigSet) Alts() *BitSet {
+ alts := NewBitSet()
+ for _, it := range b.configs {
+ alts.add(it.GetAlt())
+ }
+ return alts
+}
+
+// NewATNConfigSet creates a new ATNConfigSet instance.
+func NewATNConfigSet(fullCtx bool) *ATNConfigSet {
+ return &ATNConfigSet{
+ cachedHash: -1,
+ configLookup: NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfCompInst, ATNConfigLookupCollection, "NewATNConfigSet()"),
+ fullCtx: fullCtx,
+ }
+}
+
+// Add merges contexts with existing configs for (s, i, pi, _),
+// where 's' is the ATNConfig.state, 'i' is the ATNConfig.alt, and
+// 'pi' is the [ATNConfig].semanticContext.
+//
+// We use (s,i,pi) as the key.
+// Updates dipsIntoOuterContext and hasSemanticContext when necessary.
+func (b *ATNConfigSet) Add(config *ATNConfig, mergeCache *JPCMap) bool {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+
+ if config.GetSemanticContext() != SemanticContextNone {
+ b.hasSemanticContext = true
+ }
+
+ if config.GetReachesIntoOuterContext() > 0 {
+ b.dipsIntoOuterContext = true
+ }
+
+ existing, present := b.configLookup.Put(config)
+
+ // The config was not already in the set
+ //
+ if !present {
+ b.cachedHash = -1
+ b.configs = append(b.configs, config) // Track order here
+ return true
+ }
+
+ // Merge a previous (s, i, pi, _) with it and save the result
+ rootIsWildcard := !b.fullCtx
+ merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache)
+
+ // No need to check for existing.context because config.context is in the cache,
+ // since the only way to create new graphs is the "call rule" and here. We cache
+ // at both places.
+ existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext()))
+
+ // Preserve the precedence filter suppression during the merge
+ if config.getPrecedenceFilterSuppressed() {
+ existing.setPrecedenceFilterSuppressed(true)
+ }
+
+ // Replace the context because there is no need to do alt mapping
+ existing.SetContext(merged)
+
+ return true
+}
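+
+// An in-package usage sketch, for illustration only: Add de-duplicates on the (state, alt, semantic
+// context) key and merges the prediction contexts of colliding configs, so adding a duplicate mutates
+// the existing entry rather than growing the set:
+//
+//	set := NewATNConfigSet(false)              // SLL-style set (fullCtx == false)
+//	set.Add(NewATNConfig6(state, 1, ctx), nil) // a nil merge cache is accepted, as AddAll does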
+
+// GetStates returns the set of states represented by all configurations in this config set
+func (b *ATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] {
+
+ // states uses the standard comparator and Hash() provided by the ATNState instance
+ //
+ states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst, ATNStateCollection, "ATNConfigSet.GetStates()")
+
+ for i := 0; i < len(b.configs); i++ {
+ states.Put(b.configs[i].GetState())
+ }
+
+ return states
+}
+
+func (b *ATNConfigSet) GetPredicates() []SemanticContext {
+ predicates := make([]SemanticContext, 0)
+
+ for i := 0; i < len(b.configs); i++ {
+ c := b.configs[i].GetSemanticContext()
+
+ if c != SemanticContextNone {
+ predicates = append(predicates, c)
+ }
+ }
+
+ return predicates
+}
+
+func (b *ATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+
+ // An empty config lookup indicates that no optimization is possible
+ if b.configLookup == nil || b.configLookup.Len() == 0 {
+ return
+ }
+
+ for i := 0; i < len(b.configs); i++ {
+ config := b.configs[i]
+ config.SetContext(interpreter.getCachedContext(config.GetContext()))
+ }
+}
+
+func (b *ATNConfigSet) AddAll(coll []*ATNConfig) bool {
+ for i := 0; i < len(coll); i++ {
+ b.Add(coll[i], nil)
+ }
+
+ return false
+}
+
+// Compare reports whether the configs are equal. They are only equal if they are in the same order and
+// each pair's Equals function returns true. Java uses ArrayList.equals(), which requires the same order.
+func (b *ATNConfigSet) Compare(bs *ATNConfigSet) bool {
+ if len(b.configs) != len(bs.configs) {
+ return false
+ }
+ for i := 0; i < len(b.configs); i++ {
+ if !b.configs[i].Equals(bs.configs[i]) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (b *ATNConfigSet) Equals(other Collectable[ATNConfig]) bool {
+ if b == other {
+ return true
+ } else if _, ok := other.(*ATNConfigSet); !ok {
+ return false
+ }
+
+ other2 := other.(*ATNConfigSet)
+ var eca bool
+ switch {
+ case b.conflictingAlts == nil && other2.conflictingAlts == nil:
+ eca = true
+ case b.conflictingAlts != nil && other2.conflictingAlts != nil:
+ eca = b.conflictingAlts.equals(other2.conflictingAlts)
+ }
+ return b.configs != nil &&
+ b.fullCtx == other2.fullCtx &&
+ b.uniqueAlt == other2.uniqueAlt &&
+ eca &&
+ b.hasSemanticContext == other2.hasSemanticContext &&
+ b.dipsIntoOuterContext == other2.dipsIntoOuterContext &&
+ b.Compare(other2)
+}
+
+func (b *ATNConfigSet) Hash() int {
+ if b.readOnly {
+ if b.cachedHash == -1 {
+ b.cachedHash = b.hashCodeConfigs()
+ }
+
+ return b.cachedHash
+ }
+
+ return b.hashCodeConfigs()
+}
+
+func (b *ATNConfigSet) hashCodeConfigs() int {
+ h := 1
+ for _, config := range b.configs {
+ h = 31*h + config.Hash()
+ }
+ return h
+}
+
+func (b *ATNConfigSet) Contains(item *ATNConfig) bool {
+ if b.readOnly {
+ panic("not implemented for read-only sets")
+ }
+ if b.configLookup == nil {
+ return false
+ }
+ return b.configLookup.Contains(item)
+}
+
+func (b *ATNConfigSet) ContainsFast(item *ATNConfig) bool {
+ return b.Contains(item)
+}
+
+func (b *ATNConfigSet) Clear() {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+ b.configs = make([]*ATNConfig, 0)
+ b.cachedHash = -1
+ b.configLookup = NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfCompInst, ATNConfigLookupCollection, "NewATNConfigSet()")
+}
+
+func (b *ATNConfigSet) String() string {
+
+ s := "["
+
+ for i, c := range b.configs {
+ s += c.String()
+
+ if i != len(b.configs)-1 {
+ s += ", "
+ }
+ }
+
+ s += "]"
+
+ if b.hasSemanticContext {
+ s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext)
+ }
+
+ if b.uniqueAlt != ATNInvalidAltNumber {
+ s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt)
+ }
+
+ if b.conflictingAlts != nil {
+ s += ",conflictingAlts=" + b.conflictingAlts.String()
+ }
+
+ if b.dipsIntoOuterContext {
+ s += ",dipsIntoOuterContext"
+ }
+
+ return s
+}
+
+// NewOrderedATNConfigSet creates a config set with a slightly different Hash/Equal pair
+// for use in lexers.
+func NewOrderedATNConfigSet() *ATNConfigSet {
+ return &ATNConfigSet{
+ cachedHash: -1,
+ // This set uses the standard Hash() and Equals() from ATNConfig
+ configLookup: NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ATNConfigCollection, "ATNConfigSet.NewOrderedATNConfigSet()"),
+ fullCtx: false,
+ }
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go b/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go
similarity index 86%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go
rename to vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go
index 3c975ec7b..bdb30b362 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go
@@ -20,7 +20,7 @@ func (opts *ATNDeserializationOptions) ReadOnly() bool {
func (opts *ATNDeserializationOptions) SetReadOnly(readOnly bool) {
if opts.readOnly {
- panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
+ panic(errors.New("cannot mutate read only ATNDeserializationOptions"))
}
opts.readOnly = readOnly
}
@@ -31,7 +31,7 @@ func (opts *ATNDeserializationOptions) VerifyATN() bool {
func (opts *ATNDeserializationOptions) SetVerifyATN(verifyATN bool) {
if opts.readOnly {
- panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
+ panic(errors.New("cannot mutate read only ATNDeserializationOptions"))
}
opts.verifyATN = verifyATN
}
@@ -42,11 +42,12 @@ func (opts *ATNDeserializationOptions) GenerateRuleBypassTransitions() bool {
func (opts *ATNDeserializationOptions) SetGenerateRuleBypassTransitions(generateRuleBypassTransitions bool) {
if opts.readOnly {
- panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
+ panic(errors.New("cannot mutate read only ATNDeserializationOptions"))
}
opts.generateRuleBypassTransitions = generateRuleBypassTransitions
}
+//goland:noinspection GoUnusedExportedFunction
func DefaultATNDeserializationOptions() *ATNDeserializationOptions {
return NewATNDeserializationOptions(&defaultATNDeserializationOptions)
}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go b/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go
similarity index 97%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go
rename to vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go
index 3888856b4..2dcb9ae11 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go
@@ -35,6 +35,7 @@ func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer {
return &ATNDeserializer{options: options}
}
+//goland:noinspection GoUnusedFunction
func stringInSlice(a string, list []string) int {
for i, b := range list {
if b == a {
@@ -193,7 +194,7 @@ func (a *ATNDeserializer) readModes(atn *ATN) {
}
}
-func (a *ATNDeserializer) readSets(atn *ATN, sets []*IntervalSet) []*IntervalSet {
+func (a *ATNDeserializer) readSets(_ *ATN, sets []*IntervalSet) []*IntervalSet {
m := a.readInt()
// Preallocate the needed capacity.
@@ -350,7 +351,7 @@ func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) {
bypassStart.endState = bypassStop
- atn.defineDecisionState(bypassStart.BaseDecisionState)
+ atn.defineDecisionState(&bypassStart.BaseDecisionState)
bypassStop.startState = bypassStart
@@ -450,7 +451,7 @@ func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) {
continue
}
- // We analyze the ATN to determine if a ATN decision state is the
+ // We analyze the [ATN] to determine if an ATN decision state is the
// decision for the closure block that determines whether a
// precedence rule should continue or complete.
if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule {
@@ -553,7 +554,7 @@ func (a *ATNDeserializer) readInt() int {
return int(v) // data is 32 bits but int is at least that big
}
-func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, src, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition {
+func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, _, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition {
target := atn.states[trg]
switch typeIndex {
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go b/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go
new file mode 100644
index 000000000..afe6c9f80
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go
@@ -0,0 +1,41 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewATNConfigSet(false))
+
+type IATNSimulator interface {
+ SharedContextCache() *PredictionContextCache
+ ATN() *ATN
+ DecisionToDFA() []*DFA
+}
+
+type BaseATNSimulator struct {
+ atn *ATN
+ sharedContextCache *PredictionContextCache
+ decisionToDFA []*DFA
+}
+
+func (b *BaseATNSimulator) getCachedContext(context *PredictionContext) *PredictionContext {
+ if b.sharedContextCache == nil {
+ return context
+ }
+
+ //visited := NewJMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionVisitedCollection, "Visit map in getCachedContext()")
+ visited := NewVisitRecord()
+ return getCachedBasePredictionContext(context, b.sharedContextCache, visited)
+}
+
+func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache {
+ return b.sharedContextCache
+}
+
+func (b *BaseATNSimulator) ATN() *ATN {
+ return b.atn
+}
+
+func (b *BaseATNSimulator) DecisionToDFA() []*DFA {
+ return b.decisionToDFA
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_state.go b/vendor/github.com/antlr4-go/antlr/v4/atn_state.go
new file mode 100644
index 000000000..2ae5807cd
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/atn_state.go
@@ -0,0 +1,461 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+)
+
+// Constants for serialization.
+const (
+ ATNStateInvalidType = 0
+ ATNStateBasic = 1
+ ATNStateRuleStart = 2
+ ATNStateBlockStart = 3
+ ATNStatePlusBlockStart = 4
+ ATNStateStarBlockStart = 5
+ ATNStateTokenStart = 6
+ ATNStateRuleStop = 7
+ ATNStateBlockEnd = 8
+ ATNStateStarLoopBack = 9
+ ATNStateStarLoopEntry = 10
+ ATNStatePlusLoopBack = 11
+ ATNStateLoopEnd = 12
+
+ ATNStateInvalidStateNumber = -1
+)
+
+//goland:noinspection GoUnusedGlobalVariable
+var ATNStateInitialNumTransitions = 4
+
+type ATNState interface {
+ GetEpsilonOnlyTransitions() bool
+
+ GetRuleIndex() int
+ SetRuleIndex(int)
+
+ GetNextTokenWithinRule() *IntervalSet
+ SetNextTokenWithinRule(*IntervalSet)
+
+ GetATN() *ATN
+ SetATN(*ATN)
+
+ GetStateType() int
+
+ GetStateNumber() int
+ SetStateNumber(int)
+
+ GetTransitions() []Transition
+ SetTransitions([]Transition)
+ AddTransition(Transition, int)
+
+ String() string
+ Hash() int
+ Equals(Collectable[ATNState]) bool
+}
+
+type BaseATNState struct {
+ // NextTokenWithinRule caches lookahead during parsing. Not used during construction.
+ NextTokenWithinRule *IntervalSet
+
+ // atn is the current ATN.
+ atn *ATN
+
+ epsilonOnlyTransitions bool
+
+ // ruleIndex tracks the Rule index because there are no Rule objects at runtime.
+ ruleIndex int
+
+ stateNumber int
+
+ stateType int
+
+ // Track the transitions emanating from this ATN state.
+ transitions []Transition
+}
+
+func NewATNState() *BaseATNState {
+ return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType}
+}
+
+func (as *BaseATNState) GetRuleIndex() int {
+ return as.ruleIndex
+}
+
+func (as *BaseATNState) SetRuleIndex(v int) {
+ as.ruleIndex = v
+}
+func (as *BaseATNState) GetEpsilonOnlyTransitions() bool {
+ return as.epsilonOnlyTransitions
+}
+
+func (as *BaseATNState) GetATN() *ATN {
+ return as.atn
+}
+
+func (as *BaseATNState) SetATN(atn *ATN) {
+ as.atn = atn
+}
+
+func (as *BaseATNState) GetTransitions() []Transition {
+ return as.transitions
+}
+
+func (as *BaseATNState) SetTransitions(t []Transition) {
+ as.transitions = t
+}
+
+func (as *BaseATNState) GetStateType() int {
+ return as.stateType
+}
+
+func (as *BaseATNState) GetStateNumber() int {
+ return as.stateNumber
+}
+
+func (as *BaseATNState) SetStateNumber(stateNumber int) {
+ as.stateNumber = stateNumber
+}
+
+func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet {
+ return as.NextTokenWithinRule
+}
+
+func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) {
+ as.NextTokenWithinRule = v
+}
+
+func (as *BaseATNState) Hash() int {
+ return as.stateNumber
+}
+
+func (as *BaseATNState) String() string {
+ return strconv.Itoa(as.stateNumber)
+}
+
+func (as *BaseATNState) Equals(other Collectable[ATNState]) bool {
+ if ot, ok := other.(ATNState); ok {
+ return as.stateNumber == ot.GetStateNumber()
+ }
+
+ return false
+}
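+
+// Note: Hash and Equals above identify an ATN state by its state number alone, so for collection
+// purposes two states with the same number are treated as the same state.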
+
+func (as *BaseATNState) isNonGreedyExitState() bool {
+ return false
+}
+
+func (as *BaseATNState) AddTransition(trans Transition, index int) {
+ if len(as.transitions) == 0 {
+ as.epsilonOnlyTransitions = trans.getIsEpsilon()
+ } else if as.epsilonOnlyTransitions != trans.getIsEpsilon() {
+ _, _ = fmt.Fprintf(os.Stderr, "ATN state %d has both epsilon and non-epsilon transitions.\n", as.stateNumber)
+ as.epsilonOnlyTransitions = false
+ }
+
+ // TODO: Check code for already present compared to the Java equivalent
+ //alreadyPresent := false
+ //for _, t := range as.transitions {
+ // if t.getTarget().GetStateNumber() == trans.getTarget().GetStateNumber() {
+ // if t.getLabel() != nil && trans.getLabel() != nil && trans.getLabel().Equals(t.getLabel()) {
+ // alreadyPresent = true
+ // break
+ // }
+ // } else if t.getIsEpsilon() && trans.getIsEpsilon() {
+ // alreadyPresent = true
+ // break
+ // }
+ //}
+ //if !alreadyPresent {
+ if index == -1 {
+ as.transitions = append(as.transitions, trans)
+ } else {
+ as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...)
+ // TODO: as.transitions.splice(index, 1, trans)
+ }
+ //} else {
+ // _, _ = fmt.Fprintf(os.Stderr, "Transition already present in state %d\n", as.stateNumber)
+ //}
+}
+
+type BasicState struct {
+ BaseATNState
+}
+
+func NewBasicState() *BasicState {
+ return &BasicState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateBasic,
+ },
+ }
+}
+
+type DecisionState interface {
+ ATNState
+
+ getDecision() int
+ setDecision(int)
+
+ getNonGreedy() bool
+ setNonGreedy(bool)
+}
+
+type BaseDecisionState struct {
+ BaseATNState
+ decision int
+ nonGreedy bool
+}
+
+func NewBaseDecisionState() *BaseDecisionState {
+ return &BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateBasic,
+ },
+ decision: -1,
+ }
+}
+
+func (s *BaseDecisionState) getDecision() int {
+ return s.decision
+}
+
+func (s *BaseDecisionState) setDecision(b int) {
+ s.decision = b
+}
+
+func (s *BaseDecisionState) getNonGreedy() bool {
+ return s.nonGreedy
+}
+
+func (s *BaseDecisionState) setNonGreedy(b bool) {
+ s.nonGreedy = b
+}
+
+type BlockStartState interface {
+ DecisionState
+
+ getEndState() *BlockEndState
+ setEndState(*BlockEndState)
+}
+
+// BaseBlockStartState is the start of a regular (...) block.
+type BaseBlockStartState struct {
+ BaseDecisionState
+ endState *BlockEndState
+}
+
+func NewBlockStartState() *BaseBlockStartState {
+ return &BaseBlockStartState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateBasic,
+ },
+ decision: -1,
+ },
+ }
+}
+
+func (s *BaseBlockStartState) getEndState() *BlockEndState {
+ return s.endState
+}
+
+func (s *BaseBlockStartState) setEndState(b *BlockEndState) {
+ s.endState = b
+}
+
+type BasicBlockStartState struct {
+ BaseBlockStartState
+}
+
+func NewBasicBlockStartState() *BasicBlockStartState {
+ return &BasicBlockStartState{
+ BaseBlockStartState: BaseBlockStartState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateBlockStart,
+ },
+ },
+ },
+ }
+}
+
+var _ BlockStartState = &BasicBlockStartState{}
+
+// BlockEndState is a terminal node of a simple (a|b|c) block.
+type BlockEndState struct {
+ BaseATNState
+ startState ATNState
+}
+
+func NewBlockEndState() *BlockEndState {
+ return &BlockEndState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateBlockEnd,
+ },
+ startState: nil,
+ }
+}
+
+// RuleStopState is the last node in the ATN for a rule, unless that rule is the
+// start symbol. In that case, there is one transition to EOF. Later, we might
+// encode references to all calls to this rule to compute FOLLOW sets for error
+// handling.
+type RuleStopState struct {
+ BaseATNState
+}
+
+func NewRuleStopState() *RuleStopState {
+ return &RuleStopState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateRuleStop,
+ },
+ }
+}
+
+type RuleStartState struct {
+ BaseATNState
+ stopState ATNState
+ isPrecedenceRule bool
+}
+
+func NewRuleStartState() *RuleStartState {
+ return &RuleStartState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateRuleStart,
+ },
+ }
+}
+
+// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two
+// transitions: one to the loop back to start of the block, and one to exit.
+type PlusLoopbackState struct {
+ BaseDecisionState
+}
+
+func NewPlusLoopbackState() *PlusLoopbackState {
+ return &PlusLoopbackState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStatePlusLoopBack,
+ },
+ },
+ }
+}
+
+// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a
+// decision state; we don't use it for code generation. Somebody might need it,
+// so it is included for completeness. In reality, PlusLoopbackState is the real
+// decision-making node for A+.
+type PlusBlockStartState struct {
+ BaseBlockStartState
+ loopBackState ATNState
+}
+
+func NewPlusBlockStartState() *PlusBlockStartState {
+ return &PlusBlockStartState{
+ BaseBlockStartState: BaseBlockStartState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStatePlusBlockStart,
+ },
+ },
+ },
+ }
+}
+
+var _ BlockStartState = &PlusBlockStartState{}
+
+// StarBlockStartState is the block that begins a closure loop.
+type StarBlockStartState struct {
+ BaseBlockStartState
+}
+
+func NewStarBlockStartState() *StarBlockStartState {
+ return &StarBlockStartState{
+ BaseBlockStartState: BaseBlockStartState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateStarBlockStart,
+ },
+ },
+ },
+ }
+}
+
+var _ BlockStartState = &StarBlockStartState{}
+
+type StarLoopbackState struct {
+ BaseATNState
+}
+
+func NewStarLoopbackState() *StarLoopbackState {
+ return &StarLoopbackState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateStarLoopBack,
+ },
+ }
+}
+
+type StarLoopEntryState struct {
+ BaseDecisionState
+ loopBackState ATNState
+ precedenceRuleDecision bool
+}
+
+func NewStarLoopEntryState() *StarLoopEntryState {
+ // precedenceRuleDecision (false by default) indicates whether this state can benefit from a precedence DFA during SLL decision-making.
+ return &StarLoopEntryState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateStarLoopEntry,
+ },
+ },
+ }
+}
+
+// LoopEndState marks the end of a * or + loop.
+type LoopEndState struct {
+ BaseATNState
+ loopBackState ATNState
+}
+
+func NewLoopEndState() *LoopEndState {
+ return &LoopEndState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateLoopEnd,
+ },
+ }
+}
+
+// TokensStartState is the Tokens rule start state linking to each lexer rule start state.
+type TokensStartState struct {
+ BaseDecisionState
+}
+
+func NewTokensStartState() *TokensStartState {
+ return &TokensStartState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateTokenStart,
+ },
+ },
+ }
+}
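
As a quick orientation for the exported surface added in this file, here is a minimal sketch (not vendored code) showing that state identity is carried entirely by the state number, exactly as BaseATNState.Hash and BaseATNState.Equals above imply:

    package main

    import (
        "fmt"

        "github.com/antlr4-go/antlr/v4"
    )

    func main() {
        // Two distinct states with the same number compare equal, because
        // BaseATNState.Equals inspects only stateNumber.
        a := antlr.NewBasicState()
        a.SetStateNumber(42)
        b := antlr.NewBasicState()
        b.SetStateNumber(42)

        fmt.Println(a.GetStateType() == antlr.ATNStateBasic) // true
        fmt.Println(a.Equals(b), a.Hash() == b.Hash())       // true true
        fmt.Println(a)                                       // "42", via String()
    }
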
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_type.go b/vendor/github.com/antlr4-go/antlr/v4/atn_type.go
similarity index 100%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_type.go
rename to vendor/github.com/antlr4-go/antlr/v4/atn_type.go
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go b/vendor/github.com/antlr4-go/antlr/v4/char_stream.go
similarity index 89%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go
rename to vendor/github.com/antlr4-go/antlr/v4/char_stream.go
index c33f0adb5..bd8127b6b 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/char_stream.go
@@ -8,5 +8,5 @@ type CharStream interface {
IntStream
GetText(int, int) string
GetTextFromTokens(start, end Token) string
- GetTextFromInterval(*Interval) string
+ GetTextFromInterval(Interval) string
}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_factory.go b/vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go
similarity index 100%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_factory.go
rename to vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go b/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go
similarity index 88%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go
rename to vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go
index c6c9485a2..b75da9df0 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go
@@ -28,22 +28,24 @@ type CommonTokenStream struct {
// trivial with bt field.
fetchedEOF bool
- // index indexs into tokens of the current token (next token to consume).
+ // index into [tokens] of the current token (next token to consume).
// tokens[p] should be LT(1). It is set to -1 when the stream is first
// constructed or when SetTokenSource is called, indicating that the first token
// has not yet been fetched from the token source. For additional information,
- // see the documentation of IntStream for a description of initializing methods.
+ // see the documentation of [IntStream] for a description of initializing methods.
index int
- // tokenSource is the TokenSource from which tokens for the bt stream are
+ // tokenSource is the [TokenSource] from which tokens for the bt stream are
// fetched.
tokenSource TokenSource
- // tokens is all tokens fetched from the token source. The list is considered a
+ // tokens contains all tokens fetched from the token source. The list is considered a
// complete view of the input once fetchedEOF is set to true.
tokens []Token
}
+// NewCommonTokenStream creates a new CommonTokenStream instance using the supplied lexer to produce
+// tokens, pulling only tokens from the given channel.
func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream {
return &CommonTokenStream{
channel: channel,
@@ -53,6 +55,7 @@ func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream {
}
}
+// GetAllTokens returns all tokens currently pulled from the token source.
func (c *CommonTokenStream) GetAllTokens() []Token {
return c.tokens
}
@@ -61,9 +64,11 @@ func (c *CommonTokenStream) Mark() int {
return 0
}
-func (c *CommonTokenStream) Release(marker int) {}
+func (c *CommonTokenStream) Release(_ int) {}
-func (c *CommonTokenStream) reset() {
+func (c *CommonTokenStream) Reset() {
+ c.fetchedEOF = false
+ c.tokens = make([]Token, 0)
c.Seek(0)
}
@@ -107,7 +112,7 @@ func (c *CommonTokenStream) Consume() {
// Sync makes sure index i in tokens has a token and returns true if a token is
// located at index i and otherwise false.
func (c *CommonTokenStream) Sync(i int) bool {
- n := i - len(c.tokens) + 1 // TODO: How many more elements do we need?
+ n := i - len(c.tokens) + 1 // How many more elements do we need?
if n > 0 {
fetched := c.fetch(n)
@@ -193,12 +198,13 @@ func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) {
c.tokenSource = tokenSource
c.tokens = make([]Token, 0)
c.index = -1
+ c.fetchedEOF = false
}
// NextTokenOnChannel returns the index of the next token on channel given a
// starting index. Returns i if tokens[i] is on channel. Returns -1 if there are
-// no tokens on channel between i and EOF.
-func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int {
+// no tokens on channel between 'i' and [TokenEOF].
+func (c *CommonTokenStream) NextTokenOnChannel(i, _ int) int {
c.Sync(i)
if i >= len(c.tokens) {
@@ -244,7 +250,7 @@ func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []To
nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel)
from := tokenIndex + 1
- // If no onchannel to the right, then nextOnChannel == -1, so set to to last token
+ // If no onChannel to the right, then nextOnChannel == -1, so set 'to' to the last token
var to int
if nextOnChannel == -1 {
@@ -314,7 +320,8 @@ func (c *CommonTokenStream) Index() int {
}
func (c *CommonTokenStream) GetAllText() string {
- return c.GetTextFromInterval(nil)
+ c.Fill()
+ return c.GetTextFromInterval(NewInterval(0, len(c.tokens)-1))
}
func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string {
@@ -329,15 +336,9 @@ func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string
return c.GetTextFromInterval(interval.GetSourceInterval())
}
-func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string {
+func (c *CommonTokenStream) GetTextFromInterval(interval Interval) string {
c.lazyInit()
-
- if interval == nil {
- c.Fill()
- interval = NewInterval(0, len(c.tokens)-1)
- } else {
- c.Sync(interval.Stop)
- }
+ c.Sync(interval.Stop)
start := interval.Start
stop := interval.Stop
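
The reworked stream API is driven the same way as before from caller code; a minimal sketch (assuming the caller supplies any ANTLR-generated lexer) that buffers a whole input and walks the tokens:

    package example

    import (
        "fmt"

        "github.com/antlr4-go/antlr/v4"
    )

    // dumpTokens buffers every token up to EOF and prints its text. Fill is
    // called explicitly here; note that GetAllText above now calls Fill itself.
    func dumpTokens(lex antlr.Lexer) {
        tokens := antlr.NewCommonTokenStream(lex, antlr.TokenDefaultChannel)
        tokens.Fill()
        for _, t := range tokens.GetAllTokens() {
            fmt.Println(t.GetText())
        }
    }
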
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go b/vendor/github.com/antlr4-go/antlr/v4/comparators.go
similarity index 82%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go
rename to vendor/github.com/antlr4-go/antlr/v4/comparators.go
index 9ea320053..7467e9b43 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/comparators.go
@@ -18,17 +18,20 @@ package antlr
// type safety and avoid having to implement this for every type that we want to perform comparison on.
//
// This comparator works by using the standard Hash() and Equals() methods of the type T that is being compared. Which
-// allows us to use it in any collection instance that does nto require a special hash or equals implementation.
+// allows us to use it in any collection instance that does not require a special hash or equals implementation.
type ObjEqComparator[T Collectable[T]] struct{}
var (
- aStateEqInst = &ObjEqComparator[ATNState]{}
- aConfEqInst = &ObjEqComparator[ATNConfig]{}
- aConfCompInst = &ATNConfigComparator[ATNConfig]{}
- atnConfCompInst = &BaseATNConfigComparator[ATNConfig]{}
+ aStateEqInst = &ObjEqComparator[ATNState]{}
+ aConfEqInst = &ObjEqComparator[*ATNConfig]{}
+
+ // aConfCompInst is the comparator used for the ATNConfigSet for the configLookup cache
+ aConfCompInst = &ATNConfigComparator[*ATNConfig]{}
+ atnConfCompInst = &BaseATNConfigComparator[*ATNConfig]{}
dfaStateEqInst = &ObjEqComparator[*DFAState]{}
semctxEqInst = &ObjEqComparator[SemanticContext]{}
- atnAltCfgEqInst = &ATNAltConfigComparator[ATNConfig]{}
+ atnAltCfgEqInst = &ATNAltConfigComparator[*ATNConfig]{}
+ pContextEqInst = &ObjEqComparator[*PredictionContext]{}
)
// Equals2 delegates to the Equals() method of type T
@@ -44,14 +47,14 @@ func (c *ObjEqComparator[T]) Hash1(o T) int {
type SemCComparator[T Collectable[T]] struct{}
-// ATNConfigComparator is used as the compartor for the configLookup field of an ATNConfigSet
+// ATNConfigComparator is used as the comparator for the configLookup field of an ATNConfigSet
// and has a custom Equals() and Hash() implementation, because equality is not based on the
// standard Hash() and Equals() methods of the ATNConfig type.
type ATNConfigComparator[T Collectable[T]] struct {
}
// Equals2 is a custom comparator for ATNConfigs specifically for configLookup
-func (c *ATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
+func (c *ATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
// Same pointer, must be equal, even if both nil
//
@@ -72,7 +75,8 @@ func (c *ATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
}
// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup
-func (c *ATNConfigComparator[T]) Hash1(o ATNConfig) int {
+func (c *ATNConfigComparator[T]) Hash1(o *ATNConfig) int {
+
hash := 7
hash = 31*hash + o.GetState().GetStateNumber()
hash = 31*hash + o.GetAlt()
@@ -85,7 +89,7 @@ type ATNAltConfigComparator[T Collectable[T]] struct {
}
// Equals2 is a custom comparator for ATNConfigs specifically for configLookup
-func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
+func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
// Same pointer, must be equal, even if both nil
//
@@ -105,21 +109,21 @@ func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
}
// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup
-func (c *ATNAltConfigComparator[T]) Hash1(o ATNConfig) int {
+func (c *ATNAltConfigComparator[T]) Hash1(o *ATNConfig) int {
h := murmurInit(7)
h = murmurUpdate(h, o.GetState().GetStateNumber())
h = murmurUpdate(h, o.GetContext().Hash())
return murmurFinish(h, 2)
}
-// BaseATNConfigComparator is used as the comparator for the configLookup field of a BaseATNConfigSet
+// BaseATNConfigComparator is used as the comparator for the configLookup field of a ATNConfigSet
// and has a custom Equals() and Hash() implementation, because equality is not based on the
// standard Hash() and Equals() methods of the ATNConfig type.
type BaseATNConfigComparator[T Collectable[T]] struct {
}
// Equals2 is a custom comparator for ATNConfigs specifically for baseATNConfigSet
-func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
+func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
// Same pointer, must be equal, even if both nil
//
@@ -141,7 +145,6 @@ func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup, but in fact just
// delegates to the standard Hash() method of the ATNConfig type.
-func (c *BaseATNConfigComparator[T]) Hash1(o ATNConfig) int {
-
+func (c *BaseATNConfigComparator[T]) Hash1(o *ATNConfig) int {
return o.Hash()
}
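
The move from the ATNConfig interface to *ATNConfig changes only the element type; the delegation pattern itself is unchanged. In miniature, and independent of the antlr types (the Hashable constraint below is a simplified stand-in for the runtime's Collectable), the pattern looks like this:

    package main

    import "fmt"

    // Hashable is any type that can hash itself and compare itself for equality.
    type Hashable[T any] interface {
        Hash() int
        Equals(other T) bool
    }

    // ObjEq delegates straight to the element's own methods, as ObjEqComparator does.
    type ObjEq[T Hashable[T]] struct{}

    func (ObjEq[T]) Equals2(a, b T) bool { return a.Equals(b) }
    func (ObjEq[T]) Hash1(a T) int       { return a.Hash() }

    type id int

    func (i id) Hash() int        { return int(i) }
    func (i id) Equals(o id) bool { return i == o }

    func main() {
        var cmp ObjEq[id]
        fmt.Println(cmp.Equals2(3, 3), cmp.Hash1(7)) // true 7
    }
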
diff --git a/vendor/github.com/antlr4-go/antlr/v4/configuration.go b/vendor/github.com/antlr4-go/antlr/v4/configuration.go
new file mode 100644
index 000000000..c2b724514
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/configuration.go
@@ -0,0 +1,214 @@
+package antlr
+
+type runtimeConfiguration struct {
+ statsTraceStacks bool
+ lexerATNSimulatorDebug bool
+ lexerATNSimulatorDFADebug bool
+ parserATNSimulatorDebug bool
+ parserATNSimulatorTraceATNSim bool
+ parserATNSimulatorDFADebug bool
+ parserATNSimulatorRetryDebug bool
+ lRLoopEntryBranchOpt bool
+ memoryManager bool
+}
+
+// Global runtime configuration
+var runtimeConfig = runtimeConfiguration{
+ lRLoopEntryBranchOpt: true,
+}
+
+type runtimeOption func(*runtimeConfiguration) error
+
+// ConfigureRuntime allows the runtime to be configured globally setting things like trace and statistics options.
+// It uses the functional options pattern for Go. This is a package-global function as it operates on the runtime
+// configuration regardless of the instantiation of anything higher up such as a parser or lexer. Generally this is
+// used for debugging/tracing/statistics options, which are usually used by the runtime maintainers (or rather the
+// only maintainer). However, it is possible that you might want to use this to set a global option concerning the
+// memory allocation strategy used by the runtime, such as whether to use sync.Pool.
+//
+// The options are applied in the order they are passed in, so the last option will override any previous options.
+//
+// For example, if you want to turn on collection of create-point stack traces, you can do:
+//
+// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(true))
+//
+// If you want to turn it off, you can do:
+//
+// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(false))
+func ConfigureRuntime(options ...runtimeOption) error {
+ for _, option := range options {
+ err := option(&runtimeConfig)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// WithStatsTraceStacks sets the global flag indicating whether to collect stack traces at the create-point of
+// certain structs, such as collections, or the use point of certain methods such as Put().
+// Because this can be expensive, it is turned off by default. However, it
+// can be useful to track down exactly where memory is being created and used.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(false))
+func WithStatsTraceStacks(trace bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.statsTraceStacks = trace
+ return nil
+ }
+}
+
+// WithLexerATNSimulatorDebug sets the global flag indicating whether to log debug information from the lexer [ATN]
+// simulator. This is useful for debugging lexer issues by comparing the output with the Java runtime. Only useful
+// to the runtime maintainers.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDebug(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDebug(false))
+func WithLexerATNSimulatorDebug(debug bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.lexerATNSimulatorDebug = debug
+ return nil
+ }
+}
+
+// WithLexerATNSimulatorDFADebug sets the global flag indicating whether to log debug information from the lexer [ATN] [DFA]
+// simulator. This is useful for debugging lexer issues by comparing the output with the Java runtime. Only useful
+// to the runtime maintainers.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDFADebug(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDFADebug(false))
+func WithLexerATNSimulatorDFADebug(debug bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.lexerATNSimulatorDFADebug = debug
+ return nil
+ }
+}
+
+// WithParserATNSimulatorDebug sets the global flag indicating whether to log debug information from the parser [ATN]
+// simulator. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful
+// to the runtime maintainers.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDebug(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDebug(false))
+func WithParserATNSimulatorDebug(debug bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.parserATNSimulatorDebug = debug
+ return nil
+ }
+}
+
+// WithParserATNSimulatorTraceATNSim sets the global flag indicating whether to log trace information from the parser [ATN] simulator
+// [DFA]. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful
+// to the runtime maintainers.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(false))
+func WithParserATNSimulatorTraceATNSim(trace bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.parserATNSimulatorTraceATNSim = trace
+ return nil
+ }
+}
+
+// WithParserATNSimulatorDFADebug sets the global flag indicating whether to log debug information from the parser [ATN] [DFA]
+// simulator. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful
+// to the runtime maintainers.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDFADebug(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDFADebug(false))
+func WithParserATNSimulatorDFADebug(debug bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.parserATNSimulatorDFADebug = debug
+ return nil
+ }
+}
+
+// WithParserATNSimulatorRetryDebug sets the global flag indicating whether to log debug information from the parser [ATN] [DFA]
+// simulator when retrying a decision. This is useful for debugging parser issues by comparing the output with the Java runtime.
+// Only useful to the runtime maintainers.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorRetryDebug(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorRetryDebug(false))
+func WithParserATNSimulatorRetryDebug(debug bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.parserATNSimulatorRetryDebug = debug
+ return nil
+ }
+}
+
+// WithLRLoopEntryBranchOpt sets the global flag indicating whether left-recursive loop operations should be
+// optimized. This is useful for debugging parser issues by comparing the output with the Java runtime.
+// Passing false turns off the functionality of [canDropLoopEntryEdgeInLeftRecursiveRule] in [ParserATNSimulator].
+//
+// Note that the default is to use this optimization.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithLRLoopEntryBranchOpt(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithLRLoopEntryBranchOpt(false))
+func WithLRLoopEntryBranchOpt(enable bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.lRLoopEntryBranchOpt = enable
+ return nil
+ }
+}
+
+// WithMemoryManager sets the global flag indicating whether to use the memory manager or not. This is useful
+// for poorly constructed grammars that create a lot of garbage. It turns on the functionality of [memoryManager], which
+// pools allocations for reuse rather than leaving them all to the garbage collector. At the end of the day, this is no substitute
+// for fixing your grammar by ridding yourself of extreme ambiguity. But if you are just trying to reuse an open-source
+// grammar, this may help make it more practical.
+//
+// Note that the default is to use normal Go memory allocation and not pool memory.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithMemoryManager(true))
+//
+// Note that if you turn this on, you should probably leave it on. You should stick with one memory strategy or the other
+// and should remember to nil out any references to the parser or lexer when you are done with them.
+func WithMemoryManager(use bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.memoryManager = use
+ return nil
+ }
+}
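
Downstream code now has a single entry point for these runtime flags; usage follows directly from the doc comments in the file above:

    package main

    import "github.com/antlr4-go/antlr/v4"

    func main() {
        // Options are applied in order, so a later option wins on conflict.
        if err := antlr.ConfigureRuntime(
            antlr.WithParserATNSimulatorDebug(true),
            antlr.WithStatsTraceStacks(true),
        ); err != nil {
            panic(err)
        }
    }
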
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go b/vendor/github.com/antlr4-go/antlr/v4/dfa.go
similarity index 76%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go
rename to vendor/github.com/antlr4-go/antlr/v4/dfa.go
index bfd43e1f7..6b63eb158 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/dfa.go
@@ -4,6 +4,8 @@
package antlr
+// DFA represents the Deterministic Finite Automaton used by the recognizer, including all the states it can
+// reach and the transitions between them.
type DFA struct {
// atnStartState is the ATN state in which this was created
atnStartState DecisionState
@@ -12,10 +14,9 @@ type DFA struct {
// states is all the DFA states. Use Map to get the old state back; Set can only
// indicate whether it is there. Go maps implement key hash collisions and so on and are very
- // good, but the DFAState is an object and can't be used directly as the key as it can in say JAva
+ // good, but the DFAState is an object and can't be used directly as the key as it can in say Java
 // and C#, whereby if the hashcode is the same for two objects, then Equals() is called against them
- // to see if they really are the same object.
- //
+ // to see if they really are the same object. Hence, we have our own map storage.
//
states *JStore[*DFAState, *ObjEqComparator[*DFAState]]
@@ -32,11 +33,11 @@ func NewDFA(atnStartState DecisionState, decision int) *DFA {
dfa := &DFA{
atnStartState: atnStartState,
decision: decision,
- states: NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst),
+ states: nil, // Lazy initialize
}
if s, ok := atnStartState.(*StarLoopEntryState); ok && s.precedenceRuleDecision {
dfa.precedenceDfa = true
- dfa.s0 = NewDFAState(-1, NewBaseATNConfigSet(false))
+ dfa.s0 = NewDFAState(-1, NewATNConfigSet(false))
dfa.s0.isAcceptState = false
dfa.s0.requiresFullContext = false
}
@@ -95,12 +96,11 @@ func (d *DFA) getPrecedenceDfa() bool {
// true or nil otherwise, and d.precedenceDfa is updated.
func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
if d.getPrecedenceDfa() != precedenceDfa {
- d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst)
+ d.states = nil // Lazy initialize
d.numstates = 0
if precedenceDfa {
- precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false))
-
+ precedenceState := NewDFAState(-1, NewATNConfigSet(false))
precedenceState.setEdges(make([]*DFAState, 0))
precedenceState.isAcceptState = false
precedenceState.requiresFullContext = false
@@ -113,6 +113,31 @@ func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
}
}
+// Len returns the number of states in d. We use this instead of accessing states directly so that we can implement lazy
+// instantiation of the states JStore.
+func (d *DFA) Len() int {
+ if d.states == nil {
+ return 0
+ }
+ return d.states.Len()
+}
+
+// Get returns a state that matches s if it is present in the DFA state set. We defer to this
+// function instead of accessing states directly so that we can implement lazy instantiation of the states JStore.
+func (d *DFA) Get(s *DFAState) (*DFAState, bool) {
+ if d.states == nil {
+ return nil, false
+ }
+ return d.states.Get(s)
+}
+
+func (d *DFA) Put(s *DFAState) (*DFAState, bool) {
+ if d.states == nil {
+ d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst, DFAStateCollection, "DFA via DFA.Put")
+ }
+ return d.states.Put(s)
+}
+
func (d *DFA) getS0() *DFAState {
return d.s0
}
@@ -121,9 +146,11 @@ func (d *DFA) setS0(s *DFAState) {
d.s0 = s
}
-// sortedStates returns the states in d sorted by their state number.
+// sortedStates returns the states in d sorted by their state number, or an empty set if d.states is nil.
func (d *DFA) sortedStates() []*DFAState {
-
+ if d.states == nil {
+ return []*DFAState{}
+ }
vs := d.states.SortedSlice(func(i, j *DFAState) bool {
return i.stateNumber < j.stateNumber
})
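
The new Len/Get/Put accessors exist purely so the states store can be created lazily; the same idea in miniature, with a plain map standing in for the JStore:

    package main

    import "fmt"

    // lazySet allocates its map only on first Put; read paths treat nil as
    // empty, mirroring how DFA.Len, DFA.Get, and DFA.Put guard d.states.
    type lazySet struct {
        m map[int]struct{}
    }

    func (s *lazySet) Put(k int) {
        if s.m == nil {
            s.m = make(map[int]struct{}) // lazy initialize, as DFA.Put does
        }
        s.m[k] = struct{}{}
    }

    func (s *lazySet) Len() int {
        if s.m == nil {
            return 0 // never written to, so nothing was ever allocated
        }
        return len(s.m)
    }

    func main() {
        var s lazySet
        fmt.Println(s.Len()) // 0, with no allocation yet
        s.Put(1)
        fmt.Println(s.Len()) // 1
    }
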
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go b/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go
similarity index 97%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go
rename to vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go
index 84d0a31e5..0e1100989 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go
@@ -10,7 +10,7 @@ import (
"strings"
)
-// DFASerializer is a DFA walker that knows how to dump them to serialized
+// DFASerializer is a DFA walker that knows how to dump the DFA states to serialized
// strings.
type DFASerializer struct {
dfa *DFA
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go b/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go
similarity index 81%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go
rename to vendor/github.com/antlr4-go/antlr/v4/dfa_state.go
index c90dec55c..654143074 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go
@@ -22,30 +22,31 @@ func (p *PredPrediction) String() string {
return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")"
}
-// DFAState represents a set of possible ATN configurations. As Aho, Sethi,
+// DFAState represents a set of possible [ATN] configurations. As Aho, Sethi,
// Ullman p. 117 says: "The DFA uses its state to keep track of all possible
// states the ATN can be in after reading each input symbol. That is to say,
-// after reading input a1a2..an, the DFA is in a state that represents the
+// after reading input a1, a2,..an, the DFA is in a state that represents the
// subset T of the states of the ATN that are reachable from the ATN's start
-// state along some path labeled a1a2..an." In conventional NFA-to-DFA
-// conversion, therefore, the subset T would be a bitset representing the set of
-// states the ATN could be in. We need to track the alt predicted by each state
+// state along some path labeled a1a2..an."
+//
+// In conventional NFA-to-DFA conversion, therefore, the subset T would be a bitset representing the set of
+// states the [ATN] could be in. We need to track the alt predicted by each state
// as well, however. More importantly, we need to maintain a stack of states,
// tracking the closure operations as they jump from rule to rule, emulating
// rule invocations (method calls). I have to add a stack to simulate the proper
// lookahead sequences for the underlying LL grammar from which the ATN was
// derived.
//
-// I use a set of ATNConfig objects, not simple states. An ATNConfig is both a
-// state (ala normal conversion) and a RuleContext describing the chain of rules
+// I use a set of [ATNConfig] objects, not simple states. An [ATNConfig] is both a
+// state (ala normal conversion) and a [RuleContext] describing the chain of rules
// (if any) followed to arrive at that state.
//
-// A DFAState may have multiple references to a particular state, but with
-// different ATN contexts (with same or different alts) meaning that state was
+// A [DFAState] may have multiple references to a particular state, but with
+// different [ATN] contexts (with same or different alts) meaning that state was
// reached via a different set of rule invocations.
type DFAState struct {
stateNumber int
- configs ATNConfigSet
+ configs *ATNConfigSet
// edges elements point to the target of the symbol. Shift up by 1 so (-1)
// Token.EOF maps to the first element.
@@ -53,7 +54,7 @@ type DFAState struct {
isAcceptState bool
- // prediction is the ttype we match or alt we predict if the state is accept.
+ // prediction is the 'ttype' we match or alt we predict if the state is 'accept'.
// Set to ATN.INVALID_ALT_NUMBER when predicates != nil or
// requiresFullContext.
prediction int
@@ -81,9 +82,9 @@ type DFAState struct {
predicates []*PredPrediction
}
-func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState {
+func NewDFAState(stateNumber int, configs *ATNConfigSet) *DFAState {
if configs == nil {
- configs = NewBaseATNConfigSet(false)
+ configs = NewATNConfigSet(false)
}
return &DFAState{configs: configs, stateNumber: stateNumber}
@@ -94,7 +95,7 @@ func (d *DFAState) GetAltSet() []int {
var alts []int
if d.configs != nil {
- for _, c := range d.configs.GetItems() {
+ for _, c := range d.configs.configs {
alts = append(alts, c.GetAlt())
}
}
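
Since configs is now a concrete *ATNConfigSet, constructing a DFAState uses the updated constructor shown in this hunk; a minimal sketch:

    package main

    import (
        "fmt"

        "github.com/antlr4-go/antlr/v4"
    )

    func main() {
        // NewDFAState now takes *ATNConfigSet rather than an interface value.
        s := antlr.NewDFAState(7, antlr.NewATNConfigSet(false))
        fmt.Println(s.GetAltSet()) // []: no configurations added yet
    }
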
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go b/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go
similarity index 92%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go
rename to vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go
index c55bcc19b..bd2cd8bc3 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go
@@ -33,6 +33,7 @@ type DiagnosticErrorListener struct {
exactOnly bool
}
+//goland:noinspection GoUnusedExportedFunction
func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener {
n := new(DiagnosticErrorListener)
@@ -42,7 +43,7 @@ func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener {
return n
}
-func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
+func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
if d.exactOnly && !exact {
return
}
@@ -55,7 +56,7 @@ func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, s
recognizer.NotifyErrorListeners(msg, nil, nil)
}
-func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
+func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, _ *BitSet, _ *ATNConfigSet) {
msg := "reportAttemptingFullContext d=" +
d.getDecisionDescription(recognizer, dfa) +
@@ -64,7 +65,7 @@ func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser,
recognizer.NotifyErrorListeners(msg, nil, nil)
}
-func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
+func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, _ int, _ *ATNConfigSet) {
msg := "reportContextSensitivity d=" +
d.getDecisionDescription(recognizer, dfa) +
", input='" +
@@ -96,12 +97,12 @@ func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa
// @param configs The conflicting or ambiguous configuration set.
 // @return Returns ReportedAlts if it is not nil, otherwise
 // returns the set of alternatives represented in configs.
-func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set ATNConfigSet) *BitSet {
+func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set *ATNConfigSet) *BitSet {
if ReportedAlts != nil {
return ReportedAlts
}
result := NewBitSet()
- for _, c := range set.GetItems() {
+ for _, c := range set.configs {
result.add(c.GetAlt())
}
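For callers, wiring the diagnostic listener into a parser is unchanged by these signature updates; a sketch (any generated parser satisfies antlr.Recognizer, which carries the listener-management methods):

    package example

    import "github.com/antlr4-go/antlr/v4"

    // enableDiagnostics swaps the default console listener for the diagnostic
    // one; passing true reports only exact ambiguities.
    func enableDiagnostics(r antlr.Recognizer) {
        r.RemoveErrorListeners()
        r.AddErrorListener(antlr.NewDiagnosticErrorListener(true))
    }
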
diff --git a/vendor/github.com/antlr4-go/antlr/v4/error_listener.go b/vendor/github.com/antlr4-go/antlr/v4/error_listener.go
new file mode 100644
index 000000000..21a021643
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/error_listener.go
@@ -0,0 +1,100 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+)
+
+// Provides an empty default implementation of [ErrorListener]. The
+// default implementation of each method does nothing, but can be overridden as
+// necessary.
+
+type ErrorListener interface {
+ SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)
+ ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet)
+ ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet)
+ ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet)
+}
+
+type DefaultErrorListener struct {
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewDefaultErrorListener() *DefaultErrorListener {
+ return new(DefaultErrorListener)
+}
+
+func (d *DefaultErrorListener) SyntaxError(_ Recognizer, _ interface{}, _, _ int, _ string, _ RecognitionException) {
+}
+
+func (d *DefaultErrorListener) ReportAmbiguity(_ Parser, _ *DFA, _, _ int, _ bool, _ *BitSet, _ *ATNConfigSet) {
+}
+
+func (d *DefaultErrorListener) ReportAttemptingFullContext(_ Parser, _ *DFA, _, _ int, _ *BitSet, _ *ATNConfigSet) {
+}
+
+func (d *DefaultErrorListener) ReportContextSensitivity(_ Parser, _ *DFA, _, _, _ int, _ *ATNConfigSet) {
+}
+
+type ConsoleErrorListener struct {
+ *DefaultErrorListener
+}
+
+func NewConsoleErrorListener() *ConsoleErrorListener {
+ return new(ConsoleErrorListener)
+}
+
+// ConsoleErrorListenerINSTANCE provides a default instance of [ConsoleErrorListener].
+var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener()
+
+// SyntaxError prints messages to os.Stderr containing the
+// values of line, column, and msg using
+// the following format:
+//
+// line <line>:<column> <msg>
+func (c *ConsoleErrorListener) SyntaxError(_ Recognizer, _ interface{}, line, column int, msg string, _ RecognitionException) {
+ _, _ = fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg)
+}
+
+type ProxyErrorListener struct {
+ *DefaultErrorListener
+ delegates []ErrorListener
+}
+
+func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener {
+ if delegates == nil {
+ panic("delegates is not provided")
+ }
+ l := new(ProxyErrorListener)
+ l.delegates = delegates
+ return l
+}
+
+func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
+ for _, d := range p.delegates {
+ d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e)
+ }
+}
+
+func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
+ for _, d := range p.delegates {
+ d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
+ }
+}
+
+func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet) {
+ for _, d := range p.delegates {
+ d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs)
+ }
+}
+
+func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet) {
+ for _, d := range p.delegates {
+ d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs)
+ }
+}
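
A typical consumer embeds DefaultErrorListener so that only the callbacks of interest need overriding, inheriting no-ops for the rest; a minimal sketch (the errorCollector type and its fields are illustrative, not part of the runtime):

    package main

    import (
        "fmt"

        "github.com/antlr4-go/antlr/v4"
    )

    // errorCollector records syntax errors and inherits no-op implementations
    // of the three Report* callbacks from the embedded DefaultErrorListener.
    type errorCollector struct {
        *antlr.DefaultErrorListener
        errs []string
    }

    func (c *errorCollector) SyntaxError(_ antlr.Recognizer, _ interface{}, line, column int,
        msg string, _ antlr.RecognitionException) {
        c.errs = append(c.errs, fmt.Sprintf("%d:%d %s", line, column, msg))
    }

    func main() {
        c := &errorCollector{DefaultErrorListener: antlr.NewDefaultErrorListener()}
        var _ antlr.ErrorListener = c // compile-time check: embedding suffices
        fmt.Println(len(c.errs))      // 0 until attached to a recognizer
    }
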
diff --git a/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go b/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go
new file mode 100644
index 000000000..9db2be1c7
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go
@@ -0,0 +1,702 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+type ErrorStrategy interface {
+ reset(Parser)
+ RecoverInline(Parser) Token
+ Recover(Parser, RecognitionException)
+ Sync(Parser)
+ InErrorRecoveryMode(Parser) bool
+ ReportError(Parser, RecognitionException)
+ ReportMatch(Parser)
+}
+
+// DefaultErrorStrategy is the default implementation of [ErrorStrategy] used for
+// error reporting and recovery in ANTLR parsers.
+type DefaultErrorStrategy struct {
+ errorRecoveryMode bool
+ lastErrorIndex int
+ lastErrorStates *IntervalSet
+}
+
+var _ ErrorStrategy = &DefaultErrorStrategy{}
+
+func NewDefaultErrorStrategy() *DefaultErrorStrategy {
+
+ d := new(DefaultErrorStrategy)
+
+ // Indicates whether the error strategy is currently "recovering from an
+ // error". This is used to suppress Reporting multiple error messages while
+ // attempting to recover from a detected syntax error.
+ //
+ // See also [InErrorRecoveryMode].
+ //
+ d.errorRecoveryMode = false
+
+ // The index into the input stream where the last error occurred.
+ // This is used to prevent infinite loops where an error is found
+ // but no token is consumed during recovery...another error is found,
+ // ad nauseam. This is a failsafe mechanism to guarantee that at least
+ // one token/tree node is consumed for two errors.
+ //
+ d.lastErrorIndex = -1
+ d.lastErrorStates = nil
+ return d
+}
+
+// The default implementation simply calls [endErrorCondition] to
+// ensure that the handler is not in error recovery mode.
+func (d *DefaultErrorStrategy) reset(recognizer Parser) {
+ d.endErrorCondition(recognizer)
+}
+
+// This method is called to enter error recovery mode when a recognition
+// exception is Reported.
+func (d *DefaultErrorStrategy) beginErrorCondition(_ Parser) {
+ d.errorRecoveryMode = true
+}
+
+func (d *DefaultErrorStrategy) InErrorRecoveryMode(_ Parser) bool {
+ return d.errorRecoveryMode
+}
+
+// This method is called to leave error recovery mode after recovering from
+// a recognition exception.
+func (d *DefaultErrorStrategy) endErrorCondition(_ Parser) {
+ d.errorRecoveryMode = false
+ d.lastErrorStates = nil
+ d.lastErrorIndex = -1
+}
+
+// ReportMatch is the default implementation of error matching and simply calls endErrorCondition.
+func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) {
+ d.endErrorCondition(recognizer)
+}
+
+// ReportError is the default implementation of error reporting.
+// It returns immediately if the handler is already
+// in error recovery mode. Otherwise, it calls [beginErrorCondition]
+// and dispatches the Reporting task based on the runtime type of e
+// according to the following table.
+//
+// [NoViableAltException] : Dispatches the call to [ReportNoViableAlternative]
+// [InputMisMatchException] : Dispatches the call to [ReportInputMisMatch]
+// [FailedPredicateException] : Dispatches the call to [ReportFailedPredicate]
+// All other types : Calls [NotifyErrorListeners] to Report the exception
+func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) {
+ // if we've already Reported an error and have not Matched a token
+ // yet successfully, don't Report any errors.
+ if d.InErrorRecoveryMode(recognizer) {
+ return // don't Report spurious errors
+ }
+ d.beginErrorCondition(recognizer)
+
+ switch t := e.(type) {
+ default:
+ fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name())
+ // fmt.Println(e.stack)
+ recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e)
+ case *NoViableAltException:
+ d.ReportNoViableAlternative(recognizer, t)
+ case *InputMisMatchException:
+ d.ReportInputMisMatch(recognizer, t)
+ case *FailedPredicateException:
+ d.ReportFailedPredicate(recognizer, t)
+ }
+}
+
+// Recover is the default recovery implementation.
+// It reSynchronizes the parser by consuming tokens until we find one in the reSynchronization set -
+// loosely the set of tokens that can follow the current rule.
+func (d *DefaultErrorStrategy) Recover(recognizer Parser, _ RecognitionException) {
+
+ if d.lastErrorIndex == recognizer.GetInputStream().Index() &&
+ d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) {
+ // uh oh, another error at same token index and previously-Visited
+ // state in ATN must be a case where LT(1) is in the recovery
+ // token set so nothing got consumed. Consume a single token
+ // at least to prevent an infinite loop; this is a failsafe.
+ recognizer.Consume()
+ }
+ d.lastErrorIndex = recognizer.GetInputStream().Index()
+ if d.lastErrorStates == nil {
+ d.lastErrorStates = NewIntervalSet()
+ }
+ d.lastErrorStates.addOne(recognizer.GetState())
+ followSet := d.GetErrorRecoverySet(recognizer)
+ d.consumeUntil(recognizer, followSet)
+}
+
+// Sync is the default implementation of error strategy synchronization.
+//
+// This Sync makes sure that the current lookahead symbol is consistent with what we were expecting
+// at this point in the [ATN]. You can call this anytime but ANTLR only
+// generates code to check before sub-rules/loops and each iteration.
+//
+// Implements [Jim Idle]'s magic Sync mechanism in closures and optional
+// sub-rules. E.g.:
+//
+// a : Sync ( stuff Sync )*
+// Sync : {consume to what can follow Sync}
+//
+// At the start of a sub-rule upon error, Sync performs single
+// token deletion, if possible. If it can't do that, it bails on the current
+// rule and uses the default error recovery, which consumes until the
+// reSynchronization set of the current rule.
+//
+// If the sub-rule is optional ((...)? or (...)*, or a block with an empty
+// alternative), then the expected set includes what follows the sub-rule.
+//
+// During loop iteration, it consumes until it sees a token that can start a
+// sub-rule or what follows loop. Yes, that is pretty aggressive. We opt to
+// stay in the loop as long as possible.
+//
+// # Origins
+//
+// Previous versions of ANTLR did a poor job of their recovery within loops.
+// A single mismatched or missing token would force the parser to bail
+// out of the entire rule surrounding the loop. So, for rule:
+//
+// classfunc : 'class' ID '{' member* '}'
+//
+// input with an extra token between members would force the parser to
+// consume until it found the next class definition rather than the next
+// member definition of the current class.
+//
+// This functionality costs a bit of effort because the parser has to
+// compare the token set at the start of the loop and at each iteration. If for
+// some reason speed is suffering for you, you can turn off this
+// functionality by simply overriding this method as empty:
+//
+// { }
+//
+// [Jim Idle]: https://github.com/jimidle
+func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
+ // If already recovering, don't try to Sync
+ if d.InErrorRecoveryMode(recognizer) {
+ return
+ }
+
+ s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
+ la := recognizer.GetTokenStream().LA(1)
+
+ // try the cheaper subset first; we might get lucky, and it seems to shave a wee bit off
+ nextTokens := recognizer.GetATN().NextTokens(s, nil)
+ if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) {
+ return
+ }
+
+ switch s.GetStateType() {
+ case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry:
+ // Report error and recover if possible
+ if d.SingleTokenDeletion(recognizer) != nil {
+ return
+ }
+ recognizer.SetError(NewInputMisMatchException(recognizer))
+ case ATNStatePlusLoopBack, ATNStateStarLoopBack:
+ d.ReportUnwantedToken(recognizer)
+ expecting := NewIntervalSet()
+ expecting.addSet(recognizer.GetExpectedTokens())
+ whatFollowsLoopIterationOrRule := expecting.addSet(d.GetErrorRecoverySet(recognizer))
+ d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule)
+ default:
+ // do nothing if we can't identify the exact kind of ATN state
+ }
+}
+
+// ReportNoViableAlternative is called by [ReportError] when the exception is a [NoViableAltException].
+//
+// See also [ReportError]
+func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) {
+ tokens := recognizer.GetTokenStream()
+ var input string
+ if tokens != nil {
+ if e.startToken.GetTokenType() == TokenEOF {
+ input = ""
+ } else {
+ input = tokens.GetTextFromTokens(e.startToken, e.offendingToken)
+ }
+ } else {
+ input = ""
+ }
+ msg := "no viable alternative at input " + d.escapeWSAndQuote(input)
+ recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+// ReportInputMisMatch is called by [ReportError] when the exception is an [InputMisMatchException]
+//
+// See also: [ReportError]
+func (d *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) {
+ msg := "mismatched input " + d.GetTokenErrorDisplay(e.offendingToken) +
+ " expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
+ recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+// ReportFailedPredicate is called by [ReportError] when the exception is a [FailedPredicateException].
+//
+// See also: [ReportError]
+func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) {
+ ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()]
+ msg := "rule " + ruleName + " " + e.message
+ recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+// ReportUnwantedToken is called to report a syntax error that requires the removal
+// of a token from the input stream. At the time this method is called, the
+// erroneous symbol is the current LT(1) symbol and has not yet been
+// removed from the input stream. When this method returns,
+// recognizer is in error recovery mode.
+//
+// This method is called when singleTokenDeletion identifies
+// single-token deletion as a viable recovery strategy for a mismatched
+// input error.
+//
+// The default implementation simply returns if the handler is already in
+// error recovery mode. Otherwise, it calls beginErrorCondition to
+// enter error recovery mode, followed by calling
+// [NotifyErrorListeners]
+func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
+ if d.InErrorRecoveryMode(recognizer) {
+ return
+ }
+ d.beginErrorCondition(recognizer)
+ t := recognizer.GetCurrentToken()
+ tokenName := d.GetTokenErrorDisplay(t)
+ expecting := d.GetExpectedTokens(recognizer)
+ msg := "extraneous input " + tokenName + " expecting " +
+ expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
+ recognizer.NotifyErrorListeners(msg, t, nil)
+}
+
+// ReportMissingToken is called to report a syntax error which requires the
+// insertion of a missing token into the input stream. At the time this
+// method is called, the missing token has not yet been inserted. When this
+// method returns, recognizer is in error recovery mode.
+//
+// This method is called when singleTokenInsertion identifies
+// single-token insertion as a viable recovery strategy for a mismatched
+// input error.
+//
+// The default implementation simply returns if the handler is already in
+// error recovery mode. Otherwise, it calls beginErrorCondition to
+// enter error recovery mode, followed by calling [NotifyErrorListeners]
+func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
+ if d.InErrorRecoveryMode(recognizer) {
+ return
+ }
+ d.beginErrorCondition(recognizer)
+ t := recognizer.GetCurrentToken()
+ expecting := d.GetExpectedTokens(recognizer)
+ msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) +
+ " at " + d.GetTokenErrorDisplay(t)
+ recognizer.NotifyErrorListeners(msg, t, nil)
+}
+
+// The RecoverInline default implementation attempts to recover from the mismatched input
+// by using single token insertion and deletion as described below. If the
+// recovery attempt fails, this method panics with [InputMisMatchException].
+// TODO: Not sure that panic() is the right thing to do here - JI
+//
+// # EXTRA TOKEN (single token deletion)
+//
+// LA(1) is not what we are looking for. If LA(2) has the
+// right token, however, then assume LA(1) is some extra spurious
+// token and delete it. Then consume and return the next token (which was
+// the LA(2) token) as the successful result of the Match operation.
+//
+// # This recovery strategy is implemented by singleTokenDeletion
+//
+// # MISSING TOKEN (single token insertion)
+//
+// If the current token (at LA(1)) is consistent with what could come
+// after the expected LA(1) token, then assume the token is missing
+// and use the parser's [TokenFactory] to create it on the fly. The
+// “insertion” is performed by returning the created token as the successful
+// result of the Match operation.
+//
+// This recovery strategy is implemented by [SingleTokenInsertion].
+//
+// # Example
+//
+// For example, the input i=(3 is clearly missing the ')'. When
+// the parser returns from the nested call to expr, its call
+// chain will be:
+//
+// stat → expr → atom
+//
+// and it will be trying to Match the ')' at this point in the
+// derivation:
+//
+// : ID '=' '(' INT ')' ('+' atom)* ';'
+// ^
+//
+// The attempt to [Match] ')' will fail when it sees ';' and
+// call [RecoverInline]. To recover, it sees that LA(1)==';'
+// is in the set of tokens that can follow the ')' token reference
+// in rule atom. It can assume that you forgot the ')'.
+func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
+ // SINGLE TOKEN DELETION
+ MatchedSymbol := d.SingleTokenDeletion(recognizer)
+ if MatchedSymbol != nil {
+ // we have deleted the extra token.
+ // now, move past ttype token as if all were ok
+ recognizer.Consume()
+ return MatchedSymbol
+ }
+ // SINGLE TOKEN INSERTION
+ if d.SingleTokenInsertion(recognizer) {
+ return d.GetMissingSymbol(recognizer)
+ }
+ // even that didn't work; record the mismatch error on the parser
+ recognizer.SetError(NewInputMisMatchException(recognizer))
+ return nil
+}
+
+// SingleTokenInsertion implements the single-token insertion inline error recovery
+// strategy. It is called by [RecoverInline] if the single-token
+// deletion strategy fails to recover from the mismatched input. If this
+// method returns true, the recognizer will be in error recovery
+// mode.
+//
+// This method determines whether single-token insertion is viable by
+// checking if the LA(1) input symbol could be successfully Matched
+// if it were instead the LA(2) symbol. If this method returns
+// true, the caller is responsible for creating and inserting a
+// token with the correct type to produce this behavior.
+//
+// This func returns true if single-token insertion is a viable recovery
+// strategy for the current mismatched input.
+func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool {
+ currentSymbolType := recognizer.GetTokenStream().LA(1)
+ // if current token is consistent with what could come after current
+ // ATN state, then we know we're missing a token error recovery
+ // is free to conjure up and insert the missing token
+ atn := recognizer.GetInterpreter().atn
+ currentState := atn.states[recognizer.GetState()]
+ next := currentState.GetTransitions()[0].getTarget()
+ expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext())
+ if expectingAtLL2.contains(currentSymbolType) {
+ d.ReportMissingToken(recognizer)
+ return true
+ }
+
+ return false
+}
+
+// SingleTokenDeletion implements the single-token deletion inline error recovery
+// strategy. It is called by [RecoverInline] to attempt to recover
+// from mismatched input. If this method returns nil, the parser and error
+// handler state will not have changed. If this method returns non-nil,
+// recognizer will not be in error recovery mode since the
+// returned token was a successful Match.
+//
+// If the single-token deletion is successful, this method calls
+// [ReportUnwantedToken] to Report the error, followed by
+// [Consume] to actually “delete” the extraneous token. Then,
+// before returning, [ReportMatch] is called to signal a successful
+// Match.
+//
+// The func returns the successfully Matched [Token] instance if single-token
+// deletion successfully recovers from the mismatched input, otherwise nil.
+func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
+ NextTokenType := recognizer.GetTokenStream().LA(2)
+ expecting := d.GetExpectedTokens(recognizer)
+ if expecting.contains(NextTokenType) {
+ d.ReportUnwantedToken(recognizer)
+ // print("recoverFromMisMatchedToken deleting " \
+ // + str(recognizer.GetTokenStream().LT(1)) \
+ // + " since " + str(recognizer.GetTokenStream().LT(2)) \
+ // + " is what we want", file=sys.stderr)
+ recognizer.Consume() // simply delete extra token
+ // we want to return the token we're actually Matching
+ MatchedSymbol := recognizer.GetCurrentToken()
+ d.ReportMatch(recognizer) // we know current token is correct
+ return MatchedSymbol
+ }
+
+ return nil
+}
+
+// GetMissingSymbol conjures up a missing token during error recovery.
+//
+// The recognizer attempts to recover from single missing
+// symbols. But, actions might refer to that missing symbol.
+// For example:
+//
+// x=ID {f($x)}.
+//
+// The action clearly assumes
+// that there has been an identifier Matched previously and that
+// $x points at that token. If that token is missing, but
+// the next token in the stream is what we want, we assume that
+// this token is missing, and we keep going. Because we
+// have to return some token to replace the missing token,
+// we have to conjure one up. This method gives the user control
+// over the tokens returned for missing tokens. Mostly,
+// you will want to create something special for identifier
+// tokens. For literals such as '{' and ',', the default
+// action in the parser or tree parser works. It simply creates
+// a [CommonToken] of the appropriate type. The text will be the token name.
+// If you need to change which tokens must be created by the lexer,
+// override this method to create the appropriate tokens.
+func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
+ currentSymbol := recognizer.GetCurrentToken()
+ expecting := d.GetExpectedTokens(recognizer)
+ expectedTokenType := expecting.first()
+ var tokenText string
+
+ if expectedTokenType == TokenEOF {
+ tokenText = "<missing EOF>"
+ } else {
+ ln := recognizer.GetLiteralNames()
+ if expectedTokenType > 0 && expectedTokenType < len(ln) {
+ tokenText = "<missing " + ln[expectedTokenType] + ">"
+ } else {
+ tokenText = "<missing undefined>" // TODO: matches the JS impl
+ }
+ }
+ current := currentSymbol
+ lookback := recognizer.GetTokenStream().LT(-1)
+ if current.GetTokenType() == TokenEOF && lookback != nil {
+ current = lookback
+ }
+
+ tf := recognizer.GetTokenFactory()
+
+ return tf.Create(current.GetSource(), expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.GetLine(), current.GetColumn())
+}
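The comment above invites customizing the conjured token, but one Go-specific caveat applies: embedding is not virtual dispatch, so an embedded DefaultErrorStrategy's RecoverInline would still call its own GetMissingSymbol. A hedged sketch that therefore adjusts the token inside an overridden RecoverInline; the placeholder text is purely illustrative.

```go
package demo

import "github.com/antlr4-go/antlr/v4"

// placeholderStrategy gives conjured tokens a recognizable text.
type placeholderStrategy struct {
	*antlr.DefaultErrorStrategy
}

func (s *placeholderStrategy) RecoverInline(recognizer antlr.Parser) antlr.Token {
	if matched := s.SingleTokenDeletion(recognizer); matched != nil {
		recognizer.Consume() // the extra token was deleted; move past it
		return matched
	}
	if s.SingleTokenInsertion(recognizer) {
		tok := s.GetMissingSymbol(recognizer) // conjure the default token...
		tok.SetText("<placeholder>")          // ...then adjust its text
		return tok
	}
	recognizer.SetError(antlr.NewInputMisMatchException(recognizer))
	return nil
}
```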
+
+// GetExpectedTokens returns the set of token types the parser expected at the point where the error occurred.
+func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet {
+ return recognizer.GetExpectedTokens()
+}
+
+// GetTokenErrorDisplay determines how a token should be displayed in an error message.
+// The default is to display just the text, but during development you might
+// want to have a lot of information spit out. Override this func in that case
+// to use t.String() (which, for [CommonToken], dumps everything about
+// the token). This is better than forcing you to override a method in
+// your token objects because you don't have to go modify your lexer
+// so that it creates a new type.
+func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string {
+ if t == nil {
+ return "<no token>"
+ }
+ s := t.GetText()
+ if s == "" {
+ if t.GetTokenType() == TokenEOF {
+ s = "<EOF>"
+ } else {
+ s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
+ }
+ }
+ return d.escapeWSAndQuote(s)
+}
+
+func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
+ s = strings.Replace(s, "\t", "\\t", -1)
+ s = strings.Replace(s, "\n", "\\n", -1)
+ s = strings.Replace(s, "\r", "\\r", -1)
+ return "'" + s + "'"
+}
+
+// GetErrorRecoverySet computes the error recovery set for the current rule. During
+// rule invocation, the parser pushes the set of tokens that can
+// follow that rule reference on the stack. This amounts to
+// computing FIRST of what follows the rule reference in the
+// enclosing rule. See LinearApproximator.FIRST().
+//
+// This local follow set only includes tokens
+// from within the rule i.e., the FIRST computation done by
+// ANTLR stops at the end of a rule.
+//
+// # Example
+//
+// When you find a "no viable alt exception", the input is not
+// consistent with any of the alternatives for rule r. The best
+// thing to do is to consume tokens until you see something that
+// can legally follow a call to r or any rule that called r.
+// You don't want the exact set of viable next tokens because the
+// input might just be missing a token--you might consume the
+// rest of the input looking for one of the missing tokens.
+//
+// Consider the grammar:
+//
+// a : '[' b ']'
+// | '(' b ')'
+// ;
+//
+// b : c '^' INT
+// ;
+//
+// c : ID
+// | INT
+// ;
+//
+// At each rule invocation, the set of tokens that could follow
+// that rule is pushed on a stack. Here are the various
+// context-sensitive follow sets:
+//
+// FOLLOW(b1_in_a) = FIRST(']') = ']'
+// FOLLOW(b2_in_a) = FIRST(')') = ')'
+// FOLLOW(c_in_b) = FIRST('^') = '^'
+//
+// Upon erroneous input “[]”, the call chain is
+//
+// a → b → c
+//
+// and, hence, the follow context stack is:
+//
+// Depth Follow set Start of rule execution
+// 0 a (from main())
+// 1 ']' b
+// 2 '^' c
+//
+// Notice that ')' is not included, because b would have to have
+// been called from a different context in rule a for ')' to be
+// included.
+//
+// For error recovery, we cannot consider FOLLOW(c)
+// (context-sensitive or otherwise). We need the combined set of
+// all context-sensitive FOLLOW sets - the set of all tokens that
+// could follow any reference in the call chain. We need to
+// reSync to one of those tokens. Note that FOLLOW(c)='^' and if
+// we reSync'd to that token, we'd consume until EOF. We need to
+// Sync to context-sensitive FOLLOWs for a, b, and c:
+//
+// {']','^'}
+//
+// In this case, for input "[]", LA(1) is ']' and in the set, so we would
+// not consume anything. After printing an error, rule c would
+// return normally. Rule b would not find the required '^' though.
+// At this point, it gets a mismatched token error and panics an
+// exception (since LA(1) is not in the viable following token
+// set). The rule exception handler tries to recover, but finds
+// the same recovery set and doesn't consume anything. Rule b
+// exits normally returning to rule a. Now it finds the ']' (and
+// with the successful Match exits errorRecovery mode).
+//
+// So, you can see that the parser walks up the call chain looking
+// for the token that was a member of the recovery set.
+//
+// Errors are not generated in errorRecovery mode.
+//
+// ANTLR's error recovery mechanism is based upon original ideas:
+//
+// [Algorithms + Data Structures = Programs] by Niklaus Wirth and
+// [A note on error recovery in recursive descent parsers].
+//
+// Later, Josef Grosch had some good ideas in [Efficient and Comfortable Error Recovery in Recursive Descent
+// Parsers]
+//
+// Like Grosch I implement context-sensitive FOLLOW sets that are combined at run-time upon error to avoid overhead
+// during parsing. Later, the runtime Sync was improved for loops/sub-rules see [Sync] docs
+//
+// [A note on error recovery in recursive descent parsers]: http://portal.acm.org/citation.cfm?id=947902.947905
+// [Algorithms + Data Structures = Programs]: https://t.ly/5QzgE
+// [Efficient and Comfortable Error Recovery in Recursive Descent Parsers]: ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
+func (d *DefaultErrorStrategy) GetErrorRecoverySet(recognizer Parser) *IntervalSet {
+ atn := recognizer.GetInterpreter().atn
+ ctx := recognizer.GetParserRuleContext()
+ recoverSet := NewIntervalSet()
+ for ctx != nil && ctx.GetInvokingState() >= 0 {
+ // compute what follows who invoked us
+ invokingState := atn.states[ctx.GetInvokingState()]
+ rt := invokingState.GetTransitions()[0]
+ follow := atn.NextTokens(rt.(*RuleTransition).followState, nil)
+ recoverSet.addSet(follow)
+ ctx = ctx.GetParent().(ParserRuleContext)
+ }
+ recoverSet.removeOne(TokenEpsilon)
+ return recoverSet
+}
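A tiny self-contained illustration of the union described above, using plain Go sets rather than the runtime's IntervalSet: for input "[]" with call chain a → b → c, the recovery set is the union of every pushed FOLLOW set, not just the innermost one.

```go
package main

import "fmt"

func main() {
	// FOLLOW sets pushed as a → b → c were invoked (deepest last).
	followStack := []map[rune]bool{
		{},          // FOLLOW for a, invoked from main(): nothing
		{']': true}, // FOLLOW(b1_in_a) = FIRST(']')
		{'^': true}, // FOLLOW(c_in_b)  = FIRST('^')
	}

	// Like GetErrorRecoverySet, walk the whole invocation stack and union the sets.
	recovery := map[rune]bool{}
	for _, follow := range followStack {
		for tok := range follow {
			recovery[tok] = true
		}
	}
	fmt.Println(recovery[']'], recovery['^']) // true true, i.e. {']','^'}
}
```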
+
+// consumeUntil consumes tokens until one Matches the given token set.
+func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) {
+ ttype := recognizer.GetTokenStream().LA(1)
+ for ttype != TokenEOF && !set.contains(ttype) {
+ recognizer.Consume()
+ ttype = recognizer.GetTokenStream().LA(1)
+ }
+}
+
+// The BailErrorStrategy implementation of ANTLRErrorStrategy responds to syntax errors
+// by immediately canceling the parse operation with a
+// [ParseCancellationException]. The implementation ensures that the
+// [ParserRuleContext] exception field is set for all parse tree nodes
+// that were not completed prior to encountering the error.
+//
+// This error strategy is useful in the following scenarios.
+//
+// - Two-stage parsing: This error strategy allows the first
+// stage of two-stage parsing to immediately terminate if an error is
+// encountered, and immediately fall back to the second stage. In addition to
+// avoiding wasted work by attempting to recover from errors here, the empty
+// implementation of [BailErrorStrategy.Sync] improves the performance of
+// the first stage.
+//
+// - Silent validation: When syntax errors are not being
+// Reported or logged, and the parse result is simply ignored if errors occur,
+// the [BailErrorStrategy] avoids wasting work on recovering from errors
+// when the result will be ignored either way.
+//
+// myparser.SetErrorHandler(NewBailErrorStrategy())
+//
+// See also: [Parser.SetErrorHandler]
+type BailErrorStrategy struct {
+ *DefaultErrorStrategy
+}
+
+var _ ErrorStrategy = &BailErrorStrategy{}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewBailErrorStrategy() *BailErrorStrategy {
+
+ b := new(BailErrorStrategy)
+
+ b.DefaultErrorStrategy = NewDefaultErrorStrategy()
+
+ return b
+}
+
+// Recover does not try to recover from exception e. Instead, it marks the parse
+// as cancelled with a [ParseCancellationException] so the error is not swallowed
+// by rule-level recovery, and records the original [RecognitionException] on each
+// unfinished [ParserRuleContext].
+func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
+ context := recognizer.GetParserRuleContext()
+ for context != nil {
+ context.SetException(e)
+ if parent, ok := context.GetParent().(ParserRuleContext); ok {
+ context = parent
+ } else {
+ context = nil
+ }
+ }
+ recognizer.SetError(NewParseCancellationException()) // TODO: we don't emit e properly
+}
+
+// RecoverInline makes sure we don't attempt to recover inline; if the parser
+// successfully recovers, it won't panic an exception.
+func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token {
+ b.Recover(recognizer, NewInputMisMatchException(recognizer))
+
+ return nil
+}
+
+// Sync makes sure we don't attempt to recover from problems in sub-rules.
+func (b *BailErrorStrategy) Sync(_ Parser) {
+}
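A hedged sketch of the two-stage pattern described above. NewMyLexer, NewMyParser, and the Prog start rule stand in for hypothetical generated code; the antlr calls themselves are real API. Stage one combines SLL prediction with the bail strategy, and only on error do we pay for a full reparse with default recovery.

```go
func parseTwoStage(input string) antlr.Tree {
	parse := func(bail bool) (antlr.Tree, int) {
		lexer := NewMyLexer(antlr.NewInputStream(input)) // hypothetical generated lexer
		p := NewMyParser(antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel))
		errs := &countingListener{}
		p.RemoveErrorListeners()
		p.AddErrorListener(errs)
		if bail {
			p.SetErrorHandler(antlr.NewBailErrorStrategy())
			p.GetInterpreter().SetPredictionMode(antlr.PredictionModeSLL)
		}
		tree := p.Prog() // hypothetical start rule
		return tree, errs.count
	}
	if tree, n := parse(true); n == 0 {
		return tree // fast path succeeded
	}
	tree, _ := parse(false) // slow path with full recovery
	return tree
}

type countingListener struct {
	*antlr.DefaultErrorListener
	count int
}

func (c *countingListener) SyntaxError(_ antlr.Recognizer, _ interface{}, _, _ int,
	_ string, _ antlr.RecognitionException) {
	c.count++
}
```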
diff --git a/vendor/github.com/antlr4-go/antlr/v4/errors.go b/vendor/github.com/antlr4-go/antlr/v4/errors.go
new file mode 100644
index 000000000..8f0f2f601
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/errors.go
@@ -0,0 +1,259 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// RecognitionException is the root of the ANTLR exception hierarchy. In general, ANTLR tracks just
+// three kinds of errors: prediction errors, failed predicate errors, and
+// mismatched input errors. In each case, the parser knows where it is
+// in the input, where it is in the ATN, the rule invocation stack,
+// and what kind of problem occurred.
+type RecognitionException interface {
+ GetOffendingToken() Token
+ GetMessage() string
+ GetInputStream() IntStream
+}
+
+type BaseRecognitionException struct {
+ message string
+ recognizer Recognizer
+ offendingToken Token
+ offendingState int
+ ctx RuleContext
+ input IntStream
+}
+
+func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException {
+
+ // todo
+ // Error.call(this)
+ //
+ // if (!!Error.captureStackTrace) {
+ // Error.captureStackTrace(this, RecognitionException)
+ // } else {
+ // stack := NewError().stack
+ // }
+ // TODO: may be able to use - "runtime" func Stack(buf []byte, all bool) int
+
+ t := new(BaseRecognitionException)
+
+ t.message = message
+ t.recognizer = recognizer
+ t.input = input
+ t.ctx = ctx
+
+ // The current Token when an error occurred. Since not all streams
+ // support accessing symbols by index, we have to track the [Token]
+ // instance itself.
+ //
+ t.offendingToken = nil
+
+ // Get the ATN state number the parser was in at the time the error
+ // occurred. For NoViableAltException and LexerNoViableAltException exceptions, this is the
+ // DecisionState number. For others, it is the state whose outgoing edge we couldn't Match.
+ //
+ t.offendingState = -1
+ if t.recognizer != nil {
+ t.offendingState = t.recognizer.GetState()
+ }
+
+ return t
+}
+
+func (b *BaseRecognitionException) GetMessage() string {
+ return b.message
+}
+
+func (b *BaseRecognitionException) GetOffendingToken() Token {
+ return b.offendingToken
+}
+
+func (b *BaseRecognitionException) GetInputStream() IntStream {
+ return b.input
+}
+
+// If the state number is not known, this method returns -1.
+
+// getExpectedTokens gets the set of input symbols which could potentially follow the
+// previously Matched symbol at the time this exception was raised.
+//
+// If the set of expected tokens is not known and could not be computed,
+// this method returns nil.
+//
+// The func returns the set of token types that could potentially follow the current
+// state in the [ATN], or nil if the information is not available.
+func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet {
+ if b.recognizer != nil {
+ return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx)
+ }
+
+ return nil
+}
+
+func (b *BaseRecognitionException) String() string {
+ return b.message
+}
+
+type LexerNoViableAltException struct {
+ *BaseRecognitionException
+
+ startIndex int
+ deadEndConfigs *ATNConfigSet
+}
+
+func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs *ATNConfigSet) *LexerNoViableAltException {
+
+ l := new(LexerNoViableAltException)
+
+ l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil)
+
+ l.startIndex = startIndex
+ l.deadEndConfigs = deadEndConfigs
+
+ return l
+}
+
+func (l *LexerNoViableAltException) String() string {
+ symbol := ""
+ if l.startIndex >= 0 && l.startIndex < l.input.Size() {
+ symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex))
+ }
+ return "LexerNoViableAltException" + symbol
+}
+
+type NoViableAltException struct {
+ *BaseRecognitionException
+
+ startToken Token
+ offendingToken Token
+ ctx ParserRuleContext
+ deadEndConfigs *ATNConfigSet
+}
+
+// NewNoViableAltException creates an exception indicating that the parser could not decide which of two or more paths
+// to take based upon the remaining input. It tracks the starting token
+// of the offending input and also knows where the parser was
+// in the various paths when the error occurred.
+//
+// Reported by [ReportNoViableAlternative]
+func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs *ATNConfigSet, ctx ParserRuleContext) *NoViableAltException {
+
+ if ctx == nil {
+ ctx = recognizer.GetParserRuleContext()
+ }
+
+ if offendingToken == nil {
+ offendingToken = recognizer.GetCurrentToken()
+ }
+
+ if startToken == nil {
+ startToken = recognizer.GetCurrentToken()
+ }
+
+ if input == nil {
+ input = recognizer.GetInputStream().(TokenStream)
+ }
+
+ n := new(NoViableAltException)
+ n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx)
+
+ // Which configurations did we try at input.Index() that couldn't Match
+ // input.LT(1)
+ n.deadEndConfigs = deadEndConfigs
+
+ // The token object at the start index; the input stream might
+ // not be buffering tokens, so get a reference to it.
+ //
+ // At the time the error occurred, of course the stream needs to keep a
+ // buffer of all the tokens, but later we might not have access to those.
+ n.startToken = startToken
+ n.offendingToken = offendingToken
+
+ return n
+}
+
+type InputMisMatchException struct {
+ *BaseRecognitionException
+}
+
+// NewInputMisMatchException creates an exception that signifies any kind of mismatched input, such as
+// when the current input does not Match the expected token.
+func NewInputMisMatchException(recognizer Parser) *InputMisMatchException {
+
+ i := new(InputMisMatchException)
+ i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())
+
+ i.offendingToken = recognizer.GetCurrentToken()
+
+ return i
+}
+
+// FailedPredicateException indicates that a semantic predicate failed during validation. Validation of predicates
+// occurs when the alternative is parsed normally, just like Matching a token.
+// Disambiguating predicate evaluation occurs when we test a predicate during
+// prediction.
+type FailedPredicateException struct {
+ *BaseRecognitionException
+
+ ruleIndex int
+ predicateIndex int
+ predicate string
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException {
+
+ f := new(FailedPredicateException)
+
+ f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())
+
+ s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
+ trans := s.GetTransitions()[0]
+ if trans2, ok := trans.(*PredicateTransition); ok {
+ f.ruleIndex = trans2.ruleIndex
+ f.predicateIndex = trans2.predIndex
+ } else {
+ f.ruleIndex = 0
+ f.predicateIndex = 0
+ }
+ f.predicate = predicate
+ f.offendingToken = recognizer.GetCurrentToken()
+
+ return f
+}
+
+func (f *FailedPredicateException) formatMessage(predicate, message string) string {
+ if message != "" {
+ return message
+ }
+
+ return "failed predicate: {" + predicate + "}?"
+}
+
+type ParseCancellationException struct {
+}
+
+func (p ParseCancellationException) GetOffendingToken() Token {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (p ParseCancellationException) GetMessage() string {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (p ParseCancellationException) GetInputStream() IntStream {
+ //TODO implement me
+ panic("implement me")
+}
+
+func NewParseCancellationException() *ParseCancellationException {
+ // Error.call(this)
+ // Error.captureStackTrace(this, ParseCancellationException)
+ return new(ParseCancellationException)
+}
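These concrete types let an application distinguish error kinds, for example inside an error listener's SyntaxError callback. A hedged sketch of a type switch over the types defined in this file:

```go
func describe(e antlr.RecognitionException) string {
	switch ex := e.(type) {
	case nil:
		return "no exception attached"
	case *antlr.NoViableAltException:
		return "no viable alternative"
	case *antlr.LexerNoViableAltException:
		return "lexer: no viable alternative"
	case *antlr.InputMisMatchException:
		return "mismatched input"
	case *antlr.FailedPredicateException:
		return "failed predicate"
	default:
		return ex.GetMessage()
	}
}
```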
diff --git a/vendor/github.com/antlr4-go/antlr/v4/file_stream.go b/vendor/github.com/antlr4-go/antlr/v4/file_stream.go
new file mode 100644
index 000000000..5f65f809b
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/file_stream.go
@@ -0,0 +1,67 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "bufio"
+ "os"
+)
+
+// FileStream is an InputStream that is loaded from a file all at once
+// when you construct the object.
+type FileStream struct {
+ InputStream
+ filename string
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewFileStream(fileName string) (*FileStream, error) {
+
+ f, err := os.Open(fileName)
+ if err != nil {
+ return nil, err
+ }
+
+ // Close the file when the constructor returns; the contents are read
+ // fully below, so the close error is intentionally ignored.
+ defer func(f *os.File) {
+ _ = f.Close()
+ }(f)
+
+ reader := bufio.NewReader(f)
+ fInfo, err := f.Stat()
+ if err != nil {
+ return nil, err
+ }
+
+ fs := &FileStream{
+ InputStream: InputStream{
+ index: 0,
+ name: fileName,
+ },
+ filename: fileName,
+ }
+
+ // Pre-build the buffer and read runes efficiently
+ //
+ fs.data = make([]rune, 0, fInfo.Size())
+ for {
+ r, _, err := reader.ReadRune()
+ if err != nil {
+ break
+ }
+ fs.data = append(fs.data, r)
+ }
+ fs.size = len(fs.data) // Size in runes
+
+ // All done.
+ //
+ return fs, nil
+}
+
+func (f *FileStream) GetSourceName() string {
+ return f.filename
+}
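A small usage sketch; the file name is illustrative. Because the constructor reads the whole file into a rune buffer up front, Size reports runes rather than bytes.

```go
package main

import (
	"fmt"
	"log"

	"github.com/antlr4-go/antlr/v4"
)

func main() {
	fs, err := antlr.NewFileStream("input.txt") // hypothetical input file
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(fs.GetSourceName(), fs.Size()) // source name and length in runes
}
```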
diff --git a/vendor/github.com/antlr4-go/antlr/v4/input_stream.go b/vendor/github.com/antlr4-go/antlr/v4/input_stream.go
new file mode 100644
index 000000000..b737fe85f
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/input_stream.go
@@ -0,0 +1,157 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "bufio"
+ "io"
+)
+
+type InputStream struct {
+ name string
+ index int
+ data []rune
+ size int
+}
+
+// NewIoStream creates a new input stream from the given io.Reader.
+// Note that the reader is read completely into memory and so it must actually
+// have a stopping point - you cannot pass in a reader on an open-ended source such
+// as a socket for instance.
+func NewIoStream(reader io.Reader) *InputStream {
+
+ rReader := bufio.NewReader(reader)
+
+ is := &InputStream{
+ name: "",
+ index: 0,
+ }
+
+ // Pre-build the buffer and read runes reasonably efficiently given that
+ // we don't exactly know how big the input is.
+ //
+ is.data = make([]rune, 0, 512)
+ for {
+ r, _, err := rReader.ReadRune()
+ if err != nil {
+ break
+ }
+ is.data = append(is.data, r)
+ }
+ is.size = len(is.data) // number of runes
+ return is
+}
+
+// NewInputStream creates a new input stream from the given string
+func NewInputStream(data string) *InputStream {
+
+ is := &InputStream{
+ name: "",
+ index: 0,
+ data: []rune(data), // This is actually the most efficient way
+ }
+ is.size = len(is.data) // number of runes; note that len(data) on the string would count bytes, not runes
+ return is
+}
+
+func (is *InputStream) reset() {
+ is.index = 0
+}
+
+// Consume moves the input pointer to the next character in the input stream
+func (is *InputStream) Consume() {
+ if is.index >= is.size {
+ // assert is.LA(1) == TokenEOF
+ panic("cannot consume EOF")
+ }
+ is.index++
+}
+
+// LA returns the character at the given lookahead offset relative to the current position; LA(1) is the next character
+func (is *InputStream) LA(offset int) int {
+
+ if offset == 0 {
+ return 0 // nil
+ }
+ if offset < 0 {
+ offset++ // e.g., translate LA(-1) to use offset=0
+ }
+ pos := is.index + offset - 1
+
+ if pos < 0 || pos >= is.size { // invalid
+ return TokenEOF
+ }
+
+ return int(is.data[pos])
+}
+
+// LT returns the character at the given lookahead offset; for this stream it is identical to LA
+func (is *InputStream) LT(offset int) int {
+ return is.LA(offset)
+}
+
+// Index returns the current offset into the input stream
+func (is *InputStream) Index() int {
+ return is.index
+}
+
+// Size returns the total number of characters in the input stream
+func (is *InputStream) Size() int {
+ return is.size
+}
+
+// Mark does nothing here as we have the entire buffer
+func (is *InputStream) Mark() int {
+ return -1
+}
+
+// Release does nothing here as we have the entire buffer
+func (is *InputStream) Release(_ int) {
+}
+
+// Seek moves the input pointer to the provided index offset
+func (is *InputStream) Seek(index int) {
+ if index <= is.index {
+ is.index = index // just jump don't update stream state (line,...)
+ return
+ }
+ // seek forward
+ is.index = intMin(index, is.size)
+}
+
+// GetText returns the text from the input stream from the start to the stop index
+func (is *InputStream) GetText(start int, stop int) string {
+ if stop >= is.size {
+ stop = is.size - 1
+ }
+ if start >= is.size {
+ return ""
+ }
+
+ return string(is.data[start : stop+1])
+}
+
+// GetTextFromTokens returns the text from the input stream from the first character of the start token to the last
+// character of the stop token
+func (is *InputStream) GetTextFromTokens(start, stop Token) string {
+ if start != nil && stop != nil {
+ return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex()))
+ }
+
+ return ""
+}
+
+func (is *InputStream) GetTextFromInterval(i Interval) string {
+ return is.GetText(i.Start, i.Stop)
+}
+
+func (*InputStream) GetSourceName() string {
+ return ""
+}
+
+// String returns the entire input stream as a string
+func (is *InputStream) String() string {
+ return string(is.data)
+}
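The lookahead offsets are relative to the current position: LA(1) is the next rune, LA(-1) the previous one, and TokenEOF is returned past either end. A small demonstration:

```go
package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

func main() {
	is := antlr.NewInputStream("ab")
	fmt.Println(is.LA(1) == 'a') // true: the next rune

	is.Consume()
	fmt.Println(is.LA(-1) == 'a', is.LA(1) == 'b') // true true

	is.Consume()
	fmt.Println(is.LA(1) == antlr.TokenEOF) // true: past the end
}
```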
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/int_stream.go b/vendor/github.com/antlr4-go/antlr/v4/int_stream.go
similarity index 100%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/int_stream.go
rename to vendor/github.com/antlr4-go/antlr/v4/int_stream.go
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go b/vendor/github.com/antlr4-go/antlr/v4/interval_set.go
similarity index 82%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go
rename to vendor/github.com/antlr4-go/antlr/v4/interval_set.go
index c1e155e81..cc5066067 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/interval_set.go
@@ -14,20 +14,21 @@ type Interval struct {
Stop int
}
-/* stop is not included! */
-func NewInterval(start, stop int) *Interval {
- i := new(Interval)
-
- i.Start = start
- i.Stop = stop
- return i
+// NewInterval creates a new interval with the given start and stop values.
+func NewInterval(start, stop int) Interval {
+ return Interval{
+ Start: start,
+ Stop: stop,
+ }
}
-func (i *Interval) Contains(item int) bool {
+// Contains returns true if the given item is contained within the interval.
+func (i Interval) Contains(item int) bool {
return item >= i.Start && item < i.Stop
}
-func (i *Interval) String() string {
+// String generates a string representation of the interval.
+func (i Interval) String() string {
if i.Start == i.Stop-1 {
return strconv.Itoa(i.Start)
}
@@ -35,15 +36,18 @@ func (i *Interval) String() string {
return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1)
}
-func (i *Interval) length() int {
+// Length returns the length of the interval.
+func (i Interval) Length() int {
return i.Stop - i.Start
}
+// IntervalSet represents a collection of [Intervals], which may be read-only.
type IntervalSet struct {
- intervals []*Interval
+ intervals []Interval
readOnly bool
}
+// NewIntervalSet creates a new empty, writable, interval set.
func NewIntervalSet() *IntervalSet {
i := new(IntervalSet)
@@ -54,6 +58,20 @@ func NewIntervalSet() *IntervalSet {
return i
}
+func (i *IntervalSet) Equals(other *IntervalSet) bool {
+ if len(i.intervals) != len(other.intervals) {
+ return false
+ }
+
+ for k, v := range i.intervals {
+ if v.Start != other.intervals[k].Start || v.Stop != other.intervals[k].Stop {
+ return false
+ }
+ }
+
+ return true
+}
+
func (i *IntervalSet) first() int {
if len(i.intervals) == 0 {
return TokenInvalidType
@@ -70,16 +88,16 @@ func (i *IntervalSet) addRange(l, h int) {
i.addInterval(NewInterval(l, h+1))
}
-func (i *IntervalSet) addInterval(v *Interval) {
+func (i *IntervalSet) addInterval(v Interval) {
if i.intervals == nil {
- i.intervals = make([]*Interval, 0)
+ i.intervals = make([]Interval, 0)
i.intervals = append(i.intervals, v)
} else {
// find insert pos
for k, interval := range i.intervals {
// distinct range -> insert
if v.Stop < interval.Start {
- i.intervals = append(i.intervals[0:k], append([]*Interval{v}, i.intervals[k:]...)...)
+ i.intervals = append(i.intervals[0:k], append([]Interval{v}, i.intervals[k:]...)...)
return
} else if v.Stop == interval.Start {
i.intervals[k].Start = v.Start
@@ -139,16 +157,16 @@ func (i *IntervalSet) contains(item int) bool {
}
func (i *IntervalSet) length() int {
- len := 0
+ iLen := 0
for _, v := range i.intervals {
- len += v.length()
+ iLen += v.Length()
}
- return len
+ return iLen
}
-func (i *IntervalSet) removeRange(v *Interval) {
+func (i *IntervalSet) removeRange(v Interval) {
if v.Start == v.Stop-1 {
i.removeOne(v.Start)
} else if i.intervals != nil {
@@ -162,7 +180,7 @@ func (i *IntervalSet) removeRange(v *Interval) {
i.intervals[k] = NewInterval(ni.Start, v.Start)
x := NewInterval(v.Stop, ni.Stop)
// i.intervals.splice(k, 0, x)
- i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
+ i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...)
return
} else if v.Start <= ni.Start && v.Stop >= ni.Stop {
// i.intervals.splice(k, 1)
@@ -199,7 +217,7 @@ func (i *IntervalSet) removeOne(v int) {
x := NewInterval(ki.Start, v)
ki.Start = v + 1
// i.intervals.splice(k, 0, x)
- i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
+ i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...)
return
}
}
@@ -223,7 +241,7 @@ func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []strin
return i.toIndexString()
}
-func (i *IntervalSet) GetIntervals() []*Interval {
+func (i *IntervalSet) GetIntervals() []Interval {
return i.intervals
}
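With the switch to value semantics, Interval is cheap to copy, and its half-open behavior is easy to check: Contains excludes Stop, while String prints the inclusive Start..Stop-1 form.

```go
package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

func main() {
	iv := antlr.NewInterval(3, 6)
	fmt.Println(iv.Contains(5)) // true
	fmt.Println(iv.Contains(6)) // false: Stop is excluded
	fmt.Println(iv.Length())    // 3
	fmt.Println(iv)             // "3..5", the inclusive display form
}
```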
diff --git a/vendor/github.com/antlr4-go/antlr/v4/jcollect.go b/vendor/github.com/antlr4-go/antlr/v4/jcollect.go
new file mode 100644
index 000000000..ceccd96d2
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/jcollect.go
@@ -0,0 +1,685 @@
+package antlr
+
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+import (
+ "container/list"
+ "runtime/debug"
+ "sort"
+ "sync"
+)
+
+// Collectable is an interface that a struct should implement if it is to be
+// usable as a key in these collections.
+type Collectable[T any] interface {
+ Hash() int
+ Equals(other Collectable[T]) bool
+}
+
+type Comparator[T any] interface {
+ Hash1(o T) int
+ Equals2(T, T) bool
+}
+
+type CollectionSource int
+type CollectionDescriptor struct {
+ SybolicName string
+ Description string
+}
+
+const (
+ UnknownCollection CollectionSource = iota
+ ATNConfigLookupCollection
+ ATNStateCollection
+ DFAStateCollection
+ ATNConfigCollection
+ PredictionContextCollection
+ SemanticContextCollection
+ ClosureBusyCollection
+ PredictionVisitedCollection
+ MergeCacheCollection
+ PredictionContextCacheCollection
+ AltSetCollection
+ ReachSetCollection
+)
+
+var CollectionDescriptors = map[CollectionSource]CollectionDescriptor{
+ UnknownCollection: {
+ SybolicName: "UnknownCollection",
+ Description: "Unknown collection type. Only used if the target author thought it was an unimportant collection.",
+ },
+ ATNConfigCollection: {
+ SybolicName: "ATNConfigCollection",
+ Description: "ATNConfig collection. Used to store the ATNConfigs for a particular state in the ATN." +
+ "For instance, it is used to store the results of the closure() operation in the ATN.",
+ },
+ ATNConfigLookupCollection: {
+ SybolicName: "ATNConfigLookupCollection",
+ Description: "ATNConfigLookup collection. Used to store the ATNConfigs for a particular state in the ATN." +
+ "This is used to prevent duplicating equivalent states in an ATNConfigurationSet.",
+ },
+ ATNStateCollection: {
+ SybolicName: "ATNStateCollection",
+ Description: "ATNState collection. This is used to store the states of the ATN.",
+ },
+ DFAStateCollection: {
+ SybolicName: "DFAStateCollection",
+ Description: "DFAState collection. This is used to store the states of the DFA.",
+ },
+ PredictionContextCollection: {
+ SybolicName: "PredictionContextCollection",
+ Description: "PredictionContext collection. This is used to store the prediction contexts of the ATN and cache computes.",
+ },
+ SemanticContextCollection: {
+ SybolicName: "SemanticContextCollection",
+ Description: "SemanticContext collection. This is used to store the semantic contexts of the ATN.",
+ },
+ ClosureBusyCollection: {
+ SybolicName: "ClosureBusyCollection",
+ Description: "ClosureBusy collection. This is used to check and prevent infinite recursion right recursive rules." +
+ "It stores ATNConfigs that are currently being processed in the closure() operation.",
+ },
+ PredictionVisitedCollection: {
+ SybolicName: "PredictionVisitedCollection",
+ Description: "A map that records whether we have visited a particular context when searching through cached entries.",
+ },
+ MergeCacheCollection: {
+ SybolicName: "MergeCacheCollection",
+ Description: "A map that records whether we have already merged two particular contexts and can save effort by not repeating it.",
+ },
+ PredictionContextCacheCollection: {
+ SybolicName: "PredictionContextCacheCollection",
+ Description: "A map that records whether we have already created a particular context and can save effort by not computing it again.",
+ },
+ AltSetCollection: {
+ SybolicName: "AltSetCollection",
+ Description: "Used to eliminate duplicate alternatives in an ATN config set.",
+ },
+ ReachSetCollection: {
+ SybolicName: "ReachSetCollection",
+ Description: "Used as merge cache to prevent us needing to compute the merge of two states if we have already done it.",
+ },
+}
+
+// JStore implements a container that allows the use of a struct to calculate the key
+// for a collection of values akin to map. This is not meant to be a full-blown HashMap but just
+// serve the needs of the ANTLR Go runtime.
+//
+// For ease of porting the logic of the runtime from the master target (Java), this collection
+// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals()
+// function as the key. The values are stored in a standard go map which internally is a form of hashmap
+// itself, the key for the go map is the hash supplied by the key object. The collection is able to deal with
+// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't
+// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and
+// we understand the requirements, then this is fine - this is not a general purpose collection.
+type JStore[T any, C Comparator[T]] struct {
+ store map[int][]T
+ len int
+ comparator Comparator[T]
+ stats *JStatRec
+}
+
+func NewJStore[T any, C Comparator[T]](comparator Comparator[T], cType CollectionSource, desc string) *JStore[T, C] {
+
+ if comparator == nil {
+ panic("comparator cannot be nil")
+ }
+
+ s := &JStore[T, C]{
+ store: make(map[int][]T, 1),
+ comparator: comparator,
+ }
+ if collectStats {
+ s.stats = &JStatRec{
+ Source: cType,
+ Description: desc,
+ }
+
+ // Track where we created it from if we are being asked to do so
+ if runtimeConfig.statsTraceStacks {
+ s.stats.CreateStack = debug.Stack()
+ }
+ Statistics.AddJStatRec(s.stats)
+ }
+ return s
+}
+
+// Put will store given value in the collection. Note that the key for storage is generated from
+// the value itself - this is specifically because that is what ANTLR needs - this would not be useful
+// as any kind of general collection.
+//
+// If the key has a hash conflict, then the value will be added to the slice of values associated with the
+// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is
+// tested by calling the equals() method on the key.
+//
+// # If the given value is already present in the store, then the existing value is returned as v and exists is set to true
+//
+// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false.
+func (s *JStore[T, C]) Put(value T) (v T, exists bool) {
+
+ if collectStats {
+ s.stats.Puts++
+ }
+ kh := s.comparator.Hash1(value)
+
+ var hClash bool
+ for _, v1 := range s.store[kh] {
+ hClash = true
+ if s.comparator.Equals2(value, v1) {
+ if collectStats {
+ s.stats.PutHits++
+ s.stats.PutHashConflicts++
+ }
+ return v1, true
+ }
+ if collectStats {
+ s.stats.PutMisses++
+ }
+ }
+ if collectStats && hClash {
+ s.stats.PutHashConflicts++
+ }
+ s.store[kh] = append(s.store[kh], value)
+
+ if collectStats {
+ if len(s.store[kh]) > s.stats.MaxSlotSize {
+ s.stats.MaxSlotSize = len(s.store[kh])
+ }
+ }
+ s.len++
+ if collectStats {
+ s.stats.CurSize = s.len
+ if s.len > s.stats.MaxSize {
+ s.stats.MaxSize = s.len
+ }
+ }
+ return value, false
+}
+
+// Get will return the value associated with the key - the type of the key is the same type as the value
+// which would not generally be useful, but this is a specific thing for ANTLR where the key is
+// generated using the object we are going to store.
+func (s *JStore[T, C]) Get(key T) (T, bool) {
+ if collectStats {
+ s.stats.Gets++
+ }
+ kh := s.comparator.Hash1(key)
+ var hClash bool
+ for _, v := range s.store[kh] {
+ hClash = true
+ if s.comparator.Equals2(key, v) {
+ if collectStats {
+ s.stats.GetHits++
+ s.stats.GetHashConflicts++
+ }
+ return v, true
+ }
+ if collectStats {
+ s.stats.GetMisses++
+ }
+ }
+ if collectStats {
+ if hClash {
+ s.stats.GetHashConflicts++
+ }
+ s.stats.GetNoEnt++
+ }
+ return key, false
+}
+
+// Contains returns true if the given key is present in the store
+func (s *JStore[T, C]) Contains(key T) bool {
+ _, present := s.Get(key)
+ return present
+}
+
+func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T {
+ vs := make([]T, 0, len(s.store))
+ for _, v := range s.store {
+ vs = append(vs, v...)
+ }
+ sort.Slice(vs, func(i, j int) bool {
+ return less(vs[i], vs[j])
+ })
+
+ return vs
+}
+
+func (s *JStore[T, C]) Each(f func(T) bool) {
+ for _, e := range s.store {
+ for _, v := range e {
+ f(v)
+ }
+ }
+}
+
+func (s *JStore[T, C]) Len() int {
+ return s.len
+}
+
+func (s *JStore[T, C]) Values() []T {
+ vs := make([]T, 0, len(s.store))
+ for _, e := range s.store {
+ vs = append(vs, e...)
+ }
+ return vs
+}
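A hedged sketch of JStore with a hand-rolled Comparator; the point type and comparator are illustrative, not part of the runtime. It shows the property Put documents: a structurally equal key returns the already-stored value.

```go
package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

type point struct{ x, y int }

// pointCmp satisfies antlr.Comparator[*point].
type pointCmp struct{}

func (pointCmp) Hash1(p *point) int       { return p.x*31 + p.y }
func (pointCmp) Equals2(a, b *point) bool { return a.x == b.x && a.y == b.y }

func main() {
	s := antlr.NewJStore[*point, pointCmp](pointCmp{}, antlr.UnknownCollection, "demo")

	first, existed := s.Put(&point{1, 2})
	fmt.Println(existed) // false: newly stored

	// A structurally equal key hits the existing entry even though the pointer
	// differs; equivalence comes from the comparator, not the address.
	again, existed := s.Put(&point{1, 2})
	fmt.Println(existed, first == again) // true true
}
```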
+
+type entry[K, V any] struct {
+ key K
+ val V
+}
+
+type JMap[K, V any, C Comparator[K]] struct {
+ store map[int][]*entry[K, V]
+ len int
+ comparator Comparator[K]
+ stats *JStatRec
+}
+
+func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K], cType CollectionSource, desc string) *JMap[K, V, C] {
+ m := &JMap[K, V, C]{
+ store: make(map[int][]*entry[K, V], 1),
+ comparator: comparator,
+ }
+ if collectStats {
+ m.stats = &JStatRec{
+ Source: cType,
+ Description: desc,
+ }
+ // Track where we created it from if we are being asked to do so
+ if runtimeConfig.statsTraceStacks {
+ m.stats.CreateStack = debug.Stack()
+ }
+ Statistics.AddJStatRec(m.stats)
+ }
+ return m
+}
+
+func (m *JMap[K, V, C]) Put(key K, val V) (V, bool) {
+ if collectStats {
+ m.stats.Puts++
+ }
+ kh := m.comparator.Hash1(key)
+
+ var hClash bool
+ for _, e := range m.store[kh] {
+ hClash = true
+ if m.comparator.Equals2(e.key, key) {
+ if collectStats {
+ m.stats.PutHits++
+ m.stats.PutHashConflicts++
+ }
+ return e.val, true
+ }
+ if collectStats {
+ m.stats.PutMisses++
+ }
+ }
+ if collectStats {
+ if hClash {
+ m.stats.PutHashConflicts++
+ }
+ }
+ m.store[kh] = append(m.store[kh], &entry[K, V]{key, val})
+ if collectStats {
+ if len(m.store[kh]) > m.stats.MaxSlotSize {
+ m.stats.MaxSlotSize = len(m.store[kh])
+ }
+ }
+ m.len++
+ if collectStats {
+ m.stats.CurSize = m.len
+ if m.len > m.stats.MaxSize {
+ m.stats.MaxSize = m.len
+ }
+ }
+ return val, false
+}
+
+func (m *JMap[K, V, C]) Values() []V {
+ vs := make([]V, 0, len(m.store))
+ for _, e := range m.store {
+ for _, v := range e {
+ vs = append(vs, v.val)
+ }
+ }
+ return vs
+}
+
+func (m *JMap[K, V, C]) Get(key K) (V, bool) {
+ if collectStats {
+ m.stats.Gets++
+ }
+ var none V
+ kh := m.comparator.Hash1(key)
+ var hClash bool
+ for _, e := range m.store[kh] {
+ hClash = true
+ if m.comparator.Equals2(e.key, key) {
+ if collectStats {
+ m.stats.GetHits++
+ m.stats.GetHashConflicts++
+ }
+ return e.val, true
+ }
+ if collectStats {
+ m.stats.GetMisses++
+ }
+ }
+ if collectStats {
+ if hClash {
+ m.stats.GetHashConflicts++
+ }
+ m.stats.GetNoEnt++
+ }
+ return none, false
+}
+
+func (m *JMap[K, V, C]) Len() int {
+ return m.len
+}
+
+func (m *JMap[K, V, C]) Delete(key K) {
+ kh := m.comparator.Hash1(key)
+ for i, e := range m.store[kh] {
+ if m.comparator.Equals2(e.key, key) {
+ m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...)
+ m.len--
+ return
+ }
+ }
+}
+
+func (m *JMap[K, V, C]) Clear() {
+ m.store = make(map[int][]*entry[K, V])
+}
+
+type JPCMap struct {
+ store *JMap[*PredictionContext, *JMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]], *ObjEqComparator[*PredictionContext]]
+ size int
+ stats *JStatRec
+}
+
+func NewJPCMap(cType CollectionSource, desc string) *JPCMap {
+ m := &JPCMap{
+ store: NewJMap[*PredictionContext, *JMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]], *ObjEqComparator[*PredictionContext]](pContextEqInst, cType, desc),
+ }
+ if collectStats {
+ m.stats = &JStatRec{
+ Source: cType,
+ Description: desc,
+ }
+ // Track where we created it from if we are being asked to do so
+ if runtimeConfig.statsTraceStacks {
+ m.stats.CreateStack = debug.Stack()
+ }
+ Statistics.AddJStatRec(m.stats)
+ }
+ return m
+}
+
+func (pcm *JPCMap) Get(k1, k2 *PredictionContext) (*PredictionContext, bool) {
+ if collectStats {
+ pcm.stats.Gets++
+ }
+ // Do we have a map stored by k1?
+ //
+ m2, present := pcm.store.Get(k1)
+ if present {
+ if collectStats {
+ pcm.stats.GetHits++
+ }
+ // We found a map of values corresponding to k1, so now we need to look up k2 in that map
+ //
+ return m2.Get(k2)
+ }
+ if collectStats {
+ pcm.stats.GetMisses++
+ }
+ return nil, false
+}
+
+func (pcm *JPCMap) Put(k1, k2, v *PredictionContext) {
+
+ if collectStats {
+ pcm.stats.Puts++
+ }
+ // First does a map already exist for k1?
+ //
+ if m2, present := pcm.store.Get(k1); present {
+ if collectStats {
+ pcm.stats.PutHits++
+ }
+ _, present = m2.Put(k2, v)
+ if !present {
+ pcm.size++
+ if collectStats {
+ pcm.stats.CurSize = pcm.size
+ if pcm.size > pcm.stats.MaxSize {
+ pcm.stats.MaxSize = pcm.size
+ }
+ }
+ }
+ } else {
+ // No map found for k1, so we create it, add in our value, then store it
+ //
+ if collectStats {
+ pcm.stats.PutMisses++
+ m2 = NewJMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]](pContextEqInst, pcm.stats.Source, pcm.stats.Description+" map entry")
+ } else {
+ m2 = NewJMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]](pContextEqInst, PredictionContextCacheCollection, "map entry")
+ }
+
+ m2.Put(k2, v)
+ pcm.store.Put(k1, m2)
+ pcm.size++
+ }
+}
+
+type JPCMap2 struct {
+ store map[int][]JPCEntry
+ size int
+ stats *JStatRec
+}
+
+type JPCEntry struct {
+ k1, k2, v *PredictionContext
+}
+
+func NewJPCMap2(cType CollectionSource, desc string) *JPCMap2 {
+ m := &JPCMap2{
+ store: make(map[int][]JPCEntry, 1000),
+ }
+ if collectStats {
+ m.stats = &JStatRec{
+ Source: cType,
+ Description: desc,
+ }
+ // Track where we created it from if we are being asked to do so
+ if runtimeConfig.statsTraceStacks {
+ m.stats.CreateStack = debug.Stack()
+ }
+ Statistics.AddJStatRec(m.stats)
+ }
+ return m
+}
+
+func dHash(k1, k2 *PredictionContext) int {
+ return k1.cachedHash*31 + k2.cachedHash
+}
+
+func (pcm *JPCMap2) Get(k1, k2 *PredictionContext) (*PredictionContext, bool) {
+ if collectStats {
+ pcm.stats.Gets++
+ }
+
+ h := dHash(k1, k2)
+ var hClash bool
+ for _, e := range pcm.store[h] {
+ hClash = true
+ if e.k1.Equals(k1) && e.k2.Equals(k2) {
+ if collectStats {
+ pcm.stats.GetHits++
+ pcm.stats.GetHashConflicts++
+ }
+ return e.v, true
+ }
+ if collectStats {
+ pcm.stats.GetMisses++
+ }
+ }
+ if collectStats {
+ if hClash {
+ pcm.stats.GetHashConflicts++
+ }
+ pcm.stats.GetNoEnt++
+ }
+ return nil, false
+}
+
+func (pcm *JPCMap2) Put(k1, k2, v *PredictionContext) (*PredictionContext, bool) {
+ if collectStats {
+ pcm.stats.Puts++
+ }
+ h := dHash(k1, k2)
+ var hClash bool
+ for _, e := range pcm.store[h] {
+ hClash = true
+ if e.k1.Equals(k1) && e.k2.Equals(k2) {
+ if collectStats {
+ pcm.stats.PutHits++
+ pcm.stats.PutHashConflicts++
+ }
+ return e.v, true
+ }
+ if collectStats {
+ pcm.stats.PutMisses++
+ }
+ }
+ if collectStats {
+ if hClash {
+ pcm.stats.PutHashConflicts++
+ }
+ }
+ pcm.store[h] = append(pcm.store[h], JPCEntry{k1, k2, v})
+ pcm.size++
+ if collectStats {
+ pcm.stats.CurSize = pcm.size
+ if pcm.size > pcm.stats.MaxSize {
+ pcm.stats.MaxSize = pcm.size
+ }
+ }
+ return nil, false
+}
+
+type VisitEntry struct {
+ k *PredictionContext
+ v *PredictionContext
+}
+type VisitRecord struct {
+ store map[*PredictionContext]*PredictionContext
+ len int
+ stats *JStatRec
+}
+
+type VisitList struct {
+ cache *list.List
+ lock sync.RWMutex
+}
+
+var visitListPool = VisitList{
+ cache: list.New(),
+ lock: sync.RWMutex{},
+}
+
+// NewVisitRecord returns a new VisitRecord instance from the pool if available.
+// Note that this "map" uses a pointer as a key because we are emulating the behavior of
+// IdentityHashMap in Java, which uses the `==` operator to compare whether the keys are equal,
+// which means the test is whether the key is the same object reference, rather
+// than whether it is .equals() to another object.
+func NewVisitRecord() *VisitRecord {
+ visitListPool.lock.Lock()
+ el := visitListPool.cache.Front()
+ defer visitListPool.lock.Unlock()
+ var vr *VisitRecord
+ if el == nil {
+ vr = &VisitRecord{
+ store: make(map[*PredictionContext]*PredictionContext),
+ }
+ if collectStats {
+ vr.stats = &JStatRec{
+ Source: PredictionContextCacheCollection,
+ Description: "VisitRecord",
+ }
+ // Track where we created it from if we are being asked to do so
+ if runtimeConfig.statsTraceStacks {
+ vr.stats.CreateStack = debug.Stack()
+ }
+ }
+ } else {
+ vr = el.Value.(*VisitRecord)
+ visitListPool.cache.Remove(el)
+ vr.store = make(map[*PredictionContext]*PredictionContext)
+ }
+ if collectStats {
+ Statistics.AddJStatRec(vr.stats)
+ }
+ return vr
+}
+
+func (vr *VisitRecord) Release() {
+ vr.len = 0
+ vr.store = nil
+ if collectStats {
+ vr.stats.MaxSize = 0
+ vr.stats.CurSize = 0
+ vr.stats.Gets = 0
+ vr.stats.GetHits = 0
+ vr.stats.GetMisses = 0
+ vr.stats.GetHashConflicts = 0
+ vr.stats.GetNoEnt = 0
+ vr.stats.Puts = 0
+ vr.stats.PutHits = 0
+ vr.stats.PutMisses = 0
+ vr.stats.PutHashConflicts = 0
+ vr.stats.MaxSlotSize = 0
+ }
+ visitListPool.lock.Lock()
+ visitListPool.cache.PushBack(vr)
+ visitListPool.lock.Unlock()
+}
+
+func (vr *VisitRecord) Get(k *PredictionContext) (*PredictionContext, bool) {
+ if collectStats {
+ vr.stats.Gets++
+ }
+ v := vr.store[k]
+ if v != nil {
+ if collectStats {
+ vr.stats.GetHits++
+ }
+ return v, true
+ }
+ if collectStats {
+ vr.stats.GetNoEnt++
+ }
+ return nil, false
+}
+
+func (vr *VisitRecord) Put(k, v *PredictionContext) (*PredictionContext, bool) {
+ if collectStats {
+ vr.stats.Puts++
+ }
+ vr.store[k] = v
+ vr.len++
+ if collectStats {
+ vr.stats.CurSize = vr.len
+ if vr.len > vr.stats.MaxSize {
+ vr.stats.MaxSize = vr.len
+ }
+ }
+ return v, false
+}
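VisitRecord keeps its own list.List free list behind a mutex. For comparison, here is a sketch of the same recycle-a-map idea with the standard library's sync.Pool; this is an illustration rather than a drop-in replacement, since sync.Pool may discard idle entries at GC while the explicit list never does.

```go
package main

import "sync"

type record struct {
	store map[int]int
}

var recordPool = sync.Pool{
	New: func() any { return new(record) },
}

func getRecord() *record {
	r := recordPool.Get().(*record)
	r.store = make(map[int]int) // fresh map for each borrower, as NewVisitRecord does
	return r
}

func putRecord(r *record) {
	r.store = nil // drop references so the GC can reclaim the old map
	recordPool.Put(r)
}

func main() {
	r := getRecord()
	r.store[1] = 2
	putRecord(r)
}
```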
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go b/vendor/github.com/antlr4-go/antlr/v4/lexer.go
similarity index 78%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go
rename to vendor/github.com/antlr4-go/antlr/v4/lexer.go
index 6533f0516..3c7896a91 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/lexer.go
@@ -69,7 +69,7 @@ func NewBaseLexer(input CharStream) *BaseLexer {
// create a single token. NextToken will return l object after
// Matching lexer rule(s). If you subclass to allow multiple token
// emissions, then set l to the last token to be Matched or
- // something nonnil so that the auto token emit mechanism will not
+ // something non-nil so that the auto token emit mechanism will not
// emit another token.
lexer.token = nil
@@ -111,6 +111,7 @@ const (
LexerSkip = -3
)
+//goland:noinspection GoUnusedConst
const (
LexerDefaultTokenChannel = TokenDefaultChannel
LexerHidden = TokenHiddenChannel
@@ -118,7 +119,7 @@ const (
LexerMaxCharValue = 0x10FFFF
)
-func (b *BaseLexer) reset() {
+func (b *BaseLexer) Reset() {
// wack Lexer state variables
if b.input != nil {
b.input.Seek(0) // rewind the input
@@ -176,7 +177,7 @@ func (b *BaseLexer) safeMatch() (ret int) {
return b.Interpreter.Match(b.input, b.mode)
}
-// Return a token from l source i.e., Match a token on the char stream.
+// NextToken returns a token from the lexer input source i.e., Match a token on the source char stream.
func (b *BaseLexer) NextToken() Token {
if b.input == nil {
panic("NextToken requires a non-nil input stream.")
@@ -205,9 +206,8 @@ func (b *BaseLexer) NextToken() Token {
continueOuter := false
for {
b.thetype = TokenInvalidType
- ttype := LexerSkip
- ttype = b.safeMatch()
+ ttype := b.safeMatch()
if b.input.LA(1) == TokenEOF {
b.hitEOF = true
@@ -234,12 +234,11 @@ func (b *BaseLexer) NextToken() Token {
}
}
-// Instruct the lexer to Skip creating a token for current lexer rule
-// and look for another token. NextToken() knows to keep looking when
-// a lexer rule finishes with token set to SKIPTOKEN. Recall that
+// Skip instructs the lexer to Skip creating a token for current lexer rule
+// and look for another token. [NextToken] knows to keep looking when
+// a lexer rule finishes with token set to [SKIPTOKEN]. Recall that
// if token==nil at end of any token rule, it creates one for you
// and emits it.
-// /
func (b *BaseLexer) Skip() {
b.thetype = LexerSkip
}
@@ -248,23 +247,29 @@ func (b *BaseLexer) More() {
b.thetype = LexerMore
}
+// SetMode changes the lexer to a new mode. The lexer will use this mode from hereon in and the rules for that mode
+// will be in force.
func (b *BaseLexer) SetMode(m int) {
b.mode = m
}
+// PushMode saves the current lexer mode so that it can be restored later. See [PopMode], then sets the
+// current lexer mode to the supplied mode m.
func (b *BaseLexer) PushMode(m int) {
- if LexerATNSimulatorDebug {
+ if runtimeConfig.lexerATNSimulatorDebug {
fmt.Println("pushMode " + strconv.Itoa(m))
}
b.modeStack.Push(b.mode)
b.mode = m
}
+// PopMode restores the lexer mode saved by a call to [PushMode]. It is a panic error if there is no saved mode to
+// return to.
func (b *BaseLexer) PopMode() int {
if len(b.modeStack) == 0 {
panic("Empty Stack")
}
- if LexerATNSimulatorDebug {
+ if runtimeConfig.lexerATNSimulatorDebug {
fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1]))
}
i, _ := b.modeStack.Pop()
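Mode switches are normally emitted by grammar actions (pushMode/popMode commands), but the methods are public. A hedged sketch of the save/restore pairing, assuming a hypothetical generated lexer MyLexer with a MyLexerSTRING_MODE constant:

```go
// lexQuoted shows that every PushMode needs a matching PopMode,
// which panics if the mode stack is empty.
func lexQuoted(lexer *MyLexer) antlr.Token {
	lexer.PushMode(MyLexerSTRING_MODE) // saves the current mode on the stack
	defer lexer.PopMode()              // restores it; panics on an empty stack
	return lexer.NextToken()           // matched with STRING_MODE rules in force
}
```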
@@ -280,7 +285,7 @@ func (b *BaseLexer) inputStream() CharStream {
func (b *BaseLexer) SetInputStream(input CharStream) {
b.input = nil
b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
- b.reset()
+ b.Reset()
b.input = input
b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
}
@@ -289,20 +294,19 @@ func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair {
return b.tokenFactorySourcePair
}
-// By default does not support multiple emits per NextToken invocation
-// for efficiency reasons. Subclass and override l method, NextToken,
-// and GetToken (to push tokens into a list and pull from that list
-// rather than a single variable as l implementation does).
-// /
+// EmitToken by default does not support multiple emits per [NextToken] invocation
+// for efficiency reasons. Subclass and override this func, [NextToken],
+// and [GetToken] (to push tokens into a list and pull from that list
+// rather than a single variable as this implementation does).
func (b *BaseLexer) EmitToken(token Token) {
b.token = token
}
-// The standard method called to automatically emit a token at the
+// Emit is the standard method called to automatically emit a token at the
// outermost lexical rule. The token object should point into the
// char buffer start..stop. If there is a text override in 'text',
-// use that to set the token's text. Override l method to emit
-// custom Token objects or provide a Newfactory.
+// use that to set the token's text. Override this method to emit
+// custom [Token] objects or provide a new factory.
// /
func (b *BaseLexer) Emit() Token {
t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn)
@@ -310,6 +314,7 @@ func (b *BaseLexer) Emit() Token {
return t
}
+// EmitEOF emits an EOF token. By default, this is the last token emitted
func (b *BaseLexer) EmitEOF() Token {
cpos := b.GetCharPositionInLine()
lpos := b.GetLine()
@@ -318,6 +323,7 @@ func (b *BaseLexer) EmitEOF() Token {
return eof
}
+// GetCharPositionInLine returns the current position in the current line as far as the lexer is concerned.
func (b *BaseLexer) GetCharPositionInLine() int {
return b.Interpreter.GetCharPositionInLine()
}
@@ -334,13 +340,12 @@ func (b *BaseLexer) SetType(t int) {
b.thetype = t
}
-// What is the index of the current character of lookahead?///
+// GetCharIndex returns the index of the current character of lookahead
func (b *BaseLexer) GetCharIndex() int {
return b.input.Index()
}
-// Return the text Matched so far for the current token or any text override.
-// Set the complete text of l token it wipes any previous changes to the text.
+// GetText returns the text Matched so far for the current token or any text override.
func (b *BaseLexer) GetText() string {
if b.text != "" {
return b.text
@@ -349,17 +354,20 @@ func (b *BaseLexer) GetText() string {
return b.Interpreter.GetText(b.input)
}
+// SetText sets the complete text of this token; it wipes any previous changes to the text.
func (b *BaseLexer) SetText(text string) {
b.text = text
}
+// GetATN returns the ATN used by the lexer.
func (b *BaseLexer) GetATN() *ATN {
return b.Interpreter.ATN()
}
-// Return a list of all Token objects in input char stream.
-// Forces load of all tokens. Does not include EOF token.
-// /
+// GetAllTokens returns a list of all [Token] objects in input char stream.
+// Forces a load of all tokens that can be made from the input char stream.
+//
+// Does not include EOF token.
func (b *BaseLexer) GetAllTokens() []Token {
vl := b.Virt
tokens := make([]Token, 0)
@@ -398,11 +406,13 @@ func (b *BaseLexer) getCharErrorDisplay(c rune) string {
return "'" + b.getErrorDisplayForChar(c) + "'"
}
-// Lexers can normally Match any char in it's vocabulary after Matching
-// a token, so do the easy thing and just kill a character and hope
+// Recover can normally Match any char in its vocabulary after Matching
+// a token, so here we do the easy thing and just kill a character and hope
// it all works out. You can instead use the rule invocation stack
// to do sophisticated error recovery if you are in a fragment rule.
-// /
+//
+// In general, lexers should not need to recover and should have rules that cover any eventuality, such as
+// a character that makes no sense to the recognizer.
func (b *BaseLexer) Recover(re RecognitionException) {
if b.input.LA(1) != TokenEOF {
if _, ok := re.(*LexerNoViableAltException); ok {
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go b/vendor/github.com/antlr4-go/antlr/v4/lexer_action.go
similarity index 78%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go
rename to vendor/github.com/antlr4-go/antlr/v4/lexer_action.go
index 111656c29..eaa7393e0 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/lexer_action.go
@@ -7,14 +7,29 @@ package antlr
import "strconv"
const (
- LexerActionTypeChannel = 0 //The type of a {@link LexerChannelAction} action.
- LexerActionTypeCustom = 1 //The type of a {@link LexerCustomAction} action.
- LexerActionTypeMode = 2 //The type of a {@link LexerModeAction} action.
- LexerActionTypeMore = 3 //The type of a {@link LexerMoreAction} action.
- LexerActionTypePopMode = 4 //The type of a {@link LexerPopModeAction} action.
- LexerActionTypePushMode = 5 //The type of a {@link LexerPushModeAction} action.
- LexerActionTypeSkip = 6 //The type of a {@link LexerSkipAction} action.
- LexerActionTypeType = 7 //The type of a {@link LexerTypeAction} action.
+ // LexerActionTypeChannel represents a [LexerChannelAction] action.
+ LexerActionTypeChannel = 0
+
+ // LexerActionTypeCustom represents a [LexerCustomAction] action.
+ LexerActionTypeCustom = 1
+
+ // LexerActionTypeMode represents a [LexerModeAction] action.
+ LexerActionTypeMode = 2
+
+ // LexerActionTypeMore represents a [LexerMoreAction] action.
+ LexerActionTypeMore = 3
+
+ // LexerActionTypePopMode represents a [LexerPopModeAction] action.
+ LexerActionTypePopMode = 4
+
+ // LexerActionTypePushMode represents a [LexerPushModeAction] action.
+ LexerActionTypePushMode = 5
+
+ // LexerActionTypeSkip represents a [LexerSkipAction] action.
+ LexerActionTypeSkip = 6
+
+ // LexerActionTypeType represents a [LexerTypeAction] action.
+ LexerActionTypeType = 7
)
type LexerAction interface {
@@ -39,7 +54,7 @@ func NewBaseLexerAction(action int) *BaseLexerAction {
return la
}
-func (b *BaseLexerAction) execute(lexer Lexer) {
+func (b *BaseLexerAction) execute(_ Lexer) {
panic("Not implemented")
}
@@ -52,17 +67,19 @@ func (b *BaseLexerAction) getIsPositionDependent() bool {
}
func (b *BaseLexerAction) Hash() int {
- return b.actionType
+ h := murmurInit(0)
+ h = murmurUpdate(h, b.actionType)
+ return murmurFinish(h, 1)
}
func (b *BaseLexerAction) Equals(other LexerAction) bool {
- return b == other
+ return b.actionType == other.getActionType()
}
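The Hash change above adopts the murmur pattern used throughout this runtime: seed, fold in each component, finish with the component count. A generic sketch using the same primitives (compositeHash is a hypothetical helper):

    func compositeHash(parts []int) int {
        h := murmurInit(0)
        for _, p := range parts {
            h = murmurUpdate(h, p)
        }
        return murmurFinish(h, len(parts))
    }

NewLexerActionExecutor, later in this diff, caches exactly this shape of hash over its action list.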
-// Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}.
+// LexerSkipAction implements the [BaseLexerAction.Skip] lexer action by calling [Lexer.Skip].
//
-// The {@code Skip} command does not have any parameters, so l action is
-// implemented as a singleton instance exposed by {@link //INSTANCE}.
+// The Skip command does not have any parameters, so this action is
+// implemented as a singleton instance exposed by [LexerSkipActionINSTANCE].
type LexerSkipAction struct {
*BaseLexerAction
}
@@ -73,17 +90,22 @@ func NewLexerSkipAction() *LexerSkipAction {
return la
}
-// Provides a singleton instance of l parameterless lexer action.
+// LexerSkipActionINSTANCE provides a singleton instance of this parameterless lexer action.
var LexerSkipActionINSTANCE = NewLexerSkipAction()
func (l *LexerSkipAction) execute(lexer Lexer) {
lexer.Skip()
}
+// String returns a string representation of the current [LexerSkipAction].
func (l *LexerSkipAction) String() string {
return "skip"
}
+func (b *LexerSkipAction) Equals(other LexerAction) bool {
+ return other.getActionType() == LexerActionTypeSkip
+}
+
// Implements the {@code type} lexer action by calling {@link Lexer//setType}
//
// with the assigned type.
@@ -125,11 +147,10 @@ func (l *LexerTypeAction) String() string {
return "actionType(" + strconv.Itoa(l.thetype) + ")"
}
-// Implements the {@code pushMode} lexer action by calling
-// {@link Lexer//pushMode} with the assigned mode.
+// LexerPushModeAction implements the pushMode lexer action by calling
+// [Lexer.pushMode] with the assigned mode.
type LexerPushModeAction struct {
*BaseLexerAction
-
mode int
}
@@ -169,10 +190,10 @@ func (l *LexerPushModeAction) String() string {
return "pushMode(" + strconv.Itoa(l.mode) + ")"
}
-// Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}.
+// LexerPopModeAction implements the popMode lexer action by calling [Lexer.popMode].
//
-// The {@code popMode} command does not have any parameters, so l action is
-// implemented as a singleton instance exposed by {@link //INSTANCE}.
+// The popMode command does not have any parameters, so this action is
+// implemented as a singleton instance exposed by [LexerPopModeActionINSTANCE].
type LexerPopModeAction struct {
*BaseLexerAction
}
@@ -224,11 +245,10 @@ func (l *LexerMoreAction) String() string {
return "more"
}
-// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with
+// LexerModeAction implements the mode lexer action by calling [Lexer.mode] with
// the assigned mode.
type LexerModeAction struct {
*BaseLexerAction
-
mode int
}
@@ -322,16 +342,19 @@ func (l *LexerCustomAction) Equals(other LexerAction) bool {
}
}
-// Implements the {@code channel} lexer action by calling
-// {@link Lexer//setChannel} with the assigned channel.
-// Constructs a New{@code channel} action with the specified channel value.
-// @param channel The channel value to pass to {@link Lexer//setChannel}.
+// LexerChannelAction implements the channel lexer action by calling
+// [Lexer.setChannel] with the assigned channel.
+//
+// Constructs a new channel action with the specified channel value.
type LexerChannelAction struct {
*BaseLexerAction
-
channel int
}
+// NewLexerChannelAction creates a channel lexer action by calling
+// [Lexer.setChannel] with the assigned channel.
+//
+// Constructs a new channel action with the specified channel value.
func NewLexerChannelAction(channel int) *LexerChannelAction {
l := new(LexerChannelAction)
l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel)
@@ -375,25 +398,22 @@ func (l *LexerChannelAction) String() string {
// lexer actions, see {@link LexerActionExecutor//append} and
// {@link LexerActionExecutor//fixOffsetBeforeMatch}.
-// Constructs a Newindexed custom action by associating a character offset
-// with a {@link LexerAction}.
-//
-// Note: This class is only required for lexer actions for which
-// {@link LexerAction//isPositionDependent} returns {@code true}.
-//
-// @param offset The offset into the input {@link CharStream}, relative to
-// the token start index, at which the specified lexer action should be
-// executed.
-// @param action The lexer action to execute at a particular offset in the
-// input {@link CharStream}.
type LexerIndexedCustomAction struct {
*BaseLexerAction
-
offset int
lexerAction LexerAction
isPositionDependent bool
}
+// NewLexerIndexedCustomAction constructs a new indexed custom action by associating a character offset
+// with a [LexerAction].
+//
+// Note: This class is only required for lexer actions for which
+// [LexerAction.isPositionDependent] returns true.
+//
+// The offset points into the input [CharStream], relative to
+// the token start index, at which the specified lexerAction should be
+// executed.
func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction {
l := new(LexerIndexedCustomAction)
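A small sketch of how an indexed action might be produced (wrapAtOffset is a hypothetical helper; it mirrors what fixOffsetBeforeMatch in the next file does for each position-dependent action):

    func wrapAtOffset(action LexerAction, offset int) LexerAction {
        if action.getIsPositionDependent() {
            // Record the offset from the token start index at which the
            // wrapped action should execute.
            return NewLexerIndexedCustomAction(offset, action)
        }
        return action
    }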
diff --git a/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go b/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go
new file mode 100644
index 000000000..dfc28c32b
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go
@@ -0,0 +1,173 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "golang.org/x/exp/slices"
+
+// A LexerActionExecutor executes a sequence of lexer actions that are
+// traversed during the Matching operation of a lexer rule (token).
+//
+// The executor tracks position information for position-dependent lexer actions
+// efficiently, ensuring that actions appearing only at the end of the rule do
+// not cause bloating of the [DFA] created for the lexer.
+type LexerActionExecutor struct {
+ lexerActions []LexerAction
+ cachedHash int
+}
+
+func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor {
+
+ if lexerActions == nil {
+ lexerActions = make([]LexerAction, 0)
+ }
+
+ l := new(LexerActionExecutor)
+
+ l.lexerActions = lexerActions
+
+ // Caches the result of Hash() since the hash code is an element
+ // of the performance-critical ATNConfig hash operation.
+ l.cachedHash = murmurInit(0)
+ for _, a := range lexerActions {
+ l.cachedHash = murmurUpdate(l.cachedHash, a.Hash())
+ }
+ l.cachedHash = murmurFinish(l.cachedHash, len(lexerActions))
+
+ return l
+}
+
+// LexerActionExecutorappend creates a [LexerActionExecutor] which executes the actions for
+// the input [LexerActionExecutor] followed by a specified
+// [LexerAction].
+// TODO: This does not match the Java code
+func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor {
+ if lexerActionExecutor == nil {
+ return NewLexerActionExecutor([]LexerAction{lexerAction})
+ }
+
+ return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction))
+}
+
+// fixOffsetBeforeMatch creates a [LexerActionExecutor] which encodes the current offset
+// for position-dependent lexer actions.
+//
+// Normally, when the executor encounters lexer actions where
+// [LexerAction.isPositionDependent] returns true, it calls
+// [IntStream.Seek] on the input [CharStream] to set the input
+// position to the end of the current token. This behavior provides
+// for efficient [DFA] representation of lexer actions which appear at the end
+// of a lexer rule, even when the lexer rule Matches a variable number of
+// characters.
+//
+// Prior to traversing a Match transition in the [ATN], the current offset
+// from the token start index is assigned to all position-dependent lexer
+// actions which have not already been assigned a fixed offset. By storing
+// the offsets relative to the token start index, the [DFA] representation of
+// lexer actions which appear in the middle of tokens remains efficient due
+// to sharing among tokens of the same Length, regardless of their absolute
+// position in the input stream.
+//
+// If the current executor already has offsets assigned to all
+// position-dependent lexer actions, the method returns this instance.
+//
+// The offset is assigned to all position-dependent
+// lexer actions which do not already have offsets assigned.
+//
+// The func returns a [LexerActionExecutor] that stores input stream offsets
+// for all position-dependent lexer actions.
+func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor {
+ var updatedLexerActions []LexerAction
+ for i := 0; i < len(l.lexerActions); i++ {
+ _, ok := l.lexerActions[i].(*LexerIndexedCustomAction)
+ if l.lexerActions[i].getIsPositionDependent() && !ok {
+ if updatedLexerActions == nil {
+ updatedLexerActions = make([]LexerAction, 0, len(l.lexerActions))
+ updatedLexerActions = append(updatedLexerActions, l.lexerActions...)
+ }
+ updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i])
+ }
+ }
+ if updatedLexerActions == nil {
+ return l
+ }
+
+ return NewLexerActionExecutor(updatedLexerActions)
+}
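The early-return rule is easy to check on a concrete executor; a minimal sketch, assuming only names defined in this package (Skip is not position-dependent, so nothing needs an offset):

    func fixOffsetExample() bool {
        exec := NewLexerActionExecutor([]LexerAction{LexerSkipActionINSTANCE})
        // No position-dependent actions: the receiver itself is returned.
        return exec.fixOffsetBeforeMatch(2) == exec
    }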
+
+// execute the actions encapsulated by this executor within the context of a
+// particular [Lexer].
+//
+// This method calls [IntStream.Seek] to set the position of the input
+// [CharStream] prior to calling [LexerAction.execute] on a position-dependent
+// action. Before the method returns, the input position is restored to the
+// same position it was in when the method was invoked.
+//
+// When this method is called, the current [IntStream.Index] for input
+// should be the start of the following token, i.e. 1 character past the
+// end of the current token, and startIndex is the token start index, which
+// may be passed to [IntStream.Seek] to reset input to the beginning of
+// the token.
+func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) {
+ requiresSeek := false
+ stopIndex := input.Index()
+
+ defer func() {
+ if requiresSeek {
+ input.Seek(stopIndex)
+ }
+ }()
+
+ for i := 0; i < len(l.lexerActions); i++ {
+ lexerAction := l.lexerActions[i]
+ if la, ok := lexerAction.(*LexerIndexedCustomAction); ok {
+ offset := la.offset
+ input.Seek(startIndex + offset)
+ lexerAction = la.lexerAction
+ requiresSeek = (startIndex + offset) != stopIndex
+ } else if lexerAction.getIsPositionDependent() {
+ input.Seek(stopIndex)
+ requiresSeek = false
+ }
+ lexerAction.execute(lexer)
+ }
+}
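The body above uses a capture-seek-restore idiom worth noting in isolation (a simplified sketch; the real method only restores when a seek actually moved the stream):

    func withRestoredIndex(input CharStream, f func()) {
        saved := input.Index()
        defer input.Seek(saved) // always restore in this simplified version
        f()
    }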
+
+func (l *LexerActionExecutor) Hash() int {
+ if l == nil {
+ // TODO: Why is this here? l should not be nil
+ return 61
+ }
+
+ // TODO: This is created from the action itself when the struct is created - will this be an issue at some point? Java uses the runtime assign hashcode
+ return l.cachedHash
+}
+
+func (l *LexerActionExecutor) Equals(other interface{}) bool {
+ if l == other {
+ return true
+ }
+ othert, ok := other.(*LexerActionExecutor)
+ if !ok {
+ return false
+ }
+ if othert == nil {
+ return false
+ }
+ if l.cachedHash != othert.cachedHash {
+ return false
+ }
+ if len(l.lexerActions) != len(othert.lexerActions) {
+ return false
+ }
+ return slices.EqualFunc(l.lexerActions, othert.lexerActions, func(i, j LexerAction) bool {
+ return i.Equals(j)
+ })
+}
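slices.EqualFunc compares two slices element-wise with a caller-supplied predicate, which is what lets LexerAction values be compared through their own Equals methods above. A self-contained illustration with strings:

    package main

    import (
        "fmt"
        "strings"

        "golang.org/x/exp/slices"
    )

    func main() {
        a := []string{"Skip", "More"}
        b := []string{"skip", "more"}
        // Element-wise comparison with a custom predicate.
        fmt.Println(slices.EqualFunc(a, b, strings.EqualFold)) // true
    }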
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go b/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go
similarity index 80%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go
rename to vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go
index c573b7521..fe938b025 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go
@@ -10,10 +10,8 @@ import (
"strings"
)
+//goland:noinspection GoUnusedGlobalVariable
var (
- LexerATNSimulatorDebug = false
- LexerATNSimulatorDFADebug = false
-
LexerATNSimulatorMinDFAEdge = 0
LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN
@@ -32,11 +30,11 @@ type ILexerATNSimulator interface {
}
type LexerATNSimulator struct {
- *BaseATNSimulator
+ BaseATNSimulator
recog Lexer
predictionMode int
- mergeCache DoubleDict
+ mergeCache *JPCMap2
startIndex int
Line int
CharPositionInLine int
@@ -46,27 +44,35 @@ type LexerATNSimulator struct {
}
func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator {
- l := new(LexerATNSimulator)
-
- l.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache)
+ l := &LexerATNSimulator{
+ BaseATNSimulator: BaseATNSimulator{
+ atn: atn,
+ sharedContextCache: sharedContextCache,
+ },
+ }
l.decisionToDFA = decisionToDFA
l.recog = recog
+
// The current token's starting index into the character stream.
// Shared across DFA to ATN simulation in case the ATN fails and the
// DFA did not have a previous accept state. In l case, we use the
// ATN-generated exception object.
l.startIndex = -1
- // line number 1..n within the input///
+
+ // line number 1..n within the input
l.Line = 1
+
// The index of the character relative to the beginning of the line
- // 0..n-1///
+ // 0..n-1
l.CharPositionInLine = 0
+
l.mode = LexerDefaultMode
+
// Used during DFA/ATN exec to record the most recent accept configuration
// info
l.prevAccept = NewSimState()
- // done
+
return l
}
@@ -114,7 +120,7 @@ func (l *LexerATNSimulator) reset() {
func (l *LexerATNSimulator) MatchATN(input CharStream) int {
startState := l.atn.modeToStartState[l.mode]
- if LexerATNSimulatorDebug {
+ if runtimeConfig.lexerATNSimulatorDebug {
fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String())
}
oldMode := l.mode
@@ -126,7 +132,7 @@ func (l *LexerATNSimulator) MatchATN(input CharStream) int {
predict := l.execATN(input, next)
- if LexerATNSimulatorDebug {
+ if runtimeConfig.lexerATNSimulatorDebug {
fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString())
}
return predict
@@ -134,18 +140,18 @@ func (l *LexerATNSimulator) MatchATN(input CharStream) int {
func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {
- if LexerATNSimulatorDebug {
+ if runtimeConfig.lexerATNSimulatorDebug {
fmt.Println("start state closure=" + ds0.configs.String())
}
if ds0.isAcceptState {
- // allow zero-length tokens
+ // allow zero-length tokens
l.captureSimState(l.prevAccept, input, ds0)
}
t := input.LA(1)
s := ds0 // s is current/from DFA state
for { // while more work
- if LexerATNSimulatorDebug {
+ if runtimeConfig.lexerATNSimulatorDebug {
fmt.Println("execATN loop starting closure: " + s.configs.String())
}
@@ -188,7 +194,7 @@ func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {
}
}
t = input.LA(1)
- s = target // flip current DFA target becomes Newsrc/from state
+ s = target // flip current DFA target becomes new src/from state
}
return l.failOrAccept(l.prevAccept, input, s.configs, t)
@@ -214,43 +220,39 @@ func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState
return nil
}
target := s.getIthEdge(t - LexerATNSimulatorMinDFAEdge)
- if LexerATNSimulatorDebug && target != nil {
+ if runtimeConfig.lexerATNSimulatorDebug && target != nil {
fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber))
}
return target
}
-// Compute a target state for an edge in the DFA, and attempt to add the
-// computed state and corresponding edge to the DFA.
+// computeTargetState computes a target state for an edge in the [DFA], and attempts to add the
+// computed state and corresponding edge to the [DFA].
//
-// @param input The input stream
-// @param s The current DFA state
-// @param t The next input symbol
-//
-// @return The computed target DFA state for the given input symbol
-// {@code t}. If {@code t} does not lead to a valid DFA state, l method
-// returns {@link //ERROR}.
+// The func returns the computed target [DFA] state for the given input symbol t.
+// If this does not lead to a valid [DFA] state, this method
+// returns ATNSimulatorError.
func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState {
reach := NewOrderedATNConfigSet()
// if we don't find an existing DFA state
// Fill reach starting from closure, following t transitions
- l.getReachableConfigSet(input, s.configs, reach.BaseATNConfigSet, t)
+ l.getReachableConfigSet(input, s.configs, reach, t)
if len(reach.configs) == 0 { // we got nowhere on t from s
if !reach.hasSemanticContext {
// we got nowhere on t, don't panic out l knowledge it'd
- // cause a failover from DFA later.
+ // cause a fail-over from DFA later.
l.addDFAEdge(s, t, ATNSimulatorError, nil)
}
// stop when we can't Match any more char
return ATNSimulatorError
}
// Add an edge from s to target DFA found/created for reach
- return l.addDFAEdge(s, t, nil, reach.BaseATNConfigSet)
+ return l.addDFAEdge(s, t, nil, reach)
}
-func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach ATNConfigSet, t int) int {
+func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach *ATNConfigSet, t int) int {
if l.prevAccept.dfaState != nil {
lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor
l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column)
@@ -265,34 +267,35 @@ func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream,
panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach))
}
-// Given a starting configuration set, figure out all ATN configurations
-// we can reach upon input {@code t}. Parameter {@code reach} is a return
-// parameter.
-func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNConfigSet, reach ATNConfigSet, t int) {
+// getReachableConfigSet, when given a starting configuration set, figures out all [ATN] configurations
+// we can reach upon input t.
+//
+// Parameter reach is a return parameter.
+func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure *ATNConfigSet, reach *ATNConfigSet, t int) {
// l is used to Skip processing for configs which have a lower priority
- // than a config that already reached an accept state for the same rule
+ // than a config that already reached an accept state for the same rule
SkipAlt := ATNInvalidAltNumber
- for _, cfg := range closure.GetItems() {
- currentAltReachedAcceptState := (cfg.GetAlt() == SkipAlt)
- if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision {
+ for _, cfg := range closure.configs {
+ currentAltReachedAcceptState := cfg.GetAlt() == SkipAlt
+ if currentAltReachedAcceptState && cfg.passedThroughNonGreedyDecision {
continue
}
- if LexerATNSimulatorDebug {
+ if runtimeConfig.lexerATNSimulatorDebug {
- fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) // l.recog, true))
+ fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String())
}
for _, trans := range cfg.GetState().GetTransitions() {
target := l.getReachableTarget(trans, t)
if target != nil {
- lexerActionExecutor := cfg.(*LexerATNConfig).lexerActionExecutor
+ lexerActionExecutor := cfg.lexerActionExecutor
if lexerActionExecutor != nil {
lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex)
}
- treatEOFAsEpsilon := (t == TokenEOF)
- config := NewLexerATNConfig3(cfg.(*LexerATNConfig), target, lexerActionExecutor)
+ treatEOFAsEpsilon := t == TokenEOF
+ config := NewLexerATNConfig3(cfg, target, lexerActionExecutor)
if l.closure(input, config, reach,
currentAltReachedAcceptState, true, treatEOFAsEpsilon) {
// any remaining configs for l alt have a lower priority
@@ -305,7 +308,7 @@ func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNC
}
func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) {
- if LexerATNSimulatorDebug {
+ if runtimeConfig.lexerATNSimulatorDebug {
fmt.Printf("ACTION %v\n", lexerActionExecutor)
}
// seek to after last char in token
@@ -325,7 +328,7 @@ func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState
return nil
}
-func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *OrderedATNConfigSet {
+func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *ATNConfigSet {
configs := NewOrderedATNConfigSet()
for i := 0; i < len(p.GetTransitions()); i++ {
target := p.GetTransitions()[i].getTarget()
@@ -336,25 +339,24 @@ func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *Ord
return configs
}
-// Since the alternatives within any lexer decision are ordered by
-// preference, l method stops pursuing the closure as soon as an accept
+// closure: since the alternatives within any lexer decision are ordered by
+// preference, this method stops pursuing the closure as soon as an accept
// state is reached. After the first accept state is reached by depth-first
-// search from {@code config}, all other (potentially reachable) states for
-// l rule would have a lower priority.
+// search from config, all other (potentially reachable) states for
+// this rule would have a lower priority.
//
-// @return {@code true} if an accept state is reached, otherwise
-// {@code false}.
-func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs ATNConfigSet,
+// The func returns true if an accept state is reached.
+func (l *LexerATNSimulator) closure(input CharStream, config *ATNConfig, configs *ATNConfigSet,
currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool {
- if LexerATNSimulatorDebug {
- fmt.Println("closure(" + config.String() + ")") // config.String(l.recog, true) + ")")
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Println("closure(" + config.String() + ")")
}
_, ok := config.state.(*RuleStopState)
if ok {
- if LexerATNSimulatorDebug {
+ if runtimeConfig.lexerATNSimulatorDebug {
if l.recog != nil {
fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config)
} else {
@@ -401,10 +403,10 @@ func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, co
}
// side-effect: can alter configs.hasSemanticContext
-func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNConfig, trans Transition,
- configs ATNConfigSet, speculative, treatEOFAsEpsilon bool) *LexerATNConfig {
+func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *ATNConfig, trans Transition,
+ configs *ATNConfigSet, speculative, treatEOFAsEpsilon bool) *ATNConfig {
- var cfg *LexerATNConfig
+ var cfg *ATNConfig
if trans.getSerializationType() == TransitionRULE {
@@ -435,10 +437,10 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNC
pt := trans.(*PredicateTransition)
- if LexerATNSimulatorDebug {
+ if runtimeConfig.lexerATNSimulatorDebug {
fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex))
}
- configs.SetHasSemanticContext(true)
+ configs.hasSemanticContext = true
if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) {
cfg = NewLexerATNConfig4(config, trans.getTarget())
}
@@ -449,7 +451,7 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNC
// TODO: if the entry rule is invoked recursively, some
// actions may be executed during the recursive call. The
// problem can appear when hasEmptyPath() is true but
- // isEmpty() is false. In l case, the config needs to be
+ // isEmpty() is false. In this case, the config needs to be
// split into two contexts - one with just the empty path
// and another with everything but the empty path.
// Unfortunately, the current algorithm does not allow
@@ -476,26 +478,18 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNC
return cfg
}
-// Evaluate a predicate specified in the lexer.
+// evaluatePredicate evaluates a predicate specified in the lexer.
//
-// If {@code speculative} is {@code true}, l method was called before
-// {@link //consume} for the Matched character. This method should call
-// {@link //consume} before evaluating the predicate to ensure position
-// sensitive values, including {@link Lexer//GetText}, {@link Lexer//GetLine},
-// and {@link Lexer//getcolumn}, properly reflect the current
-// lexer state. This method should restore {@code input} and the simulator
-// to the original state before returning (i.e. undo the actions made by the
-// call to {@link //consume}.
+// If speculative is true, this method was called before
+// [Consume] for the Matched character. This method should call
+// [Consume] before evaluating the predicate to ensure position
+// sensitive values, including [GetText], [GetLine],
+// and [GetColumn], properly reflect the current
+// lexer state. This method should restore input and the simulator
+// to the original state before returning, i.e. undo the actions made by the
+// call to [Consume].
//
-// @param input The input stream.
-// @param ruleIndex The rule containing the predicate.
-// @param predIndex The index of the predicate within the rule.
-// @param speculative {@code true} if the current index in {@code input} is
-// one character before the predicate's location.
-//
-// @return {@code true} if the specified predicate evaluates to
-// {@code true}.
-// /
+// The func returns true if the specified predicate evaluates to true.
func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool {
// assume true if no recognizer was provided
if l.recog == nil {
@@ -527,7 +521,7 @@ func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream
settings.dfaState = dfaState
}
-func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs ATNConfigSet) *DFAState {
+func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs *ATNConfigSet) *DFAState {
if to == nil && cfgs != nil {
// leading to l call, ATNConfigSet.hasSemanticContext is used as a
// marker indicating dynamic predicate evaluation makes l edge
@@ -539,10 +533,9 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg
// TJP notes: next time through the DFA, we see a pred again and eval.
// If that gets us to a previously created (but dangling) DFA
// state, we can continue in pure DFA mode from there.
- // /
- suppressEdge := cfgs.HasSemanticContext()
- cfgs.SetHasSemanticContext(false)
-
+ //
+ suppressEdge := cfgs.hasSemanticContext
+ cfgs.hasSemanticContext = false
to = l.addDFAState(cfgs, true)
if suppressEdge {
@@ -554,7 +547,7 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg
// Only track edges within the DFA bounds
return to
}
- if LexerATNSimulatorDebug {
+ if runtimeConfig.lexerATNSimulatorDebug {
fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk))
}
l.atn.edgeMu.Lock()
@@ -572,13 +565,12 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg
// configurations already. This method also detects the first
// configuration containing an ATN rule stop state. Later, when
// traversing the DFA, we will know which rule to accept.
-func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool) *DFAState {
+func (l *LexerATNSimulator) addDFAState(configs *ATNConfigSet, suppressEdge bool) *DFAState {
proposed := NewDFAState(-1, configs)
- var firstConfigWithRuleStopState ATNConfig
-
- for _, cfg := range configs.GetItems() {
+ var firstConfigWithRuleStopState *ATNConfig
+ for _, cfg := range configs.configs {
_, ok := cfg.GetState().(*RuleStopState)
if ok {
@@ -588,14 +580,14 @@ func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool)
}
if firstConfigWithRuleStopState != nil {
proposed.isAcceptState = true
- proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor
+ proposed.lexerActionExecutor = firstConfigWithRuleStopState.lexerActionExecutor
proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()])
}
dfa := l.decisionToDFA[l.mode]
l.atn.stateMu.Lock()
defer l.atn.stateMu.Unlock()
- existing, present := dfa.states.Get(proposed)
+ existing, present := dfa.Get(proposed)
if present {
// This state was already present, so just return it.
@@ -605,10 +597,11 @@ func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool)
// We need to add the new state
//
- proposed.stateNumber = dfa.states.Len()
- configs.SetReadOnly(true)
+ proposed.stateNumber = dfa.Len()
+ configs.readOnly = true
+ configs.configLookup = nil // Not needed now
proposed.configs = configs
- dfa.states.Put(proposed)
+ dfa.Put(proposed)
}
if !suppressEdge {
dfa.setS0(proposed)
@@ -620,7 +613,7 @@ func (l *LexerATNSimulator) getDFA(mode int) *DFA {
return l.decisionToDFA[mode]
}
-// Get the text Matched so far for the current token.
+// GetText returns the text [Match]ed so far for the current token.
func (l *LexerATNSimulator) GetText(input CharStream) string {
// index is first lookahead char, don't include.
return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1))
diff --git a/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go b/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go
new file mode 100644
index 000000000..4955ac876
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go
@@ -0,0 +1,218 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type LL1Analyzer struct {
+ atn *ATN
+}
+
+func NewLL1Analyzer(atn *ATN) *LL1Analyzer {
+ la := new(LL1Analyzer)
+ la.atn = atn
+ return la
+}
+
+const (
+ // LL1AnalyzerHitPred is a special value added to the lookahead sets to indicate that we hit
+ // a predicate during analysis if
+ //
+ // seeThruPreds==false
+ LL1AnalyzerHitPred = TokenInvalidType
+)
+
+// getDecisionLookahead calculates the SLL(1) expected lookahead set for each
+// outgoing transition of an [ATNState]. The returned array has one element
+// for each outgoing transition in s. If the closure from transition
+// i leads to a semantic predicate before Matching a symbol, the
+// element at index i of the result will be nil.
+func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
+ if s == nil {
+ return nil
+ }
+ count := len(s.GetTransitions())
+ look := make([]*IntervalSet, count)
+ for alt := 0; alt < count; alt++ {
+
+ look[alt] = NewIntervalSet()
+ lookBusy := NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.getDecisionLookahead for lookBusy")
+ la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), false, false)
+
+ // Wipe out lookahead for this alternative if we found nothing,
+ // or we had a predicate when we !seeThruPreds
+ if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) {
+ look[alt] = nil
+ }
+ }
+ return look
+}
+
+// Look computes the set of tokens that can follow s in the [ATN] in the
+// specified ctx.
+//
+// If ctx is nil and the end of the rule containing
+// s is reached, [EPSILON] is added to the result set.
+//
+// If ctx is not nil and the end of the outermost rule is
+// reached, [EOF] is added to the result set.
+//
+// Parameter s the ATN state, and stopState is the ATN state to stop at. This can be a
+// [BlockEndState] to detect epsilon paths through a closure.
+//
+// Parameter ctx is the complete parser context, or nil if the context
+// should be ignored
+//
+// The func returns the set of tokens that can follow s in the [ATN] in the
+// specified ctx.
+func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet {
+ r := NewIntervalSet()
+ var lookContext *PredictionContext
+ if ctx != nil {
+ lookContext = predictionContextFromRuleContext(s.GetATN(), ctx)
+ }
+ la.look1(s, stopState, lookContext, r, NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.Look for la.look1()"),
+ NewBitSet(), true, true)
+ return r
+}
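A minimal sketch of driving Look from outside, assuming an ATN state and rule context are already in hand (followSet is a hypothetical helper; this is roughly what error strategies do to compute expected-token sets):

    func followSet(atn *ATN, s ATNState, ctx RuleContext) *IntervalSet {
        // nil stopState: walk until the end of the rule (or outermost rule).
        return NewLL1Analyzer(atn).Look(s, nil, ctx)
    }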
+
+// look1 and look2 compute the set of tokens that can follow s in the [ATN] in
+// the specified ctx.
+//
+// If ctx is nil and stopState or the end of the rule containing s is reached,
+// [TokenEpsilon] is added to the result set. If ctx is not nil, addEOF is
+// true, and stopState or the end of the outermost rule is reached, [TokenEOF]
+// is added to the result set.
+//
+// Parameter stopState can be a [BlockEndState] to detect epsilon paths through
+// a closure; ctx is the outer context, or nil if the outer context should not
+// be used.
+//
+// The lookBusy set prevents epsilon closures in the ATN from causing a stack
+// overflow, and calledRuleStack prevents left recursion from doing the same;
+// outside code should pass a new, empty collection for each.
+//
+// If seeThruPreds is true, semantic predicates are treated as implicitly true
+// and "seen through"; otherwise they are treated as opaque, and
+// [LL1AnalyzerHitPred] is added to the result if one is encountered.
+
+func (la *LL1Analyzer) look2(_, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]],
+ calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) {
+
+ returnState := la.atn.states[ctx.getReturnState(i)]
+ la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+
+}
+
+func (la *LL1Analyzer) look1(s, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) {
+
+ c := NewATNConfig6(s, 0, ctx)
+
+ if lookBusy.Contains(c) {
+ return
+ }
+
+ _, present := lookBusy.Put(c)
+ if present {
+ return
+
+ }
+ if s == stopState {
+ if ctx == nil {
+ look.addOne(TokenEpsilon)
+ return
+ } else if ctx.isEmpty() && addEOF {
+ look.addOne(TokenEOF)
+ return
+ }
+ }
+
+ _, ok := s.(*RuleStopState)
+
+ if ok {
+ if ctx == nil {
+ look.addOne(TokenEpsilon)
+ return
+ } else if ctx.isEmpty() && addEOF {
+ look.addOne(TokenEOF)
+ return
+ }
+
+ if ctx.pcType != PredictionContextEmpty {
+ removed := calledRuleStack.contains(s.GetRuleIndex())
+ defer func() {
+ if removed {
+ calledRuleStack.add(s.GetRuleIndex())
+ }
+ }()
+ calledRuleStack.remove(s.GetRuleIndex())
+ // run thru all possible stack tops in ctx
+ for i := 0; i < ctx.length(); i++ {
+ returnState := la.atn.states[ctx.getReturnState(i)]
+ la.look2(returnState, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, i)
+ }
+ return
+ }
+ }
+
+ n := len(s.GetTransitions())
+
+ for i := 0; i < n; i++ {
+ t := s.GetTransitions()[i]
+
+ if t1, ok := t.(*RuleTransition); ok {
+ if calledRuleStack.contains(t1.getTarget().GetRuleIndex()) {
+ continue
+ }
+
+ newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
+ la.look3(stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, t1)
+ } else if t2, ok := t.(AbstractPredicateTransition); ok {
+ if seeThruPreds {
+ la.look1(t2.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+ } else {
+ look.addOne(LL1AnalyzerHitPred)
+ }
+ } else if t.getIsEpsilon() {
+ la.look1(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+ } else if _, ok := t.(*WildcardTransition); ok {
+ look.addRange(TokenMinUserTokenType, la.atn.maxTokenType)
+ } else {
+ set := t.getLabel()
+ if set != nil {
+ if _, ok := t.(*NotSetTransition); ok {
+ set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType)
+ }
+ look.addSet(set)
+ }
+ }
+ }
+}
+
+func (la *LL1Analyzer) look3(stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]],
+ calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) {
+
+ newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
+
+ defer func() {
+ calledRuleStack.remove(t1.getTarget().GetRuleIndex())
+ }()
+
+ calledRuleStack.add(t1.getTarget().GetRuleIndex())
+ la.look1(t1.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go b/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go
new file mode 100644
index 000000000..923c7b52c
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go
@@ -0,0 +1,47 @@
+//go:build !antlr.stats
+
+package antlr
+
+// This file is compiled when the build configuration antlr.stats is not
+// enabled, which allows the compiler to optimize out all the code that is
+// not used.
+const collectStats = false
+
+// goRunStats is a dummy struct used when build configuration antlr.stats is not enabled.
+type goRunStats struct {
+}
+
+var Statistics = &goRunStats{}
+
+func (s *goRunStats) AddJStatRec(_ *JStatRec) {
+ // Do nothing - compiler will optimize this out (hopefully)
+}
+
+func (s *goRunStats) CollectionAnomalies() {
+ // Do nothing - compiler will optimize this out (hopefully)
+}
+
+func (s *goRunStats) Reset() {
+ // Do nothing - compiler will optimize this out (hopefully)
+}
+
+func (s *goRunStats) Report(dir string, prefix string) error {
+ // Do nothing - compiler will optimize this out (hopefully)
+ return nil
+}
+
+func (s *goRunStats) Analyze() {
+ // Do nothing - compiler will optimize this out (hopefully)
+}
+
+type statsOption func(*goRunStats) error
+
+func (s *goRunStats) Configure(options ...statsOption) error {
+ // Do nothing - compiler will optimize this out (hopefully)
+ return nil
+}
+
+func WithTopN(topN int) statsOption {
+ return func(s *goRunStats) error {
+ return nil
+ }
+}
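The twin-file pattern above in miniature, with hypothetical names: two files in one package carry inverse build constraints, exactly one compiles into any given build, and a constant flag lets the compiler delete dead branches:

    // file stats_off.go
    //go:build !demo.stats

    package demo

    const statsEnabled = false

    // The counterpart, stats_on.go, carries //go:build demo.stats
    // and const statsEnabled = true.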
diff --git a/vendor/github.com/antlr4-go/antlr/v4/parser.go b/vendor/github.com/antlr4-go/antlr/v4/parser.go
new file mode 100644
index 000000000..fb57ac15d
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/parser.go
@@ -0,0 +1,700 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+)
+
+type Parser interface {
+ Recognizer
+
+ GetInterpreter() *ParserATNSimulator
+
+ GetTokenStream() TokenStream
+ GetTokenFactory() TokenFactory
+ GetParserRuleContext() ParserRuleContext
+ SetParserRuleContext(ParserRuleContext)
+ Consume() Token
+ GetParseListeners() []ParseTreeListener
+
+ GetErrorHandler() ErrorStrategy
+ SetErrorHandler(ErrorStrategy)
+ GetInputStream() IntStream
+ GetCurrentToken() Token
+ GetExpectedTokens() *IntervalSet
+ NotifyErrorListeners(string, Token, RecognitionException)
+ IsExpectedToken(int) bool
+ GetPrecedence() int
+ GetRuleInvocationStack(ParserRuleContext) []string
+}
+
+type BaseParser struct {
+ *BaseRecognizer
+
+ Interpreter *ParserATNSimulator
+ BuildParseTrees bool
+
+ input TokenStream
+ errHandler ErrorStrategy
+ precedenceStack IntStack
+ ctx ParserRuleContext
+
+ tracer *TraceListener
+ parseListeners []ParseTreeListener
+ _SyntaxErrors int
+}
+
+// NewBaseParser contains all the parsing support code to embed in parsers. Essentially most of it is error
+// recovery stuff.
+//
+//goland:noinspection GoUnusedExportedFunction
+func NewBaseParser(input TokenStream) *BaseParser {
+
+ p := new(BaseParser)
+
+ p.BaseRecognizer = NewBaseRecognizer()
+
+ // The input stream.
+ p.input = nil
+
+ // The error handling strategy for the parser. The default value is a new
+ // instance of [DefaultErrorStrategy].
+ p.errHandler = NewDefaultErrorStrategy()
+ p.precedenceStack = make([]int, 0)
+ p.precedenceStack.Push(0)
+
+ // The ParserRuleContext object for the currently executing rule.
+ // It is always non-nil during the parsing process.
+ p.ctx = nil
+
+ // Specifies whether the parser should construct a parse tree during
+ // the parsing process. The default value is true.
+ p.BuildParseTrees = true
+
+ // When setTrace(true) is called, a reference to the
+ // TraceListener is stored here, so it can be easily removed in a
+ // later call to setTrace(false). The listener itself is
+ // implemented as a parser listener so p.field is not directly used by
+ // other parser methods.
+ p.tracer = nil
+
+ // The list of ParseTreeListener listeners registered to receive
+ // events during the parse.
+ p.parseListeners = nil
+
+ // The number of syntax errors Reported during parsing. This value is
+ // incremented each time NotifyErrorListeners is called.
+ p._SyntaxErrors = 0
+ p.SetInputStream(input)
+
+ return p
+}
+
+// This field maps from the serialized ATN string to the deserialized [ATN] with
+// bypass alternatives.
+//
+// [ATNDeserializationOptions.isGenerateRuleBypassTransitions]
+//
+//goland:noinspection GoUnusedGlobalVariable
+var bypassAltsAtnCache = make(map[string]int)
+
+// reset the parser's state.
+func (p *BaseParser) reset() {
+ if p.input != nil {
+ p.input.Seek(0)
+ }
+ p.errHandler.reset(p)
+ p.ctx = nil
+ p._SyntaxErrors = 0
+ p.SetTrace(nil)
+ p.precedenceStack = make([]int, 0)
+ p.precedenceStack.Push(0)
+ if p.Interpreter != nil {
+ p.Interpreter.reset()
+ }
+}
+
+func (p *BaseParser) GetErrorHandler() ErrorStrategy {
+ return p.errHandler
+}
+
+func (p *BaseParser) SetErrorHandler(e ErrorStrategy) {
+ p.errHandler = e
+}
+
+// Match the current input symbol against ttype. If the symbol type
+// Matches, [ErrorStrategy.ReportMatch] and [Consume] are
+// called to complete the Match process.
+//
+// If the symbol type does not Match,
+// [ErrorStrategy.RecoverInline] is called on the current error
+// strategy to attempt recovery. If BuildParseTrees is
+// true and the token index of the symbol returned by
+// [ErrorStrategy.RecoverInline] is -1, the symbol is added to
+// the parse tree by calling [ParserRuleContext.AddErrorNode].
+//
+// Match returns the Matched symbol, or nil if the current input symbol did
+// not Match ttype and the error strategy could not recover from the
+// mismatched symbol.
+
+func (p *BaseParser) Match(ttype int) Token {
+
+ t := p.GetCurrentToken()
+
+ if t.GetTokenType() == ttype {
+ p.errHandler.ReportMatch(p)
+ p.Consume()
+ } else {
+ t = p.errHandler.RecoverInline(p)
+ if p.HasError() {
+ return nil
+ }
+ if p.BuildParseTrees && t.GetTokenIndex() == -1 {
+
+ // we must have conjured up a new token during single token
+ // insertion if it's not the current symbol
+ p.ctx.AddErrorNode(t)
+ }
+ }
+
+ return t
+}
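A sketch of how generated rule code drives Match (matchPair is a hypothetical helper; token types are plain ints supplied by the generated lexer):

    func matchPair(p *BaseParser, first, second int) (Token, Token) {
        a := p.Match(first) // Matches or recovers inline; nil on unrecoverable error
        b := p.Match(second)
        return a, b
    }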
+
+// MatchWildcard Matches the current input symbol as a wildcard. If the symbol
+// type Matches (i.e. has a value greater than 0), [ErrorStrategy.ReportMatch]
+// and [Consume] are called to complete the Match process.
+//
+// If the symbol type does not Match,
+// [ErrorStrategy.RecoverInline] is called on the current error
+// strategy to attempt recovery. If BuildParseTrees is
+// true and the token index of the symbol returned by
+// [ErrorStrategy.RecoverInline] is -1, the symbol is added to
+// the parse tree by calling [ParserRuleContext.AddErrorNode].
+//
+// MatchWildcard returns the Matched symbol.
+
+func (p *BaseParser) MatchWildcard() Token {
+ t := p.GetCurrentToken()
+ if t.GetTokenType() > 0 {
+ p.errHandler.ReportMatch(p)
+ p.Consume()
+ } else {
+ t = p.errHandler.RecoverInline(p)
+ if p.BuildParseTrees && t.GetTokenIndex() == -1 {
+ // we must have conjured up a new token during single token
+ // insertion if it's not the current symbol
+ p.ctx.AddErrorNode(t)
+ }
+ }
+ return t
+}
+
+func (p *BaseParser) GetParserRuleContext() ParserRuleContext {
+ return p.ctx
+}
+
+func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) {
+ p.ctx = v
+}
+
+func (p *BaseParser) GetParseListeners() []ParseTreeListener {
+ if p.parseListeners == nil {
+ return make([]ParseTreeListener, 0)
+ }
+ return p.parseListeners
+}
+
+// AddParseListener registers listener to receive events during the parsing process.
+//
+// To support output-preserving grammar transformations (including but not
+// limited to left-recursion removal, automated left-factoring, and
+// optimized code generation), calls to listener methods during the parse
+// may differ substantially from calls made by
+// [ParseTreeWalker.DEFAULT] used after the parse is complete. In
+// particular, rule entry and exit events may occur in a different order
+// during the parse than after the parser. In addition, calls to certain
+// rule entry methods may be omitted.
+//
+// With the following specific exceptions, calls to listener events are
+// deterministic, i.e. for identical input the calls to listener
+// methods will be the same.
+//
+// - Alterations to the grammar used to generate code may change the
+// behavior of the listener calls.
+// - Alterations to the command line options passed to ANTLR 4 when
+// generating the parser may change the behavior of the listener calls.
+// - Changing the version of the ANTLR Tool used to generate the parser
+// may change the behavior of the listener calls.
+func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
+ if listener == nil {
+ panic("listener")
+ }
+ if p.parseListeners == nil {
+ p.parseListeners = make([]ParseTreeListener, 0)
+ }
+ p.parseListeners = append(p.parseListeners, listener)
+}
+
+// RemoveParseListener removes listener from the list of parse listeners.
+//
+// If listener is nil or has not been added as a parse
+// listener, this func does nothing.
+func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) {
+
+ if p.parseListeners != nil {
+
+ idx := -1
+ for i, v := range p.parseListeners {
+ if v == listener {
+ idx = i
+ break
+ }
+ }
+
+ if idx == -1 {
+ return
+ }
+
+ // remove the listener from the slice
+ p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...)
+
+ if len(p.parseListeners) == 0 {
+ p.parseListeners = nil
+ }
+ }
+}
+
+// Remove all parse listeners.
+func (p *BaseParser) removeParseListeners() {
+ p.parseListeners = nil
+}
+
+// TriggerEnterRuleEvent notifies all parse listeners of an enter rule event.
+func (p *BaseParser) TriggerEnterRuleEvent() {
+ if p.parseListeners != nil {
+ ctx := p.ctx
+ for _, listener := range p.parseListeners {
+ listener.EnterEveryRule(ctx)
+ ctx.EnterRule(listener)
+ }
+ }
+}
+
+// TriggerExitRuleEvent notifies any parse listeners of an exit rule event.
+func (p *BaseParser) TriggerExitRuleEvent() {
+ if p.parseListeners != nil {
+ // reverse order walk of listeners
+ ctx := p.ctx
+ l := len(p.parseListeners) - 1
+
+ for i := range p.parseListeners {
+ listener := p.parseListeners[l-i]
+ ctx.ExitRule(listener)
+ listener.ExitEveryRule(ctx)
+ }
+ }
+}
+
+func (p *BaseParser) GetInterpreter() *ParserATNSimulator {
+ return p.Interpreter
+}
+
+func (p *BaseParser) GetATN() *ATN {
+ return p.Interpreter.atn
+}
+
+func (p *BaseParser) GetTokenFactory() TokenFactory {
+ return p.input.GetTokenSource().GetTokenFactory()
+}
+
+// setTokenFactory is used to tell our token source and error strategy about a new way to create tokens.
+func (p *BaseParser) setTokenFactory(factory TokenFactory) {
+ p.input.GetTokenSource().setTokenFactory(factory)
+}
+
+// GetATNWithBypassAlts - the ATN with bypass alternatives is expensive to create, so we create it
+// lazily.
+func (p *BaseParser) GetATNWithBypassAlts() {
+
+ // TODO - Implement this?
+ panic("Not implemented!")
+
+ // serializedAtn := p.getSerializedATN()
+ // if (serializedAtn == nil) {
+ // panic("The current parser does not support an ATN with bypass alternatives.")
+ // }
+ // result := p.bypassAltsAtnCache[serializedAtn]
+ // if (result == nil) {
+ // deserializationOptions := NewATNDeserializationOptions(nil)
+ // deserializationOptions.generateRuleBypassTransitions = true
+ // result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn)
+ // p.bypassAltsAtnCache[serializedAtn] = result
+ // }
+ // return result
+}
+
+// compileParseTreePattern is the preferred method of getting a tree pattern.
+// For example, here's a sample use (shown in Java runtime syntax, as this is
+// not yet implemented in the Go runtime):
+//
+//	ParseTree t = parser.expr();
+//	ParseTreePattern p = parser.compileParseTreePattern("<ID>+0", MyParser.RULE_expr);
+//	ParseTreeMatch m = p.Match(t);
+//	String id = m.Get("ID");
+
+//goland:noinspection GoUnusedParameter
+func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) {
+
+ panic("NewParseTreePatternMatcher not implemented!")
+ //
+ // if (lexer == nil) {
+ // if (p.GetTokenStream() != nil) {
+ // tokenSource := p.GetTokenStream().GetTokenSource()
+ // if _, ok := tokenSource.(ILexer); ok {
+ // lexer = tokenSource
+ // }
+ // }
+ // }
+ // if (lexer == nil) {
+ // panic("Parser can't discover a lexer to use")
+ // }
+
+ // m := NewParseTreePatternMatcher(lexer, p)
+ // return m.compile(pattern, patternRuleIndex)
+}
+
+func (p *BaseParser) GetInputStream() IntStream {
+ return p.GetTokenStream()
+}
+
+func (p *BaseParser) SetInputStream(input TokenStream) {
+ p.SetTokenStream(input)
+}
+
+func (p *BaseParser) GetTokenStream() TokenStream {
+ return p.input
+}
+
+// SetTokenStream installs input as the token stream and resets the parser.
+func (p *BaseParser) SetTokenStream(input TokenStream) {
+ p.input = nil
+ p.reset()
+ p.input = input
+}
+
+// GetCurrentToken returns the current token at LT(1).
+//
+// [Match] needs to return the current input symbol, which gets put
+// into the label for the associated token ref e.g., x=ID.
+func (p *BaseParser) GetCurrentToken() Token {
+ return p.input.LT(1)
+}
+
+func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) {
+ if offendingToken == nil {
+ offendingToken = p.GetCurrentToken()
+ }
+ p._SyntaxErrors++
+ line := offendingToken.GetLine()
+ column := offendingToken.GetColumn()
+ listener := p.GetErrorListenerDispatch()
+ listener.SyntaxError(p, offendingToken, line, column, msg, err)
+}
+
+func (p *BaseParser) Consume() Token {
+ o := p.GetCurrentToken()
+ if o.GetTokenType() != TokenEOF {
+ p.GetInputStream().Consume()
+ }
+ hasListener := p.parseListeners != nil && len(p.parseListeners) > 0
+ if p.BuildParseTrees || hasListener {
+ if p.errHandler.InErrorRecoveryMode(p) {
+ node := p.ctx.AddErrorNode(o)
+ if p.parseListeners != nil {
+ for _, l := range p.parseListeners {
+ l.VisitErrorNode(node)
+ }
+ }
+
+ } else {
+ node := p.ctx.AddTokenNode(o)
+ if p.parseListeners != nil {
+ for _, l := range p.parseListeners {
+ l.VisitTerminal(node)
+ }
+ }
+ }
+ // node.invokingState = p.state
+ }
+
+ return o
+}
+
+func (p *BaseParser) addContextToParseTree() {
+ // add current context to parent if we have a parent
+ if p.ctx.GetParent() != nil {
+ p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx)
+ }
+}
+
+func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, _ int) {
+ p.SetState(state)
+ p.ctx = localctx
+ p.ctx.SetStart(p.input.LT(1))
+ if p.BuildParseTrees {
+ p.addContextToParseTree()
+ }
+ if p.parseListeners != nil {
+ p.TriggerEnterRuleEvent()
+ }
+}
+
+func (p *BaseParser) ExitRule() {
+ p.ctx.SetStop(p.input.LT(-1))
+ // trigger event on ctx, before it reverts to parent
+ if p.parseListeners != nil {
+ p.TriggerExitRuleEvent()
+ }
+ p.SetState(p.ctx.GetInvokingState())
+ if p.ctx.GetParent() != nil {
+ p.ctx = p.ctx.GetParent().(ParserRuleContext)
+ } else {
+ p.ctx = nil
+ }
+}
+
+func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) {
+ localctx.SetAltNumber(altNum)
+ // if we have a new localctx, make sure we replace existing ctx
+ // that is previous child of parse tree
+ if p.BuildParseTrees && p.ctx != localctx {
+ if p.ctx.GetParent() != nil {
+ p.ctx.GetParent().(ParserRuleContext).RemoveLastChild()
+ p.ctx.GetParent().(ParserRuleContext).AddChild(localctx)
+ }
+ }
+ p.ctx = localctx
+}
+
+// GetPrecedence returns the precedence level for the topmost precedence rule,
+// or -1 if the parser context is not nested within a precedence rule.
+func (p *BaseParser) GetPrecedence() int {
+ if len(p.precedenceStack) == 0 {
+ return -1
+ }
+
+ return p.precedenceStack[len(p.precedenceStack)-1]
+}
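Generated left-recursive rule code consults this stack through Precpred (defined below); a sketch of the guard it emits for each alternative (canTakeAlternative is a hypothetical helper):

    func canTakeAlternative(p *BaseParser, required int) bool {
        // The alternative is viable only if its precedence is at least
        // the level currently on top of the precedence stack.
        return p.Precpred(nil, required)
    }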
+
+func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, _, precedence int) {
+ p.SetState(state)
+ p.precedenceStack.Push(precedence)
+ p.ctx = localctx
+ p.ctx.SetStart(p.input.LT(1))
+ if p.parseListeners != nil {
+ p.TriggerEnterRuleEvent() // simulates rule entry for
+ // left-recursive rules
+ }
+}
+
+// PushNewRecursionContext is like [EnterRule] but for recursive rules.
+func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, _ int) {
+ previous := p.ctx
+ previous.SetParent(localctx)
+ previous.SetInvokingState(state)
+ previous.SetStop(p.input.LT(-1))
+
+ p.ctx = localctx
+ p.ctx.SetStart(previous.GetStart())
+ if p.BuildParseTrees {
+ p.ctx.AddChild(previous)
+ }
+ if p.parseListeners != nil {
+ p.TriggerEnterRuleEvent() // simulates rule entry for
+ // left-recursive rules
+ }
+}
+
+func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) {
+ _, _ = p.precedenceStack.Pop()
+ p.ctx.SetStop(p.input.LT(-1))
+ retCtx := p.ctx // save current ctx (return value)
+ // unroll so ctx is as it was before call to recursive method
+ if p.parseListeners != nil {
+ for p.ctx != parentCtx {
+ p.TriggerExitRuleEvent()
+ p.ctx = p.ctx.GetParent().(ParserRuleContext)
+ }
+ } else {
+ p.ctx = parentCtx
+ }
+ // hook into tree
+ retCtx.SetParent(parentCtx)
+ if p.BuildParseTrees && parentCtx != nil {
+ // add return ctx into invoking rule's tree
+ parentCtx.AddChild(retCtx)
+ }
+}
+
+func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext {
+ ctx := p.ctx
+ for ctx != nil {
+ if ctx.GetRuleIndex() == ruleIndex {
+ return ctx
+ }
+ ctx = ctx.GetParent().(ParserRuleContext)
+ }
+ return nil
+}
+
+func (p *BaseParser) Precpred(_ RuleContext, precedence int) bool {
+ return precedence >= p.precedenceStack[len(p.precedenceStack)-1]
+}
+
+//goland:noinspection GoUnusedParameter
+func (p *BaseParser) inContext(context ParserRuleContext) bool {
+ // TODO: useful in parser?
+ return false
+}
+
+// IsExpectedToken checks whether symbol can follow the current state in the
+// [ATN]. The behavior of this method is equivalent to the following, but is
+// implemented such that the complete context-sensitive follow set does not
+// need to be explicitly constructed.
+//
+// return getExpectedTokens().contains(symbol)
+func (p *BaseParser) IsExpectedToken(symbol int) bool {
+ atn := p.Interpreter.atn
+ ctx := p.ctx
+ s := atn.states[p.state]
+ following := atn.NextTokens(s, nil)
+ if following.contains(symbol) {
+ return true
+ }
+ if !following.contains(TokenEpsilon) {
+ return false
+ }
+ for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) {
+ invokingState := atn.states[ctx.GetInvokingState()]
+ rt := invokingState.GetTransitions()[0]
+ following = atn.NextTokens(rt.(*RuleTransition).followState, nil)
+ if following.contains(symbol) {
+ return true
+ }
+ ctx = ctx.GetParent().(ParserRuleContext)
+ }
+ if following.contains(TokenEpsilon) && symbol == TokenEOF {
+ return true
+ }
+
+ return false
+}
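+
+// A minimal sketch of a possible use in custom error handling (not part of
+// this runtime): drop a stray token only when it could not legally follow
+// the current state anyway:
+//
+//	if !p.IsExpectedToken(p.GetTokenStream().LA(1)) {
+//		p.Consume() // discard the offending token and retry
+//	}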
+
+// GetExpectedTokens returns the set of input symbols which could follow the current parser
+// state and context, as given by [GetState] and [GetContext],
+// respectively.
+func (p *BaseParser) GetExpectedTokens() *IntervalSet {
+ return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx)
+}
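+
+// A common use (sketch): include the expected set in a diagnostic message:
+//
+//	expected := p.GetExpectedTokens()
+//	msg := "expected one of " + expected.String()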
+
+func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet {
+ atn := p.Interpreter.atn
+ s := atn.states[p.state]
+ return atn.NextTokens(s, nil)
+}
+
+// GetRuleIndex returns a rule's index (i.e., the RULE_ruleName field) or -1 if not found.
+func (p *BaseParser) GetRuleIndex(ruleName string) int {
+ var ruleIndex, ok = p.GetRuleIndexMap()[ruleName]
+ if ok {
+ return ruleIndex
+ }
+
+ return -1
+}
+
+// GetRuleInvocationStack returns a list of the rule names in your parser instance
+// leading up to a call to the current rule. You could override if
+// you want more details such as the file/line info of where
+// in the ATN a rule is invoked.
+func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string {
+ if c == nil {
+ c = p.ctx
+ }
+ stack := make([]string, 0)
+ for c != nil {
+ // compute what follows who invoked us
+ ruleIndex := c.GetRuleIndex()
+ if ruleIndex < 0 {
+ stack = append(stack, "n/a")
+ } else {
+ stack = append(stack, p.GetRuleNames()[ruleIndex])
+ }
+
+ vp := c.GetParent()
+
+ if vp == nil {
+ break
+ }
+
+ c = vp.(ParserRuleContext)
+ }
+ return stack
+}
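+
+// For example (sketch; parser is assumed to be a generated parser embedding
+// BaseParser), an error listener can log the rule stack at the point of a
+// syntax error:
+//
+//	stack := parser.GetRuleInvocationStack(nil) // nil starts from the current context
+//	fmt.Printf("rule stack: %v\n", stack)       // e.g. [atom expr stat prog]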
+
+// GetDFAStrings returns a string representation of all decision DFAs, for debugging purposes.
+func (p *BaseParser) GetDFAStrings() string {
+ return fmt.Sprint(p.Interpreter.decisionToDFA)
+}
+
+// DumpDFA prints the whole of the DFA for debugging
+func (p *BaseParser) DumpDFA() {
+ seenOne := false
+ for _, dfa := range p.Interpreter.decisionToDFA {
+ if dfa.Len() > 0 {
+ if seenOne {
+ fmt.Println()
+ }
+ fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":")
+ fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames))
+ seenOne = true
+ }
+ }
+}
+
+func (p *BaseParser) GetSourceName() string {
+ return p.GrammarFileName
+}
+
+// SetTrace installs a trace listener for the parse.
+//
+// During a parse it is sometimes useful to listen in on the rule entry and exit
+// events as well as token Matches. This is for quick and dirty debugging.
+func (p *BaseParser) SetTrace(trace *TraceListener) {
+ if trace == nil {
+ p.RemoveParseListener(p.tracer)
+ p.tracer = nil
+ } else {
+ if p.tracer != nil {
+ p.RemoveParseListener(p.tracer)
+ }
+ p.tracer = NewTraceListener(p)
+ p.AddParseListener(p.tracer)
+ }
+}
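+
+// A minimal usage sketch (MyParser is a hypothetical generated type). Note
+// that any non-nil argument enables tracing, since SetTrace builds its own
+// listener; nil disables it:
+//
+//	parser := NewMyParser(tokens)
+//	parser.SetTrace(&antlr.TraceListener{})
+//	// ... parse with rule entry/exit and token match events printed ...
+//	parser.SetTrace(nil)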
diff --git a/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go b/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go
new file mode 100644
index 000000000..ae2869692
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go
@@ -0,0 +1,1668 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// ClosureBusy is a store of ATNConfigs and is a tiny abstraction layer over
+// a standard JStore so that we can use Lazy instantiation of the JStore, mostly
+// to avoid polluting the stats module with a ton of JStore instances with nothing in them.
+type ClosureBusy struct {
+ bMap *JStore[*ATNConfig, Comparator[*ATNConfig]]
+ desc string
+}
+
+// NewClosureBusy creates a new ClosureBusy instance used to avoid infinite recursion for right-recursive rules
+func NewClosureBusy(desc string) *ClosureBusy {
+ return &ClosureBusy{
+ desc: desc,
+ }
+}
+
+func (c *ClosureBusy) Put(config *ATNConfig) (*ATNConfig, bool) {
+ if c.bMap == nil {
+ c.bMap = NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, c.desc)
+ }
+ return c.bMap.Put(config)
+}
+
+type ParserATNSimulator struct {
+ BaseATNSimulator
+
+ parser Parser
+ predictionMode int
+ input TokenStream
+ startIndex int
+ dfa *DFA
+ mergeCache *JPCMap
+ outerContext ParserRuleContext
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator {
+
+ p := &ParserATNSimulator{
+ BaseATNSimulator: BaseATNSimulator{
+ atn: atn,
+ sharedContextCache: sharedContextCache,
+ },
+ }
+
+ p.parser = parser
+ p.decisionToDFA = decisionToDFA
+ // SLL, LL, or LL + exact ambiguity detection?
+ p.predictionMode = PredictionModeLL
+ // LAME globals to avoid parameters!!!!! I need these down deep in predTransition
+ p.input = nil
+ p.startIndex = 0
+ p.outerContext = nil
+ p.dfa = nil
+ // Each prediction operation uses a cache for merge of prediction contexts.
+ // Don't keep around as it wastes huge amounts of memory. [JPCMap]
+ // isn't Synchronized, but we're ok since two threads shouldn't reuse same
+ // parser/atn-simulator object because it can only handle one input at a time.
+ // This maps graphs a and b to merged result c. (a,b) -> c. We can avoid
+ // the merge if we ever see a and b again. Note that (b,a) -> c should
+ // also be examined during cache lookup.
+ //
+ p.mergeCache = nil
+
+ return p
+}
+
+func (p *ParserATNSimulator) GetPredictionMode() int {
+ return p.predictionMode
+}
+
+func (p *ParserATNSimulator) SetPredictionMode(v int) {
+ p.predictionMode = v
+}
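+
+// A sketch of the common two-stage parsing strategy (the parser variable and
+// the rewind details are assumptions, not part of this file): try fast SLL
+// prediction first, and rerun with full LL only if SLL hits a parse error:
+//
+//	parser.GetInterpreter().SetPredictionMode(antlr.PredictionModeSLL)
+//	// on a parse error: reset the token stream and parser state, then
+//	parser.GetInterpreter().SetPredictionMode(antlr.PredictionModeLL)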
+
+func (p *ParserATNSimulator) reset() {
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) AdaptivePredict(parser *BaseParser, input TokenStream, decision int, outerContext ParserRuleContext) int {
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("adaptivePredict decision " + strconv.Itoa(decision) +
+ " exec LA(1)==" + p.getLookaheadName(input) +
+ " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" +
+ strconv.Itoa(input.LT(1).GetColumn()))
+ }
+ p.input = input
+ p.startIndex = input.Index()
+ p.outerContext = outerContext
+
+ dfa := p.decisionToDFA[decision]
+ p.dfa = dfa
+ m := input.Mark()
+ index := input.Index()
+
+ defer func() {
+ p.dfa = nil
+ p.mergeCache = nil // whack cache after each prediction
+ // Do not attempt to run a GC now that we're done with the cache, as doing so makes the
+ // GC overhead terrible for badly formed grammars and has little effect on well formed
+ // grammars.
+ // I have made some extra effort to try and reduce memory pressure by reusing allocations when
+ // possible. However, it can only have a limited effect. The real solution is to encourage grammar
+ // authors to think more carefully about their grammar and to use the new antlr.stats tag to inspect
+ // what is happening at runtime, along with using the error listener to report ambiguities.
+
+ input.Seek(index)
+ input.Release(m)
+ }()
+
+ // Now we are certain to have a specific decision's DFA
+ // But, do we still need an initial state?
+ var s0 *DFAState
+ p.atn.stateMu.RLock()
+ if dfa.getPrecedenceDfa() {
+ p.atn.edgeMu.RLock()
+ // the start state for a precedence DFA depends on the current
+ // parser precedence, and is provided by a DFA method.
+ s0 = dfa.getPrecedenceStartState(p.parser.GetPrecedence())
+ p.atn.edgeMu.RUnlock()
+ } else {
+ // the start state for a "regular" DFA is just s0
+ s0 = dfa.getS0()
+ }
+ p.atn.stateMu.RUnlock()
+
+ if s0 == nil {
+ if outerContext == nil {
+ outerContext = ParserRuleContextEmpty
+ }
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) +
+ " exec LA(1)==" + p.getLookaheadName(input) +
+ ", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil))
+ }
+ fullCtx := false
+ s0Closure := p.computeStartState(dfa.atnStartState, ParserRuleContextEmpty, fullCtx)
+
+ p.atn.stateMu.Lock()
+ if dfa.getPrecedenceDfa() {
+ // If p is a precedence DFA, we use applyPrecedenceFilter
+ // to convert the computed start state to a precedence start
+ // state. We then use DFA.setPrecedenceStartState to set the
+ // appropriate start state for the precedence level rather
+ // than simply setting DFA.s0.
+ //
+ dfa.s0.configs = s0Closure
+ s0Closure = p.applyPrecedenceFilter(s0Closure)
+ s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure))
+ p.atn.edgeMu.Lock()
+ dfa.setPrecedenceStartState(p.parser.GetPrecedence(), s0)
+ p.atn.edgeMu.Unlock()
+ } else {
+ s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure))
+ dfa.setS0(s0)
+ }
+ p.atn.stateMu.Unlock()
+ }
+
+ alt, re := p.execATN(dfa, s0, input, index, outerContext)
+ parser.SetError(re)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil))
+ }
+ return alt
+
+}
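+
+// Generated parsers are the expected caller of AdaptivePredict (sketch; the
+// decision number 3 is illustrative only):
+//
+//	switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 3, p.GetParserRuleContext()) {
+//	case 1:
+//		// ... first alternative ...
+//	}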
+
+// execATN performs ATN simulation to compute a predicted alternative based
+// upon the remaining input, but also updates the DFA cache to avoid
+// having to traverse the ATN again for the same input sequence.
+//
+// There are some key conditions we're looking for after computing a new
+// set of ATN configs (proposed DFA state):
+//
+// - If the set is empty, there is no viable alternative for the current symbol
+// - Does the state uniquely predict an alternative?
+// - Does the state have a conflict that would prevent us from
+// putting it on the work list?
+//
+// We also have some key operations to do:
+//
+// - Add an edge from the previous DFA state to the potentially new DFA state, D,
+// upon the current symbol, but only if adding to the work list, which means in all
+// cases except no viable alternative (and possibly non-greedy decisions?)
+// - Collecting predicates and adding semantic context to DFA accept states
+// - adding rule context to context-sensitive DFA accept states
+// - Consuming an input symbol
+// - Reporting a conflict
+// - Reporting an ambiguity
+// - Reporting a context sensitivity
+// - Reporting insufficient predicates
+//
+// Cover these cases:
+//
+// - dead end
+// - single alt
+// - single alt + predicates
+// - conflict
+// - conflict + predicates
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) (int, RecognitionException) {
+
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) +
+ ", DFA state " + s0.String() +
+ ", LA(1)==" + p.getLookaheadName(input) +
+ " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn()))
+ }
+
+ previousD := s0
+
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("s0 = " + s0.String())
+ }
+ t := input.LA(1)
+ for { // for more work
+ D := p.getExistingTargetState(previousD, t)
+ if D == nil {
+ D = p.computeTargetState(dfa, previousD, t)
+ }
+ if D == ATNSimulatorError {
+ // if any configs in previous dipped into outer context, that
+ // means that input up to t actually finished entry rule
+ // at least for SLL decision. Full LL doesn't dip into outer
+ // so don't need special case.
+ // We will get an error no matter what, so delay it until after the
+ // decision to produce a better error message. Also, no reachable target
+ // ATN states in SLL implies LL will also get nowhere.
+ // If conflict in states that dip out, choose min since we
+ // will get error no matter what.
+ e := p.noViableAlt(input, outerContext, previousD.configs, startIndex)
+ input.Seek(startIndex)
+ alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext)
+ if alt != ATNInvalidAltNumber {
+ return alt, nil
+ }
+ p.parser.SetError(e)
+ return ATNInvalidAltNumber, e
+ }
+ if D.requiresFullContext && p.predictionMode != PredictionModeSLL {
+ // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error)
+ conflictingAlts := D.configs.conflictingAlts
+ if D.predicates != nil {
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("DFA state has preds in DFA sim LL fail-over")
+ }
+ conflictIndex := input.Index()
+ if conflictIndex != startIndex {
+ input.Seek(startIndex)
+ }
+ conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true)
+ if conflictingAlts.length() == 1 {
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("Full LL avoided")
+ }
+ return conflictingAlts.minValue(), nil
+ }
+ if conflictIndex != startIndex {
+ // restore the index so Reporting the fallback to full
+ // context occurs with the index at the correct spot
+ input.Seek(conflictIndex)
+ }
+ }
+ if runtimeConfig.parserATNSimulatorDFADebug {
+ fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String())
+ }
+ fullCtx := true
+ s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx)
+ p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index())
+ alt, re := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext)
+ return alt, re
+ }
+ if D.isAcceptState {
+ if D.predicates == nil {
+ return D.prediction, nil
+ }
+ stopIndex := input.Index()
+ input.Seek(startIndex)
+ alts := p.evalSemanticContext(D.predicates, outerContext, true)
+
+ switch alts.length() {
+ case 0:
+ return ATNInvalidAltNumber, p.noViableAlt(input, outerContext, D.configs, startIndex)
+ case 1:
+ return alts.minValue(), nil
+ default:
+ // Report ambiguity after predicate evaluation to make sure the correct set of ambig alts is Reported.
+ p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs)
+ return alts.minValue(), nil
+ }
+ }
+ previousD = D
+
+ if t != TokenEOF {
+ input.Consume()
+ t = input.LA(1)
+ }
+ }
+}
+
+// getExistingTargetState returns an existing target state for an edge in the
+// DFA. If the target state for the edge has not yet been computed or is
+// otherwise not available, this method returns nil.
+//
+// previousD is the current DFA state and t is the next input symbol. The
+// func returns the existing target DFA state for the given input symbol t,
+// or nil if the target state for this edge is not already cached.
+func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) *DFAState {
+ if t+1 < 0 {
+ return nil
+ }
+
+ p.atn.edgeMu.RLock()
+ defer p.atn.edgeMu.RUnlock()
+ edges := previousD.getEdges()
+ if edges == nil || t+1 >= len(edges) {
+ return nil
+ }
+ return previousD.getIthEdge(t + 1)
+}
+
+// computeTargetState computes a target state for an edge in the DFA, and
+// attempts to add the computed state and corresponding edge to the DFA.
+//
+// dfa is the DFA, previousD the current DFA state, and t the next input
+// symbol. The func returns the computed target DFA state for the given input
+// symbol t. If t does not lead to a valid DFA state, this method returns
+// [ATNSimulatorError].
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState {
+ reach := p.computeReachSet(previousD.configs, t, false)
+
+ if reach == nil {
+ p.addDFAEdge(dfa, previousD, t, ATNSimulatorError)
+ return ATNSimulatorError
+ }
+ // create new target state we'll add to DFA after it's complete
+ D := NewDFAState(-1, reach)
+
+ predictedAlt := p.getUniqueAlt(reach)
+
+ if runtimeConfig.parserATNSimulatorDebug {
+ altSubSets := PredictionModegetConflictingAltSubsets(reach)
+ fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) +
+ ", previous=" + previousD.configs.String() +
+ ", configs=" + reach.String() +
+ ", predict=" + strconv.Itoa(predictedAlt) +
+ ", allSubsetsConflict=" +
+ fmt.Sprint(PredictionModeallSubsetsConflict(altSubSets)) +
+ ", conflictingAlts=" + p.getConflictingAlts(reach).String())
+ }
+ if predictedAlt != ATNInvalidAltNumber {
+ // NO CONFLICT, UNIQUELY PREDICTED ALT
+ D.isAcceptState = true
+ D.configs.uniqueAlt = predictedAlt
+ D.setPrediction(predictedAlt)
+ } else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) {
+ // MORE THAN ONE VIABLE ALTERNATIVE
+ D.configs.conflictingAlts = p.getConflictingAlts(reach)
+ D.requiresFullContext = true
+ // in SLL-only mode, we will stop at this state and return the minimum alt
+ D.isAcceptState = true
+ D.setPrediction(D.configs.conflictingAlts.minValue())
+ }
+ if D.isAcceptState && D.configs.hasSemanticContext {
+ p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision))
+ if D.predicates != nil {
+ D.setPrediction(ATNInvalidAltNumber)
+ }
+ }
+ // all adds to dfa are done after we've created full D state
+ D = p.addDFAEdge(dfa, previousD, t, D)
+ return D
+}
+
+func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState DecisionState) {
+ // We need to test all predicates, even in DFA states that
+ // uniquely predict alternative.
+ nalts := len(decisionState.GetTransitions())
+ // Update DFA so reach becomes accept state with (predicate,alt)
+ // pairs if preds found for conflicting alts
+ altsToCollectPredsFrom := p.getConflictingAltsOrUniqueAlt(dfaState.configs)
+ altToPred := p.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts)
+ if altToPred != nil {
+ dfaState.predicates = p.getPredicatePredictions(altsToCollectPredsFrom, altToPred)
+ dfaState.setPrediction(ATNInvalidAltNumber) // make sure we use preds
+ } else {
+ // There are preds in configs but they might go away
+ // when OR'd together like {p}? || NONE == NONE. If neither
+ // alt has preds, resolve to min alt
+ dfaState.setPrediction(altsToCollectPredsFrom.minValue())
+ }
+}
+
+// comes back with reach.uniqueAlt set to a valid alt
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 *ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) (int, RecognitionException) {
+
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("execATNWithFullContext " + s0.String())
+ }
+
+ fullCtx := true
+ foundExactAmbig := false
+ var reach *ATNConfigSet
+ previous := s0
+ input.Seek(startIndex)
+ t := input.LA(1)
+ predictedAlt := -1
+
+ for { // for more work
+ reach = p.computeReachSet(previous, t, fullCtx)
+ if reach == nil {
+ // if any configs in previous dipped into outer context, that
+ // means that input up to t actually finished entry rule
+ // at least for LL decision. Full LL doesn't dip into outer
+ // so don't need special case.
+ // We will get an error no matter what, so delay it until after the
+ // decision to produce a better error message. Also, no reachable target
+ // ATN states in SLL implies LL will also get nowhere.
+ // If conflict in states that dip out, choose min since we
+ // will get error no matter what.
+ input.Seek(startIndex)
+ alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext)
+ if alt != ATNInvalidAltNumber {
+ return alt, nil
+ }
+ return alt, p.noViableAlt(input, outerContext, previous, startIndex)
+ }
+ altSubSets := PredictionModegetConflictingAltSubsets(reach)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" +
+ strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" +
+ fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets)))
+ }
+ reach.uniqueAlt = p.getUniqueAlt(reach)
+ // unique prediction?
+ if reach.uniqueAlt != ATNInvalidAltNumber {
+ predictedAlt = reach.uniqueAlt
+ break
+ }
+ if p.predictionMode != PredictionModeLLExactAmbigDetection {
+ predictedAlt = PredictionModeresolvesToJustOneViableAlt(altSubSets)
+ if predictedAlt != ATNInvalidAltNumber {
+ break
+ }
+ } else {
+ // In exact ambiguity mode, we never try to terminate early.
+ // Just keeps scarfing until we know what the conflict is
+ if PredictionModeallSubsetsConflict(altSubSets) && PredictionModeallSubsetsEqual(altSubSets) {
+ foundExactAmbig = true
+ predictedAlt = PredictionModegetSingleViableAlt(altSubSets)
+ break
+ }
+ // else there are multiple non-conflicting subsets or
+ // we're not sure what the ambiguity is yet.
+ // So, keep going.
+ }
+ previous = reach
+ if t != TokenEOF {
+ input.Consume()
+ t = input.LA(1)
+ }
+ }
+ // If the configuration set uniquely predicts an alternative,
+ // without conflict, then we know that it's a full LL decision
+ // not SLL.
+ if reach.uniqueAlt != ATNInvalidAltNumber {
+ p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index())
+ return predictedAlt, nil
+ }
+ // We do not check predicates here because we have checked them
+ // on-the-fly when doing full context prediction.
+
+ //
+ // In non-exact ambiguity detection mode, we might actually be able to
+ // detect an exact ambiguity, but I'm not going to spend the cycles
+ // needed to check. We only emit ambiguity warnings in exact ambiguity
+ // mode.
+ //
+ // For example, we might know that we have conflicting configurations.
+ // But, that does not mean that there is no way forward without a
+ // conflict. It's possible to have non-conflicting alt subsets as in:
+ //
+ // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}]
+ //
+ // from
+ //
+ // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]),
+ // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])]
+ //
+ // In this case, (17,1,[5 $]) indicates there is some next sequence that
+ // would resolve this without conflict to alternative 1. Any other viable
+ // next sequence, however, is associated with a conflict. We stop
+ // looking for input because no amount of further lookahead will alter
+ // the fact that we should predict alternative 1. We just can't say for
+ // sure that there is an ambiguity without looking further.
+
+ p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, reach.Alts(), reach)
+
+ return predictedAlt, nil
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullCtx bool) *ATNConfigSet {
+ if p.mergeCache == nil {
+ p.mergeCache = NewJPCMap(ReachSetCollection, "Merge cache for computeReachSet()")
+ }
+ intermediate := NewATNConfigSet(fullCtx)
+
+ // Configurations already in a rule stop state indicate reaching the end
+ // of the decision rule (local context) or end of the start rule (full
+ // context). Once reached, these configurations are never updated by a
+ // closure operation, so they are handled separately for the performance
+ // advantage of having a smaller intermediate set when calling closure.
+ //
+ // For full-context reach operations, separate handling is required to
+ // ensure that the alternative Matching the longest overall sequence is
+ // chosen when multiple such configurations can Match the input.
+
+ var skippedStopStates []*ATNConfig
+
+ // First figure out where we can reach on input t
+ for _, c := range closure.configs {
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String())
+ }
+
+ if _, ok := c.GetState().(*RuleStopState); ok {
+ if fullCtx || t == TokenEOF {
+ skippedStopStates = append(skippedStopStates, c)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("added " + c.String() + " to SkippedStopStates")
+ }
+ }
+ continue
+ }
+
+ for _, trans := range c.GetState().GetTransitions() {
+ target := p.getReachableTarget(trans, t)
+ if target != nil {
+ cfg := NewATNConfig4(c, target)
+ intermediate.Add(cfg, p.mergeCache)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("added " + cfg.String() + " to intermediate")
+ }
+ }
+ }
+ }
+
+ // Now figure out where the reach operation can take us...
+ var reach *ATNConfigSet
+
+ // This block optimizes the reach operation for intermediate sets which
+ // trivially indicate a termination state for the overall
+ // AdaptivePredict operation.
+ //
+ // The conditions assume that intermediate
+ // contains all configurations relevant to the reach set, but this
+ // condition is not true when one or more configurations have been
+ // withheld in SkippedStopStates, or when the current symbol is EOF.
+ //
+ if skippedStopStates == nil && t != TokenEOF {
+ if len(intermediate.configs) == 1 {
+ // Don't pursue the closure if there is just one state.
+ // It can only have one alternative, so just add it to the result.
+ reach = intermediate
+ } else if p.getUniqueAlt(intermediate) != ATNInvalidAltNumber {
+ // Also don't pursue the closure if there is a unique alternative
+ // among the configurations.
+ reach = intermediate
+ }
+ }
+ // If the reach set could not be trivially determined, perform a closure
+ // operation on the intermediate set to compute its initial value.
+ //
+ if reach == nil {
+ reach = NewATNConfigSet(fullCtx)
+ closureBusy := NewClosureBusy("ParserATNSimulator.computeReachSet() make a closureBusy")
+ treatEOFAsEpsilon := t == TokenEOF
+ amount := len(intermediate.configs)
+ for k := 0; k < amount; k++ {
+ p.closure(intermediate.configs[k], reach, closureBusy, false, fullCtx, treatEOFAsEpsilon)
+ }
+ }
+ if t == TokenEOF {
+ // After consuming EOF no additional input is possible, so we are
+ // only interested in configurations which reached the end of the
+ // decision rule (local context) or end of the start rule (full
+ // context). Update reach to contain only these configurations. This
+ // handles both explicit EOF transitions in the grammar and implicit
+ // EOF transitions following the end of the decision or start rule.
+ //
+ // When reach==intermediate, no closure operation was performed. In
+ // this case, removeAllConfigsNotInRuleStopState needs to check for
+ // reachable rule stop states as well as configurations already in
+ // a rule stop state.
+ //
+ // This is handled before the configurations in SkippedStopStates,
+ // because any configurations potentially added from that list are
+ // already guaranteed to meet this condition whether or not it is
+ // required.
+ //
+ reach = p.removeAllConfigsNotInRuleStopState(reach, reach.Equals(intermediate))
+ }
+ // If SkippedStopStates!=nil, then it contains at least one
+ // configuration. For full-context reach operations, these
+ // configurations reached the end of the start rule, in which case we
+ // only add them back to reach if no configuration during the current
+ // closure operation reached such a state. This ensures AdaptivePredict
+ // chooses an alternative Matching the longest overall sequence when
+ // multiple alternatives are viable.
+ //
+ if skippedStopStates != nil && ((!fullCtx) || (!PredictionModehasConfigInRuleStopState(reach))) {
+ for l := 0; l < len(skippedStopStates); l++ {
+ reach.Add(skippedStopStates[l], p.mergeCache)
+ }
+ }
+
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("computeReachSet " + closure.String() + " -> " + reach.String())
+ }
+
+ if len(reach.configs) == 0 {
+ return nil
+ }
+
+ return reach
+}
+
+// removeAllConfigsNotInRuleStopState returns a configuration set containing only the configurations from
+// configs which are in a [RuleStopState]. If all
+// configurations in configs are already in a rule stop state, this
+// method simply returns configs.
+//
+// When lookToEndOfRule is true, this method uses
+// [ATN].[NextTokens] for each configuration in configs which is
+// not already in a rule stop state to see if a rule stop state is reachable
+// from the configuration via epsilon-only transitions.
+//
+// The func returns configs if all configurations in configs are in a
+// rule stop state, otherwise it returns a new configuration set containing only
+// the configurations from configs which are in a rule stop state
+func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs *ATNConfigSet, lookToEndOfRule bool) *ATNConfigSet {
+ if PredictionModeallConfigsInRuleStopStates(configs) {
+ return configs
+ }
+ result := NewATNConfigSet(configs.fullCtx)
+ for _, config := range configs.configs {
+ if _, ok := config.GetState().(*RuleStopState); ok {
+ result.Add(config, p.mergeCache)
+ continue
+ }
+ if lookToEndOfRule && config.GetState().GetEpsilonOnlyTransitions() {
+ NextTokens := p.atn.NextTokens(config.GetState(), nil)
+ if NextTokens.contains(TokenEpsilon) {
+ endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()]
+ result.Add(NewATNConfig4(config, endOfRuleState), p.mergeCache)
+ }
+ }
+ }
+ return result
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) *ATNConfigSet {
+ // always at least the implicit call to start rule
+ initialContext := predictionContextFromRuleContext(p.atn, ctx)
+ configs := NewATNConfigSet(fullCtx)
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("computeStartState from ATN state " + a.String() +
+ " initialContext=" + initialContext.String())
+ }
+
+ for i := 0; i < len(a.GetTransitions()); i++ {
+ target := a.GetTransitions()[i].getTarget()
+ c := NewATNConfig6(target, i+1, initialContext)
+ closureBusy := NewClosureBusy("ParserATNSimulator.computeStartState() make a closureBusy")
+ p.closure(c, configs, closureBusy, true, fullCtx, false)
+ }
+ return configs
+}
+
+// applyPrecedenceFilter transforms the start state computed by
+// [computeStartState] to the special start state used by a
+// precedence [DFA] for a particular precedence value. The transformation
+// process applies the following changes to the start state's configuration
+// set.
+//
+// 1. Evaluate the precedence predicates for each configuration using
+// [SemanticContext].evalPrecedence.
+// 2. Remove all configurations which predict an alternative greater than
+// 1, for which another configuration that predicts alternative 1 is in the
+// same ATN state with the same prediction context.
+//
+// Transformation 2 is valid for the following reasons:
+//
+// - The closure block cannot contain any epsilon transitions which bypass
+// the body of the closure, so all states reachable via alternative 1 are
+// part of the precedence alternatives of the transformed left-recursive
+// rule.
+// - The "primary" portion of a left recursive rule cannot contain an
+// epsilon transition, so the only way an alternative other than 1 can exist
+// in a state that is also reachable via alternative 1 is by nesting calls
+// to the left-recursive rule, with the outer calls not being at the
+// preferred precedence level.
+//
+// The prediction context must be considered by this filter to address
+// situations like the following:
+//
+// grammar TA;
+// prog: statement* EOF;
+// statement: letterA | statement letterA 'b';
+// letterA: 'a';
+//
+// In the above grammar, the [ATN] state immediately before the token
+// reference 'a' in letterA is reachable from the left edge
+// of both the primary and closure blocks of the left-recursive rule
+// statement. The prediction context associated with each of these
+// configurations distinguishes between them, and prevents the alternative
+// which stepped out to prog, and then back in to statement
+// from being eliminated by the filter.
+//
+// The func returns the transformed configuration set representing the start state
+// for a precedence [DFA] at a particular precedence level (determined by
+// calling [Parser].getPrecedence).
+func (p *ParserATNSimulator) applyPrecedenceFilter(configs *ATNConfigSet) *ATNConfigSet {
+
+ statesFromAlt1 := make(map[int]*PredictionContext)
+ configSet := NewATNConfigSet(configs.fullCtx)
+
+ for _, config := range configs.configs {
+ // handle alt 1 first
+ if config.GetAlt() != 1 {
+ continue
+ }
+ updatedContext := config.GetSemanticContext().evalPrecedence(p.parser, p.outerContext)
+ if updatedContext == nil {
+ // the configuration was eliminated
+ continue
+ }
+ statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext()
+ if updatedContext != config.GetSemanticContext() {
+ configSet.Add(NewATNConfig2(config, updatedContext), p.mergeCache)
+ } else {
+ configSet.Add(config, p.mergeCache)
+ }
+ }
+ for _, config := range configs.configs {
+
+ if config.GetAlt() == 1 {
+ // already handled
+ continue
+ }
+ // In the future, this elimination step could be updated to also
+ // filter the prediction context for alternatives predicting alt>1
+ // (basically a graph subtraction algorithm).
+ if !config.getPrecedenceFilterSuppressed() {
+ context := statesFromAlt1[config.GetState().GetStateNumber()]
+ if context != nil && context.Equals(config.GetContext()) {
+ // eliminated
+ continue
+ }
+ }
+ configSet.Add(config, p.mergeCache)
+ }
+ return configSet
+}
+
+func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATNState {
+ if trans.Matches(ttype, 0, p.atn.maxTokenType) {
+ return trans.getTarget()
+ }
+
+ return nil
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs *ATNConfigSet, nalts int) []SemanticContext {
+
+ altToPred := make([]SemanticContext, nalts+1)
+ for _, c := range configs.configs {
+ if ambigAlts.contains(c.GetAlt()) {
+ altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext())
+ }
+ }
+ nPredAlts := 0
+ for i := 1; i <= nalts; i++ {
+ pred := altToPred[i]
+ if pred == nil {
+ altToPred[i] = SemanticContextNone
+ } else if pred != SemanticContextNone {
+ nPredAlts++
+ }
+ }
+ // unambiguous alts are nil in altToPred
+ if nPredAlts == 0 {
+ altToPred = nil
+ }
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred))
+ }
+ return altToPred
+}
+
+func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPred []SemanticContext) []*PredPrediction {
+ pairs := make([]*PredPrediction, 0)
+ containsPredicate := false
+ for i := 1; i < len(altToPred); i++ {
+ pred := altToPred[i]
+ // un-predicated is indicated by SemanticContextNone
+ if ambigAlts != nil && ambigAlts.contains(i) {
+ pairs = append(pairs, NewPredPrediction(pred, i))
+ }
+ if pred != SemanticContextNone {
+ containsPredicate = true
+ }
+ }
+ if !containsPredicate {
+ return nil
+ }
+ return pairs
+}
+
+// getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule is used to improve the localization of error messages by
+// choosing an alternative rather than panicking with a NoViableAltException in particular prediction scenarios where the
+// Error state was reached during [ATN] simulation.
+//
+// The default implementation of this method uses the following
+// algorithm to identify an [ATN] configuration which successfully parsed the
+// decision entry rule. Choosing such an alternative ensures that the
+// [ParserRuleContext] returned by the calling rule will be complete
+// and valid, and the syntax error will be Reported later at a more
+// localized location.
+//
+// - If a syntactically valid path or paths reach the end of the decision rule, and
+// they are semantically valid if predicated, return the min associated alt.
+// - Else, if one or more semantically invalid but syntactically valid paths
+// exist, return the minimum associated alt.
+// - Otherwise, return [ATNInvalidAltNumber].
+//
+// In some scenarios, the algorithm described above could predict an
+// alternative which will result in a [FailedPredicateException] in
+// the parser. Specifically, this could occur if the only configuration
+// capable of successfully parsing to the end of the decision rule is
+// blocked by a semantic predicate. By choosing this alternative within
+// [AdaptivePredict] instead of panicking with a [NoViableAltException], the resulting
+// [FailedPredicateException] in the parser will identify the specific
+// predicate which is preventing the parser from successfully parsing the
+// decision rule, which helps developers identify and correct logic errors
+// in semantic predicates.
+//
+// Pass in configs holding the ATN configurations which were valid immediately before
+// the ERROR state was reached, and outerContext as the initial parser context from the paper,
+// or the parser stack at the instant before prediction commences.
+//
+// The func returns the value to return from [AdaptivePredict], or
+// [ATNInvalidAltNumber] if a suitable alternative was not
+// identified and [AdaptivePredict] should report an error instead.
+func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs *ATNConfigSet, outerContext ParserRuleContext) int {
+ cfgs := p.splitAccordingToSemanticValidity(configs, outerContext)
+ semValidConfigs := cfgs[0]
+ semInvalidConfigs := cfgs[1]
+ alt := p.GetAltThatFinishedDecisionEntryRule(semValidConfigs)
+ if alt != ATNInvalidAltNumber { // semantically/syntactically viable path exists
+ return alt
+ }
+ // Is there a syntactically valid path with a failed pred?
+ if len(semInvalidConfigs.configs) > 0 {
+ alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs)
+ if alt != ATNInvalidAltNumber { // syntactically viable path exists
+ return alt
+ }
+ }
+ return ATNInvalidAltNumber
+}
+
+func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs *ATNConfigSet) int {
+ alts := NewIntervalSet()
+
+ for _, c := range configs.configs {
+ _, ok := c.GetState().(*RuleStopState)
+
+ if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) {
+ alts.addOne(c.GetAlt())
+ }
+ }
+ if alts.length() == 0 {
+ return ATNInvalidAltNumber
+ }
+
+ return alts.first()
+}
+
+// splitAccordingToSemanticValidity walks the list of configurations and splits
+// them according to those that have preds evaluating to true/false. If there
+// is no pred, assume a true pred and include the config in the succeeded set.
+// Returns a pair of sets.
+//
+// Creates a new set so as not to alter the incoming parameter.
+//
+// Assumption: the input stream has been restored to the starting point of
+// prediction, which is where predicates need to evaluate.
+
+type ATNConfigSetPair struct {
+ item0, item1 *ATNConfigSet
+}
+
+func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs *ATNConfigSet, outerContext ParserRuleContext) []*ATNConfigSet {
+ succeeded := NewATNConfigSet(configs.fullCtx)
+ failed := NewATNConfigSet(configs.fullCtx)
+
+ for _, c := range configs.configs {
+ if c.GetSemanticContext() != SemanticContextNone {
+ predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext)
+ if predicateEvaluationResult {
+ succeeded.Add(c, nil)
+ } else {
+ failed.Add(c, nil)
+ }
+ } else {
+ succeeded.Add(c, nil)
+ }
+ }
+ return []*ATNConfigSet{succeeded, failed}
+}
+
+// evalSemanticContext looks through a list of predicate/alt pairs, returning alts for the
+// pairs that win. A [SemanticContextNone] predicate indicates an alt containing an
+// un-predicated config which behaves as "always true." If !complete
+// then we stop at the first predicate that evaluates to true. This
+// includes pairs with nil predicates.
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet {
+ predictions := NewBitSet()
+ for i := 0; i < len(predPredictions); i++ {
+ pair := predPredictions[i]
+ if pair.pred == SemanticContextNone {
+ predictions.add(pair.alt)
+ if !complete {
+ break
+ }
+ continue
+ }
+
+ predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext)
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorDFADebug {
+ fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult))
+ }
+ if predicateEvaluationResult {
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorDFADebug {
+ fmt.Println("PREDICT " + fmt.Sprint(pair.alt))
+ }
+ predictions.add(pair.alt)
+ if !complete {
+ break
+ }
+ }
+ }
+ return predictions
+}
+
+func (p *ParserATNSimulator) closure(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx, treatEOFAsEpsilon bool) {
+ initialDepth := 0
+ p.closureCheckingStopState(config, configs, closureBusy, collectPredicates,
+ fullCtx, initialDepth, treatEOFAsEpsilon)
+}
+
+func (p *ParserATNSimulator) closureCheckingStopState(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("closure(" + config.String() + ")")
+ }
+
+ var stack []*ATNConfig
+ visited := make(map[*ATNConfig]bool)
+
+ stack = append(stack, config)
+
+ for len(stack) > 0 {
+ currConfig := stack[len(stack)-1]
+ stack = stack[:len(stack)-1]
+
+ if _, ok := visited[currConfig]; ok {
+ continue
+ }
+ visited[currConfig] = true
+
+ if _, ok := currConfig.GetState().(*RuleStopState); ok {
+ // We hit rule end. If we have context info, use it to
+ // run through all possible stack tops in ctx.
+ if !currConfig.GetContext().isEmpty() {
+ for i := 0; i < currConfig.GetContext().length(); i++ {
+ if currConfig.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState {
+ if fullCtx {
+ nb := NewATNConfig1(currConfig, currConfig.GetState(), BasePredictionContextEMPTY)
+ configs.Add(nb, p.mergeCache)
+ continue
+ } else {
+ // we have no context info, just chase follow links (if greedy)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("FALLING off rule " + p.getRuleName(currConfig.GetState().GetRuleIndex()))
+ }
+ p.closureWork(currConfig, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
+ }
+ continue
+ }
+ returnState := p.atn.states[currConfig.GetContext().getReturnState(i)]
+ newContext := currConfig.GetContext().GetParent(i) // "pop" return state
+
+ c := NewATNConfig5(returnState, currConfig.GetAlt(), newContext, currConfig.GetSemanticContext())
+ // While we have context to pop back from, we may have
+ // gotten that context AFTER having fallen off a rule.
+ // Make sure we track that we are now out of context.
+ c.SetReachesIntoOuterContext(currConfig.GetReachesIntoOuterContext())
+
+ stack = append(stack, c)
+ }
+ continue
+ } else if fullCtx {
+ // reached end of start rule
+ configs.Add(currConfig, p.mergeCache)
+ continue
+ } else {
+ // else if we have no context info, just chase follow links (if greedy)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("FALLING off rule " + p.getRuleName(currConfig.GetState().GetRuleIndex()))
+ }
+ }
+ }
+
+ p.closureWork(currConfig, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
+ }
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) closureCheckingStopStateRecursive(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("closure(" + config.String() + ")")
+ }
+
+ if _, ok := config.GetState().(*RuleStopState); ok {
+ // We hit rule end. If we have context info, use it to
+ // run through all possible stack tops in ctx.
+ if !config.GetContext().isEmpty() {
+ for i := 0; i < config.GetContext().length(); i++ {
+ if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState {
+ if fullCtx {
+ nb := NewATNConfig1(config, config.GetState(), BasePredictionContextEMPTY)
+ configs.Add(nb, p.mergeCache)
+ continue
+ } else {
+ // we have no context info, just chase follow links (if greedy)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex()))
+ }
+ p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
+ }
+ continue
+ }
+ returnState := p.atn.states[config.GetContext().getReturnState(i)]
+ newContext := config.GetContext().GetParent(i) // "pop" return state
+
+ c := NewATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext())
+ // While we have context to pop back from, we may have
+ // gotten that context AFTER having fallen off a rule.
+ // Make sure we track that we are now out of context.
+ c.SetReachesIntoOuterContext(config.GetReachesIntoOuterContext())
+ p.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth-1, treatEOFAsEpsilon)
+ }
+ return
+ } else if fullCtx {
+ // reached end of start rule
+ configs.Add(config, p.mergeCache)
+ return
+ } else {
+ // else if we have no context info, just chase follow links (if greedy)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex()))
+ }
+ }
+ }
+ p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
+}
+
+// Do the actual work of walking epsilon edges
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) closureWork(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+ state := config.GetState()
+ // optimization
+ if !state.GetEpsilonOnlyTransitions() {
+ configs.Add(config, p.mergeCache)
+ // make sure to not return here, because EOF transitions can act as
+ // both epsilon transitions and non-epsilon transitions.
+ }
+ for i := 0; i < len(state.GetTransitions()); i++ {
+ if i == 0 && p.canDropLoopEntryEdgeInLeftRecursiveRule(config) {
+ continue
+ }
+
+ t := state.GetTransitions()[i]
+ _, ok := t.(*ActionTransition)
+ continueCollecting := collectPredicates && !ok
+ c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon)
+ if c != nil {
+ newDepth := depth
+
+ if _, ok := config.GetState().(*RuleStopState); ok {
+ // Target fell off end of rule; mark resulting c as having dipped into outer context.
+ // We can't get here if incoming config was rule stop and we had context.
+ // Track how far we dip into outer context. Might
+ // come in handy, and we avoid evaluating context-dependent
+ // preds if this is > 0.
+
+ if p.dfa != nil && p.dfa.getPrecedenceDfa() {
+ if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() {
+ c.setPrecedenceFilterSuppressed(true)
+ }
+ }
+
+ c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1)
+
+ _, present := closureBusy.Put(c)
+ if present {
+ // avoid infinite recursion for right-recursive rules
+ continue
+ }
+
+ configs.dipsIntoOuterContext = true // TODO: can remove? only care when we add to set per middle of this method
+ newDepth--
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("dips into outer ctx: " + c.String())
+ }
+ } else {
+
+ if !t.getIsEpsilon() {
+ _, present := closureBusy.Put(c)
+ if present {
+ // avoid infinite recursion for EOF* and EOF+
+ continue
+ }
+ }
+ if _, ok := t.(*RuleTransition); ok {
+ // latch when newDepth goes negative - once we step out of the entry context we can't return
+ if newDepth >= 0 {
+ newDepth++
+ }
+ }
+ }
+ p.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEOFAsEpsilon)
+ }
+ }
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config *ATNConfig) bool {
+ if !runtimeConfig.lRLoopEntryBranchOpt {
+ return false
+ }
+
+ _p := config.GetState()
+
+ // First check to see if we are in StarLoopEntryState generated during
+ // left-recursion elimination. For efficiency, also check if
+ // the context has an empty stack case. If so, it would mean
+ // global FOLLOW, so we can't perform the optimization.
+ if _p.GetStateType() != ATNStateStarLoopEntry {
+ return false
+ }
+ startLoop, ok := _p.(*StarLoopEntryState)
+ if !ok {
+ return false
+ }
+ if !startLoop.precedenceRuleDecision ||
+ config.GetContext().isEmpty() ||
+ config.GetContext().hasEmptyPath() {
+ return false
+ }
+
+ // Require all return states to return back to the same rule
+ // that _p is in.
+ numCtxs := config.GetContext().length()
+ for i := 0; i < numCtxs; i++ {
+ returnState := p.atn.states[config.GetContext().getReturnState(i)]
+ if returnState.GetRuleIndex() != _p.GetRuleIndex() {
+ return false
+ }
+ }
+ x := _p.GetTransitions()[0].getTarget()
+ decisionStartState := x.(BlockStartState)
+ blockEndStateNum := decisionStartState.getEndState().stateNumber
+ blockEndState := p.atn.states[blockEndStateNum].(*BlockEndState)
+
+ // Verify that the top of each stack context leads to loop entry/exit
+ // state through epsilon edges and w/o leaving rule.
+
+ for i := 0; i < numCtxs; i++ { // for each stack context
+ returnStateNumber := config.GetContext().getReturnState(i)
+ returnState := p.atn.states[returnStateNumber]
+
+ // all states must have single outgoing epsilon edge
+ if len(returnState.GetTransitions()) != 1 || !returnState.GetTransitions()[0].getIsEpsilon() {
+ return false
+ }
+
+ // Look for prefix op case like 'not expr' or '(' type ')' expr
+ returnStateTarget := returnState.GetTransitions()[0].getTarget()
+ if returnState.GetStateType() == ATNStateBlockEnd && returnStateTarget == _p {
+ continue
+ }
+
+ // Look for 'expr op expr' or case where expr's return state is block end
+ // of (...)* internal block; the block end points to loop back
+ // which points to _p, but we don't need to check that
+ if returnState == blockEndState {
+ continue
+ }
+
+ // Look for ternary expr ? expr : expr. The return state points at block end,
+ // which points at loop entry state
+ if returnStateTarget == blockEndState {
+ continue
+ }
+
+ // Look for complex prefix 'between expr and expr' case where 2nd expr's
+ // return state points at block end state of (...)* internal block
+ if returnStateTarget.GetStateType() == ATNStateBlockEnd &&
+ len(returnStateTarget.GetTransitions()) == 1 &&
+ returnStateTarget.GetTransitions()[0].getIsEpsilon() &&
+ returnStateTarget.GetTransitions()[0].getTarget() == _p {
+ continue
+ }
+
+ // anything else ain't conforming
+ return false
+ }
+
+ return true
+}
+
+func (p *ParserATNSimulator) getRuleName(index int) string {
+ if p.parser != nil && index >= 0 {
+ return p.parser.GetRuleNames()[index]
+ }
+ var sb strings.Builder
+ sb.Grow(32)
+
+ sb.WriteString("')
+ return sb.String()
+}
+
+func (p *ParserATNSimulator) getEpsilonTarget(config *ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) *ATNConfig {
+
+ switch t.getSerializationType() {
+ case TransitionRULE:
+ return p.ruleTransition(config, t.(*RuleTransition))
+ case TransitionPRECEDENCE:
+ return p.precedenceTransition(config, t.(*PrecedencePredicateTransition), collectPredicates, inContext, fullCtx)
+ case TransitionPREDICATE:
+ return p.predTransition(config, t.(*PredicateTransition), collectPredicates, inContext, fullCtx)
+ case TransitionACTION:
+ return p.actionTransition(config, t.(*ActionTransition))
+ case TransitionEPSILON:
+ return NewATNConfig4(config, t.getTarget())
+ case TransitionATOM, TransitionRANGE, TransitionSET:
+ // EOF transitions act like epsilon transitions after the first EOF
+ // transition is traversed
+ if treatEOFAsEpsilon {
+ if t.Matches(TokenEOF, 0, 1) {
+ return NewATNConfig4(config, t.getTarget())
+ }
+ }
+ return nil
+ default:
+ return nil
+ }
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) actionTransition(config *ATNConfig, t *ActionTransition) *ATNConfig {
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex))
+ }
+ return NewATNConfig4(config, t.getTarget())
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) precedenceTransition(config *ATNConfig,
+ pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *ATNConfig {
+
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " +
+ strconv.Itoa(pt.precedence) + ">=_p, ctx dependent=true")
+ if p.parser != nil {
+ fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil)))
+ }
+ }
+ var c *ATNConfig
+ if collectPredicates && inContext {
+ if fullCtx {
+ // In full context mode, we can evaluate predicates on-the-fly
+ // during closure, which dramatically reduces the size of
+ // the config sets. It also obviates the need to test predicates
+ // later during conflict resolution.
+ currentPosition := p.input.Index()
+ p.input.Seek(p.startIndex)
+ predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext)
+ p.input.Seek(currentPosition)
+ if predSucceeds {
+ c = NewATNConfig4(config, pt.getTarget()) // no pred context
+ }
+ } else {
+ newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate())
+ c = NewATNConfig3(config, pt.getTarget(), newSemCtx)
+ }
+ } else {
+ c = NewATNConfig4(config, pt.getTarget())
+ }
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("runtimeConfig from pred transition=" + c.String())
+ }
+ return c
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) predTransition(config *ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *ATNConfig {
+
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.ruleIndex) +
+ ":" + strconv.Itoa(pt.predIndex) + ", ctx dependent=" + fmt.Sprint(pt.isCtxDependent))
+ if p.parser != nil {
+ fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil)))
+ }
+ }
+ var c *ATNConfig
+ if collectPredicates && (!pt.isCtxDependent || inContext) {
+ if fullCtx {
+ // In full context mode, we can evaluate predicates on-the-fly
+ // during closure, which dramatically reduces the size of
+ // the config sets. It also obviates the need to test predicates
+ // later during conflict resolution.
+ currentPosition := p.input.Index()
+ p.input.Seek(p.startIndex)
+ predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext)
+ p.input.Seek(currentPosition)
+ if predSucceeds {
+ c = NewATNConfig4(config, pt.getTarget()) // no pred context
+ }
+ } else {
+ newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate())
+ c = NewATNConfig3(config, pt.getTarget(), newSemCtx)
+ }
+ } else {
+ c = NewATNConfig4(config, pt.getTarget())
+ }
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("config from pred transition=" + c.String())
+ }
+ return c
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) ruleTransition(config *ATNConfig, t *RuleTransition) *ATNConfig {
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("CALL rule " + p.getRuleName(t.getTarget().GetRuleIndex()) + ", ctx=" + config.GetContext().String())
+ }
+ returnState := t.followState
+ newContext := SingletonBasePredictionContextCreate(config.GetContext(), returnState.GetStateNumber())
+ return NewATNConfig1(config, t.getTarget(), newContext)
+}
+
+func (p *ParserATNSimulator) getConflictingAlts(configs *ATNConfigSet) *BitSet {
+ altsets := PredictionModegetConflictingAltSubsets(configs)
+ return PredictionModeGetAlts(altsets)
+}
+
+// getConflictingAltsOrUniqueAlt Sam pointed out a problem with the previous definition, v3, of
+// ambiguous states. If we have another state associated with conflicting
+// alternatives, we should keep going. For example, the following grammar
+//
+// s : (ID | ID ID?) ;
+//
+// When the [ATN] simulation reaches the state before ';', it has a [DFA]
+// state that looks like:
+//
+// [12|1|[], 6|2|[], 12|2|[]].
+//
+// Naturally
+//
+// 12|1|[] and 12|2|[]
+//
+// conflict, but we cannot stop processing this node
+// because alternative 2 has another way to continue, via
+//
+// [6|2|[]].
+//
+// The key is that we have a single state that has configs associated
+// with only a single alternative, 2, and crucially the state transitions
+// among the configurations are all non-epsilon transitions. That means
+// we don't consider any conflicts that include alternative 2. So, we
+// ignore the conflict between alts 1 and 2. We ignore a set of
+// conflicting alts when there is an intersection with an alternative
+// associated with a single alt state in the state config-list map.
+//
+// It's also the case that we might have two conflicting configurations but
+// also a 3rd non-conflicting configuration for a different alternative:
+//
+// [1|1|[], 1|2|[], 8|3|[]].
+//
+// This can come about from grammar:
+//
+// a : A | A | A B
+//
+// After Matching input A, we reach the stop state for rule A, state 1.
+// State 8 is the state right before B. Clearly alternatives 1 and 2
+// conflict and no amount of further lookahead will separate the two.
+// However, alternative 3 will be able to continue, so we do not
+// stop working on this state.
+//
+// In the previous example, we're concerned
+// with states associated with the conflicting alternatives. Here alt
+// 3 is not associated with the conflicting configs, but since we can continue
+// looking for input reasonably, I don't declare the state done. We
+// ignore a set of conflicting alts when we have an alternative
+// that we still need to pursue.
+func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs *ATNConfigSet) *BitSet {
+ var conflictingAlts *BitSet
+ if configs.uniqueAlt != ATNInvalidAltNumber {
+ conflictingAlts = NewBitSet()
+ conflictingAlts.add(configs.uniqueAlt)
+ } else {
+ conflictingAlts = configs.conflictingAlts
+ }
+ return conflictingAlts
+}
+
+func (p *ParserATNSimulator) GetTokenName(t int) string {
+ if t == TokenEOF {
+ return "EOF"
+ }
+
+ if p.parser != nil && p.parser.GetLiteralNames() != nil && t < len(p.parser.GetLiteralNames()) {
+ return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">"
+ }
+
+ if p.parser != nil && p.parser.GetLiteralNames() != nil && t < len(p.parser.GetSymbolicNames()) {
+ return p.parser.GetSymbolicNames()[t] + "<" + strconv.Itoa(t) + ">"
+ }
+
+ return strconv.Itoa(t)
+}
+
+func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string {
+ return p.GetTokenName(input.LA(1))
+}
+
+// dumpDeadEndConfigs was used for debugging in [AdaptivePredict] around [execATN],
+// but was cut out for clarity now that the algorithm works well. We can leave
+// this "dead" code for a bit.
+func (p *ParserATNSimulator) dumpDeadEndConfigs(_ *NoViableAltException) {
+
+ panic("Not implemented")
+
+ // fmt.Println("dead end configs: ")
+ // var decs = nvae.deadEndConfigs
+ //
+ // for i := 0; i < len(decs); i++ {
+ // c := decs[i]
+ // var trans = "no edges"
+ // if len(c.state.GetTransitions()) > 0 {
+ // var t = c.state.GetTransitions()[0]
+ // if t2, ok := t.(*AtomTransition); ok {
+ // trans = "Atom "+ p.GetTokenName(t2.label)
+ // } else if t3, ok := t.(SetTransition); ok {
+ // _, ok := t.(*NotSetTransition)
+ //
+ // var s string
+ // if (ok){
+ // s = "~"
+ // }
+ //
+ // trans = s + "Set " + t3.set
+ // }
+ // }
+ // fmt.Errorf(c.String(p.parser, true) + ":" + trans)
+ // }
+}
+
+func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs *ATNConfigSet, startIndex int) *NoViableAltException {
+ return NewNoViableAltException(p.parser, input, input.Get(startIndex), input.LT(1), configs, outerContext)
+}
+
+func (p *ParserATNSimulator) getUniqueAlt(configs *ATNConfigSet) int {
+ alt := ATNInvalidAltNumber
+ for _, c := range configs.configs {
+ if alt == ATNInvalidAltNumber {
+ alt = c.GetAlt() // found first alt
+ } else if c.GetAlt() != alt {
+ return ATNInvalidAltNumber
+ }
+ }
+ return alt
+}
+
+// addDFAEdge adds an edge from the from state to the to state in dfa upon
+// the input symbol t, if possible. This method calls addDFAState to ensure
+// the to state is present in the DFA. If from is nil, or if t is outside the
+// range of edges that can be represented in the DFA tables, this method
+// returns without adding the edge to the DFA.
+//
+// If to is nil, this method returns nil. Otherwise, it returns the [DFAState]
+// returned by calling addDFAState for the to state, which may not be the
+// incoming to instance if an equivalent state was already present.
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState {
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t))
+ }
+ if to == nil {
+ return nil
+ }
+ p.atn.stateMu.Lock()
+ to = p.addDFAState(dfa, to) // use the existing state if possible, instead of the incoming one
+ p.atn.stateMu.Unlock()
+ if from == nil || t < -1 || t > p.atn.maxTokenType {
+ return to
+ }
+ p.atn.edgeMu.Lock()
+ if from.getEdges() == nil {
+ from.setEdges(make([]*DFAState, p.atn.maxTokenType+1+1))
+ }
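+ // Edges are indexed by token type shifted up by one, so that EOF (token type -1) lands in slot 0.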
+ from.setIthEdge(t+1, to) // connect
+ p.atn.edgeMu.Unlock()
+
+ if runtimeConfig.parserATNSimulatorDebug {
+ var names []string
+ if p.parser != nil {
+ names = p.parser.GetLiteralNames()
+ }
+
+ fmt.Println("DFA=\n" + dfa.String(names, nil))
+ }
+ return to
+}
+
+// addDFAState adds state D to the [DFA] if it is not already present, and returns
+// the actual instance stored in the [DFA]. If a state equivalent to D
+// is already in the [DFA], the existing state is returned. Otherwise, this
+// method returns D after adding it to the [DFA].
+//
+// If D is [ATNSimulatorError], this method returns [ATNSimulatorError] and
+// does not change the DFA.
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState {
+ if d == ATNSimulatorError {
+ return d
+ }
+
+ existing, present := dfa.Get(d)
+ if present {
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Print("addDFAState " + d.String() + " exists")
+ }
+ return existing
+ }
+
+ // The state is not already present, so number it, freeze its
+ // configuration set, and add it to the DFA.
+ //
+ d.stateNumber = dfa.Len()
+ if !d.configs.readOnly {
+ d.configs.OptimizeConfigs(&p.BaseATNSimulator)
+ d.configs.readOnly = true
+ d.configs.configLookup = nil
+ }
+ dfa.Put(d)
+
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("addDFAState new " + d.String())
+ }
+
+ return d
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs *ATNConfigSet, startIndex, stopIndex int) {
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug {
+ interval := NewInterval(startIndex, stopIndex+1)
+ fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
+ ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
+ }
+ if p.parser != nil {
+ p.parser.GetErrorListenerDispatch().ReportAttemptingFullContext(p.parser, dfa, startIndex, stopIndex, conflictingAlts, configs)
+ }
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs *ATNConfigSet, startIndex, stopIndex int) {
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug {
+ interval := NewInterval(startIndex, stopIndex+1)
+ fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
+ ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
+ }
+ if p.parser != nil {
+ p.parser.GetErrorListenerDispatch().ReportContextSensitivity(p.parser, dfa, startIndex, stopIndex, prediction, configs)
+ }
+}
+
+// ReportAmbiguity reports an ambiguity in the parse, which shows that the parser will explore a different route.
+//
+// When parsing context-sensitively, we know this is an ambiguity rather than a conflict or error, but we can report
+// it to the developer so that they can see that it is happening and take action if they want to.
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, _ *DFAState, startIndex, stopIndex int,
+ exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug {
+ interval := NewInterval(startIndex, stopIndex+1)
+ fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() +
+ ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
+ }
+ if p.parser != nil {
+ p.parser.GetErrorListenerDispatch().ReportAmbiguity(p.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
+ }
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go b/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go
similarity index 77%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go
rename to vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go
index 1c8cee747..c249bc138 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go
@@ -31,7 +31,9 @@ type ParserRuleContext interface {
}
type BaseParserRuleContext struct {
- *BaseRuleContext
+ parentCtx RuleContext
+ invokingState int
+ RuleIndex int
start, stop Token
exception RecognitionException
@@ -40,8 +42,22 @@ type BaseParserRuleContext struct {
func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext {
prc := new(BaseParserRuleContext)
+ InitBaseParserRuleContext(prc, parent, invokingStateNumber)
+ return prc
+}
+
+func InitBaseParserRuleContext(prc *BaseParserRuleContext, parent ParserRuleContext, invokingStateNumber int) {
+ // What context invoked this rule?
+ prc.parentCtx = parent
- prc.BaseRuleContext = NewBaseRuleContext(parent, invokingStateNumber)
+ // What state invoked the rule associated with this context?
+ // The "return address" is the followState of invokingState
+ // If parent is nil, this should be -1.
+ if parent == nil {
+ prc.invokingState = -1
+ } else {
+ prc.invokingState = invokingStateNumber
+ }
prc.RuleIndex = -1
// * If we are debugging or building a parse tree for a Visitor,
@@ -56,8 +72,6 @@ func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int)
// The exception that forced prc rule to return. If the rule successfully
// completed, prc is {@code nil}.
prc.exception = nil
-
- return prc
}
func (prc *BaseParserRuleContext) SetException(e RecognitionException) {
@@ -90,14 +104,15 @@ func (prc *BaseParserRuleContext) GetText() string {
return s
}
-// Double dispatch methods for listeners
-func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener) {
+// EnterRule is called when any rule is entered.
+func (prc *BaseParserRuleContext) EnterRule(_ ParseTreeListener) {
}
-func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener) {
+// ExitRule is called when any rule is exited.
+func (prc *BaseParserRuleContext) ExitRule(_ ParseTreeListener) {
}
-// * Does not set parent link other add methods do that///
+// Does not set the parent link; other add methods do that.
func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode {
if prc.children == nil {
prc.children = make([]Tree, 0)
@@ -120,10 +135,9 @@ func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext {
return child
}
-// * Used by EnterOuterAlt to toss out a RuleContext previously added as
-// we entered a rule. If we have // label, we will need to remove
-// generic ruleContext object.
-// /
+// RemoveLastChild is used by [EnterOuterAlt] to toss out a [RuleContext] previously added as
+// we entered a rule. If we have a label, we will need to remove
+// the generic ruleContext object.
func (prc *BaseParserRuleContext) RemoveLastChild() {
if prc.children != nil && len(prc.children) > 0 {
prc.children = prc.children[0 : len(prc.children)-1]
@@ -293,7 +307,7 @@ func (prc *BaseParserRuleContext) GetChildCount() int {
return len(prc.children)
}
-func (prc *BaseParserRuleContext) GetSourceInterval() *Interval {
+func (prc *BaseParserRuleContext) GetSourceInterval() Interval {
if prc.start == nil || prc.stop == nil {
return TreeInvalidInterval
}
@@ -340,6 +354,50 @@ func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) s
return s
}
+func (prc *BaseParserRuleContext) SetParent(v Tree) {
+ if v == nil {
+ prc.parentCtx = nil
+ } else {
+ prc.parentCtx = v.(RuleContext)
+ }
+}
+
+func (prc *BaseParserRuleContext) GetInvokingState() int {
+ return prc.invokingState
+}
+
+func (prc *BaseParserRuleContext) SetInvokingState(t int) {
+ prc.invokingState = t
+}
+
+func (prc *BaseParserRuleContext) GetRuleIndex() int {
+ return prc.RuleIndex
+}
+
+func (prc *BaseParserRuleContext) GetAltNumber() int {
+ return ATNInvalidAltNumber
+}
+
+func (prc *BaseParserRuleContext) SetAltNumber(_ int) {}
+
+// IsEmpty returns true if this context is empty.
+//
+// A context is empty if there is no invoking state, meaning that nothing
+// invoked the current context.
+func (prc *BaseParserRuleContext) IsEmpty() bool {
+ return prc.invokingState == -1
+}
+
+// GetParent returns the parent context of this context, or nil if this
+// context is the root of the parse tree and has no parent.
+func (prc *BaseParserRuleContext) GetParent() Tree {
+ return prc.parentCtx
+}
+
var ParserRuleContextEmpty = NewBaseParserRuleContext(nil, -1)
type InterpreterRuleContext interface {
@@ -350,6 +408,7 @@ type BaseInterpreterRuleContext struct {
*BaseParserRuleContext
}
+//goland:noinspection GoUnusedExportedFunction
func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext {
prc := new(BaseInterpreterRuleContext)
diff --git a/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go b/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go
new file mode 100644
index 000000000..c1b80cc1f
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go
@@ -0,0 +1,727 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "golang.org/x/exp/slices"
+ "strconv"
+)
+
+var _emptyPredictionContextHash int
+
+func init() {
+ _emptyPredictionContextHash = murmurInit(1)
+ _emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0)
+}
+
+func calculateEmptyHash() int {
+ return _emptyPredictionContextHash
+}
+
+const (
+ // BasePredictionContextEmptyReturnState represents $ in an array in full context mode, where $
+ // doesn't mean wildcard:
+ //
+ // $ + x = [$,x]
+ //
+ // Here,
+ //
+ // $ = EmptyReturnState
+ BasePredictionContextEmptyReturnState = 0x7FFFFFFF
+)
+
+// TODO: JI These are meant to be atomics - this does not seem to match the Java runtime here
+//
+//goland:noinspection GoUnusedGlobalVariable
+var (
+ BasePredictionContextglobalNodeCount = 1
+ BasePredictionContextid = BasePredictionContextglobalNodeCount
+)
+
+const (
+ PredictionContextEmpty = iota
+ PredictionContextSingleton
+ PredictionContextArray
+)
+
+// PredictionContext is a Go-idiomatic implementation of PredictionContext that does not try to
+// emulate inheritance from Java, and can be used without an interface definition. An interface
+// is not required because no user code will ever need to implement this interface.
+type PredictionContext struct {
+ cachedHash int
+ pcType int
+ parentCtx *PredictionContext
+ returnState int
+ parents []*PredictionContext
+ returnStates []int
+}
+
+func NewEmptyPredictionContext() *PredictionContext {
+ nep := &PredictionContext{}
+ nep.cachedHash = calculateEmptyHash()
+ nep.pcType = PredictionContextEmpty
+ nep.returnState = BasePredictionContextEmptyReturnState
+ return nep
+}
+
+func NewBaseSingletonPredictionContext(parent *PredictionContext, returnState int) *PredictionContext {
+ pc := &PredictionContext{}
+ pc.pcType = PredictionContextSingleton
+ pc.returnState = returnState
+ pc.parentCtx = parent
+ if parent != nil {
+ pc.cachedHash = calculateHash(parent, returnState)
+ } else {
+ pc.cachedHash = calculateEmptyHash()
+ }
+ return pc
+}
+
+func SingletonBasePredictionContextCreate(parent *PredictionContext, returnState int) *PredictionContext {
+ if returnState == BasePredictionContextEmptyReturnState && parent == nil {
+ // someone can pass in the bits of an array ctx that mean $
+ return BasePredictionContextEMPTY
+ }
+ return NewBaseSingletonPredictionContext(parent, returnState)
+}
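+
+// As an illustrative sketch only (the state numbers below are hypothetical),
+// a stack context for a call chain r1 -> r2 can be built by chaining
+// singletons onto the empty context:
+//
+// root := SingletonBasePredictionContextCreate(nil, BasePredictionContextEmptyReturnState) // $
+// r1 := SingletonBasePredictionContextCreate(root, 10) // return to state 10
+// r2 := SingletonBasePredictionContextCreate(r1, 20)   // then to state 20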
+
+func NewArrayPredictionContext(parents []*PredictionContext, returnStates []int) *PredictionContext {
+ // Parent can be nil only if full ctx mode and we make an array from
+ // BasePredictionContextEMPTY and non-empty. We merge BasePredictionContextEMPTY
+ // by using a nil parent and returnState == BasePredictionContextEmptyReturnState.
+ hash := murmurInit(1)
+ for _, parent := range parents {
+ hash = murmurUpdate(hash, parent.Hash())
+ }
+ for _, returnState := range returnStates {
+ hash = murmurUpdate(hash, returnState)
+ }
+ hash = murmurFinish(hash, len(parents)<<1)
+
+ nec := &PredictionContext{}
+ nec.cachedHash = hash
+ nec.pcType = PredictionContextArray
+ nec.parents = parents
+ nec.returnStates = returnStates
+ return nec
+}
+
+func (p *PredictionContext) Hash() int {
+ return p.cachedHash
+}
+
+func (p *PredictionContext) Equals(other Collectable[*PredictionContext]) bool {
+ switch p.pcType {
+ case PredictionContextEmpty:
+ // Guard against a nil interface value, which would make the type assertion panic.
+ if other == nil {
+ return true
+ }
+ otherP, ok := other.(*PredictionContext)
+ return ok && (otherP == nil || otherP.isEmpty())
+ case PredictionContextSingleton:
+ return p.SingletonEquals(other)
+ case PredictionContextArray:
+ return p.ArrayEquals(other)
+ }
+ return false
+}
+
+func (p *PredictionContext) ArrayEquals(o Collectable[*PredictionContext]) bool {
+ if o == nil {
+ return false
+ }
+ other := o.(*PredictionContext)
+ if other == nil || other.pcType != PredictionContextArray {
+ return false
+ }
+ if p.cachedHash != other.Hash() {
+ return false // can't be same if hash is different
+ }
+
+ // Must compare the actual array elements and not just the array address
+ //
+ return slices.Equal(p.returnStates, other.returnStates) &&
+ slices.EqualFunc(p.parents, other.parents, func(x, y *PredictionContext) bool {
+ return x.Equals(y)
+ })
+}
+
+func (p *PredictionContext) SingletonEquals(other Collectable[*PredictionContext]) bool {
+ if other == nil {
+ return false
+ }
+ otherP := other.(*PredictionContext)
+ if otherP == nil {
+ return false
+ }
+
+ if p.cachedHash != otherP.Hash() {
+ return false // Can't be same if hash is different
+ }
+
+ if p.returnState != otherP.getReturnState(0) {
+ return false
+ }
+
+ // Both parents must be nil if one is
+ if p.parentCtx == nil {
+ return otherP.parentCtx == nil
+ }
+
+ return p.parentCtx.Equals(otherP.parentCtx)
+}
+
+func (p *PredictionContext) GetParent(i int) *PredictionContext {
+ switch p.pcType {
+ case PredictionContextEmpty:
+ return nil
+ case PredictionContextSingleton:
+ return p.parentCtx
+ case PredictionContextArray:
+ return p.parents[i]
+ }
+ return nil
+}
+
+func (p *PredictionContext) getReturnState(i int) int {
+ switch p.pcType {
+ case PredictionContextArray:
+ return p.returnStates[i]
+ default:
+ return p.returnState
+ }
+}
+
+func (p *PredictionContext) GetReturnStates() []int {
+ switch p.pcType {
+ case PredictionContextArray:
+ return p.returnStates
+ default:
+ return []int{p.returnState}
+ }
+}
+
+func (p *PredictionContext) length() int {
+ switch p.pcType {
+ case PredictionContextArray:
+ return len(p.returnStates)
+ default:
+ return 1
+ }
+}
+
+func (p *PredictionContext) hasEmptyPath() bool {
+ switch p.pcType {
+ case PredictionContextSingleton:
+ return p.returnState == BasePredictionContextEmptyReturnState
+ }
+ return p.getReturnState(p.length()-1) == BasePredictionContextEmptyReturnState
+}
+
+func (p *PredictionContext) String() string {
+ switch p.pcType {
+ case PredictionContextEmpty:
+ return "$"
+ case PredictionContextSingleton:
+ var up string
+
+ if p.parentCtx == nil {
+ up = ""
+ } else {
+ up = p.parentCtx.String()
+ }
+
+ if len(up) == 0 {
+ if p.returnState == BasePredictionContextEmptyReturnState {
+ return "$"
+ }
+
+ return strconv.Itoa(p.returnState)
+ }
+
+ return strconv.Itoa(p.returnState) + " " + up
+ case PredictionContextArray:
+ if p.isEmpty() {
+ return "[]"
+ }
+
+ s := "["
+ for i := 0; i < len(p.returnStates); i++ {
+ if i > 0 {
+ s = s + ", "
+ }
+ if p.returnStates[i] == BasePredictionContextEmptyReturnState {
+ s = s + "$"
+ continue
+ }
+ s = s + strconv.Itoa(p.returnStates[i])
+ if !p.parents[i].isEmpty() {
+ s = s + " " + p.parents[i].String()
+ } else {
+ s = s + "nil"
+ }
+ }
+ return s + "]"
+
+ default:
+ return "unknown"
+ }
+}
+
+func (p *PredictionContext) isEmpty() bool {
+ switch p.pcType {
+ case PredictionContextEmpty:
+ return true
+ case PredictionContextArray:
+ // since EmptyReturnState can only appear in the last position, we
+ // don't need to verify that size==1
+ return p.returnStates[0] == BasePredictionContextEmptyReturnState
+ default:
+ return false
+ }
+}
+
+func (p *PredictionContext) Type() int {
+ return p.pcType
+}
+
+func calculateHash(parent *PredictionContext, returnState int) int {
+ h := murmurInit(1)
+ h = murmurUpdate(h, parent.Hash())
+ h = murmurUpdate(h, returnState)
+ return murmurFinish(h, 2)
+}
+
+// predictionContextFromRuleContext converts a [RuleContext] tree to a [PredictionContext] graph.
+// It returns [BasePredictionContextEMPTY] if outerContext is empty or nil.
+func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) *PredictionContext {
+ if outerContext == nil {
+ outerContext = ParserRuleContextEmpty
+ }
+ // if we are in RuleContext of start rule, s, then BasePredictionContext
+ // is EMPTY. Nobody called us. (if we are empty, return empty)
+ if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty {
+ return BasePredictionContextEMPTY
+ }
+ // If we have a parent, convert it to a BasePredictionContext graph
+ parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext))
+ state := a.states[outerContext.GetInvokingState()]
+ transition := state.GetTransitions()[0]
+
+ return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber())
+}
+
+func merge(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext {
+
+ // Share same graph if both same
+ //
+ if a == b || a.Equals(b) {
+ return a
+ }
+
+ if a.pcType == PredictionContextSingleton && b.pcType == PredictionContextSingleton {
+ return mergeSingletons(a, b, rootIsWildcard, mergeCache)
+ }
+ // At least one of a or b is array
+ // If one is $ and rootIsWildcard, return $ as wildcard
+ if rootIsWildcard {
+ if a.isEmpty() {
+ return a
+ }
+ if b.isEmpty() {
+ return b
+ }
+ }
+
+ // Convert either Singleton or Empty to arrays, so that we can merge them
+ //
+ ara := convertToArray(a)
+ arb := convertToArray(b)
+ return mergeArrays(ara, arb, rootIsWildcard, mergeCache)
+}
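+
+// As an illustrative sketch only (hypothetical return states), merging two
+// singleton contexts that share the same parent packs both return states
+// into a single array context:
+//
+// a := SingletonBasePredictionContextCreate(BasePredictionContextEMPTY, 10)
+// b := SingletonBasePredictionContextCreate(BasePredictionContextEMPTY, 20)
+// m := merge(a, b, true, nil) // array context with returnStates [10, 20]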
+
+func convertToArray(pc *PredictionContext) *PredictionContext {
+ switch pc.Type() {
+ case PredictionContextEmpty:
+ return NewArrayPredictionContext([]*PredictionContext{}, []int{})
+ case PredictionContextSingleton:
+ return NewArrayPredictionContext([]*PredictionContext{pc.GetParent(0)}, []int{pc.getReturnState(0)})
+ default:
+ // Already an array
+ }
+ return pc
+}
+
+// mergeSingletons merges two singleton [PredictionContext] instances.
+//
+// The cases handled are:
+//
+// - Stack tops equal and parents merge to the same graph: return the left graph.
+// - Same stack top but parents differ: merge the parents, giving an array node
+// for the merged parents, then the remainders of those graphs. A new root
+// node is created to point to the merged parents.
+// - Different stack tops pointing to the same parent: make an array node for
+// the root where both elements point to the same (original) parent.
+// - Different stack tops pointing to different parents: make an array node for
+// the root where each element points to the corresponding original parent.
+//
+// a and b are the two singleton contexts to merge. rootIsWildcard is true if
+// this is a local-context merge, otherwise false to indicate a full-context
+// merge. mergeCache, if non-nil, caches merge results.
+func mergeSingletons(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext {
+ if mergeCache != nil {
+ previous, present := mergeCache.Get(a, b)
+ if present {
+ return previous
+ }
+ previous, present = mergeCache.Get(b, a)
+ if present {
+ return previous
+ }
+ }
+
+ rootMerge := mergeRoot(a, b, rootIsWildcard)
+ if rootMerge != nil {
+ if mergeCache != nil {
+ mergeCache.Put(a, b, rootMerge)
+ }
+ return rootMerge
+ }
+ if a.returnState == b.returnState {
+ parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
+ // if parent is same as existing a or b parent or reduced to a parent,
+ // return it
+ if parent.Equals(a.parentCtx) {
+ return a // ax + bx = ax, if a=b
+ }
+ if parent.Equals(b.parentCtx) {
+ return b // ax + bx = bx, if a=b
+ }
+ // else: ax + ay = a'[x,y]
+ // merge parents x and y, giving array node with x,y then remainders
+ // of those graphs. dup a, a' points at merged array.
+ // New joined parent so create a new singleton pointing to it, a'
+ spc := SingletonBasePredictionContextCreate(parent, a.returnState)
+ if mergeCache != nil {
+ mergeCache.Put(a, b, spc)
+ }
+ return spc
+ }
+ // a != b payloads differ
+ // see if we can collapse parents due to $+x parents if local ctx
+ var singleParent *PredictionContext
+ if a.Equals(b) || (a.parentCtx != nil && a.parentCtx.Equals(b.parentCtx)) { // ax +
+ // bx =
+ // [a,b]x
+ singleParent = a.parentCtx
+ }
+ if singleParent != nil { // parents are same
+ // sort payloads and use same parent
+ payloads := []int{a.returnState, b.returnState}
+ if a.returnState > b.returnState {
+ payloads[0] = b.returnState
+ payloads[1] = a.returnState
+ }
+ parents := []*PredictionContext{singleParent, singleParent}
+ apc := NewArrayPredictionContext(parents, payloads)
+ if mergeCache != nil {
+ mergeCache.Put(a, b, apc)
+ }
+ return apc
+ }
+ // parents differ and can't merge them. Just pack together
+ // into array can't merge.
+ // ax + by = [ax,by]
+ payloads := []int{a.returnState, b.returnState}
+ parents := []*PredictionContext{a.parentCtx, b.parentCtx}
+ if a.returnState > b.returnState { // sort by payload
+ payloads[0] = b.returnState
+ payloads[1] = a.returnState
+ parents = []*PredictionContext{b.parentCtx, a.parentCtx}
+ }
+ apc := NewArrayPredictionContext(parents, payloads)
+ if mergeCache != nil {
+ mergeCache.Put(a, b, apc)
+ }
+ return apc
+}
+
+// mergeRoot handles the case where at least one of a or b is
+// [BasePredictionContextEMPTY]. In the following, the symbol $ is used
+// to represent [BasePredictionContextEMPTY].
+//
+// # Local-Context Merges
+//
+// These local-context merge operations are used when rootIsWildcard is true:
+//
+// - $ is a superset of any graph, so $ + x = $.
+// - $ merged with anything else yields $, so the merged parent is $:
+// return the left graph.
+//
+// # Full-Context Merges
+//
+// These full-context merge operations are used when rootIsWildcard is false:
+//
+// - $ + $ = $
+// - $ + x = [$,x]: all contexts must be kept, and $ in an array is a
+// special value (with a nil parent).
+// - x + $ = [$,x]: $ is always first if present.
+//
+// a and b are the two singleton contexts to merge. rootIsWildcard is true if
+// this is a local-context merge, otherwise false to indicate a full-context
+// merge. The func returns nil if neither a nor b is empty.
+func mergeRoot(a, b *PredictionContext, rootIsWildcard bool) *PredictionContext {
+ if rootIsWildcard {
+ if a.pcType == PredictionContextEmpty {
+ return BasePredictionContextEMPTY // * + b = *
+ }
+ if b.pcType == PredictionContextEmpty {
+ return BasePredictionContextEMPTY // a + * = *
+ }
+ } else {
+ if a.isEmpty() && b.isEmpty() {
+ return BasePredictionContextEMPTY // $ + $ = $
+ } else if a.isEmpty() { // $ + x = [$,x]
+ payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState}
+ parents := []*PredictionContext{b.GetParent(-1), nil}
+ return NewArrayPredictionContext(parents, payloads)
+ } else if b.isEmpty() { // x + $ = [$,x] ($ is always first if present)
+ payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState}
+ parents := []*PredictionContext{a.GetParent(-1), nil}
+ return NewArrayPredictionContext(parents, payloads)
+ }
+ }
+ return nil
+}
+
+// mergeArrays merges two array [PredictionContext] instances.
+//
+// The cases handled are:
+//
+// - Different tops, different parents.
+// - Shared top, same parents.
+// - Shared top, different parents.
+// - Shared top, all shared parents.
+// - Equal tops, merge parents and reduce top to a singleton [PredictionContext].
+//
+//goland:noinspection GoBoolExpressions
+func mergeArrays(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext {
+ if mergeCache != nil {
+ previous, present := mergeCache.Get(a, b)
+ if present {
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
+ }
+ return previous
+ }
+ previous, present = mergeCache.Get(b, a)
+ if present {
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
+ }
+ return previous
+ }
+ }
+ // merge sorted payloads a + b => M
+ i := 0 // walks a
+ j := 0 // walks b
+ k := 0 // walks target M array
+
+ mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates))
+ mergedParents := make([]*PredictionContext, len(a.returnStates)+len(b.returnStates))
+ // walk and merge to yield mergedParents, mergedReturnStates
+ for i < len(a.returnStates) && j < len(b.returnStates) {
+ aParent := a.parents[i]
+ bParent := b.parents[j]
+ if a.returnStates[i] == b.returnStates[j] {
+ // same payload (stack tops are equal), must yield merged singleton
+ payload := a.returnStates[i]
+ // $+$ = $
+ bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil
+ axAX := aParent != nil && bParent != nil && aParent.Equals(bParent) // ax+ax -> ax
+ if bothDollars || axAX {
+ mergedParents[k] = aParent // choose left
+ mergedReturnStates[k] = payload
+ } else { // ax+ay -> a'[x,y]
+ mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache)
+ mergedParents[k] = mergedParent
+ mergedReturnStates[k] = payload
+ }
+ i++ // hop over left one as usual
+ j++ // but also Skip one in right side since we merge
+ } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M
+ mergedParents[k] = aParent
+ mergedReturnStates[k] = a.returnStates[i]
+ i++
+ } else { // b > a, copy b[j] to M
+ mergedParents[k] = bParent
+ mergedReturnStates[k] = b.returnStates[j]
+ j++
+ }
+ k++
+ }
+ // copy over any payloads remaining in either array
+ if i < len(a.returnStates) {
+ for p := i; p < len(a.returnStates); p++ {
+ mergedParents[k] = a.parents[p]
+ mergedReturnStates[k] = a.returnStates[p]
+ k++
+ }
+ } else {
+ for p := j; p < len(b.returnStates); p++ {
+ mergedParents[k] = b.parents[p]
+ mergedReturnStates[k] = b.returnStates[p]
+ k++
+ }
+ }
+ // trim merged if we combined a few that had same stack tops
+ if k < len(mergedParents) { // write index < last position trim
+ if k == 1 { // for just one merged element, return singleton top
+ pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0])
+ if mergeCache != nil {
+ mergeCache.Put(a, b, pc)
+ }
+ return pc
+ }
+ mergedParents = mergedParents[0:k]
+ mergedReturnStates = mergedReturnStates[0:k]
+ }
+
+ M := NewArrayPredictionContext(mergedParents, mergedReturnStates)
+
+ // if we created same array as a or b, return that instead
+ // TODO: JI track whether this is possible above during merge sort for speed and possibly avoid an allocation
+ if M.Equals(a) {
+ if mergeCache != nil {
+ mergeCache.Put(a, b, a)
+ }
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> a")
+ }
+ return a
+ }
+ if M.Equals(b) {
+ if mergeCache != nil {
+ mergeCache.Put(a, b, b)
+ }
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> b")
+ }
+ return b
+ }
+ combineCommonParents(&mergedParents)
+
+ if mergeCache != nil {
+ mergeCache.Put(a, b, M)
+ }
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> " + M.String())
+ }
+ return M
+}
+
+// combineCommonParents makes a pass over all parents of M and merges any that are Equals to one another.
+// Note that we pass a pointer to the slice as we want to modify it in place.
+//
+//goland:noinspection GoUnusedFunction
+func combineCommonParents(parents *[]*PredictionContext) {
+ uniqueParents := NewJStore[*PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCollection, "combineCommonParents for PredictionContext")
+
+ for p := 0; p < len(*parents); p++ {
+ parent := (*parents)[p]
+ _, _ = uniqueParents.Put(parent)
+ }
+ for q := 0; q < len(*parents); q++ {
+ pc, _ := uniqueParents.Get((*parents)[q])
+ (*parents)[q] = pc
+ }
+}
+
+func getCachedBasePredictionContext(context *PredictionContext, contextCache *PredictionContextCache, visited *VisitRecord) *PredictionContext {
+ if context.isEmpty() {
+ return context
+ }
+ existing, present := visited.Get(context)
+ if present {
+ return existing
+ }
+
+ existing, present = contextCache.Get(context)
+ if present {
+ visited.Put(context, existing)
+ return existing
+ }
+ changed := false
+ parents := make([]*PredictionContext, context.length())
+ for i := 0; i < len(parents); i++ {
+ parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited)
+ if changed || !parent.Equals(context.GetParent(i)) {
+ if !changed {
+ parents = make([]*PredictionContext, context.length())
+ for j := 0; j < context.length(); j++ {
+ parents[j] = context.GetParent(j)
+ }
+ changed = true
+ }
+ parents[i] = parent
+ }
+ }
+ if !changed {
+ contextCache.add(context)
+ visited.Put(context, context)
+ return context
+ }
+ var updated *PredictionContext
+ if len(parents) == 0 {
+ updated = BasePredictionContextEMPTY
+ } else if len(parents) == 1 {
+ updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0))
+ } else {
+ updated = NewArrayPredictionContext(parents, context.GetReturnStates())
+ }
+ contextCache.add(updated)
+ visited.Put(updated, updated)
+ visited.Put(context, updated)
+
+ return updated
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go b/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go
new file mode 100644
index 000000000..25dfb11e8
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go
@@ -0,0 +1,48 @@
+package antlr
+
+var BasePredictionContextEMPTY = &PredictionContext{
+ cachedHash: calculateEmptyHash(),
+ pcType: PredictionContextEmpty,
+ returnState: BasePredictionContextEmptyReturnState,
+}
+
+// PredictionContextCache is used to cache [PredictionContext] objects. It is used for the shared
+// context cache associated with contexts in DFA states. This cache
+// can be used for both lexers and parsers.
+type PredictionContextCache struct {
+ cache *JMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]]
+}
+
+func NewPredictionContextCache() *PredictionContextCache {
+ return &PredictionContextCache{
+ cache: NewJMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCacheCollection, "NewPredictionContextCache()"),
+ }
+}
+
+// add adds a context to the cache and returns it. If an equal context already
+// exists, that one is returned instead and no new entry is added.
+// The shared cache must be protected from unsafe concurrent access.
+func (p *PredictionContextCache) add(ctx *PredictionContext) *PredictionContext {
+ if ctx.isEmpty() {
+ return BasePredictionContextEMPTY
+ }
+
+ // Put will return the existing entry if it is present (note this is done via Equals, not whether it is
+ // the same pointer), otherwise it will add the new entry and return that.
+ //
+ existing, present := p.cache.Get(ctx)
+ if present {
+ return existing
+ }
+ p.cache.Put(ctx, ctx)
+ return ctx
+}
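+
+// As a usage sketch (add is package-internal; the names here are illustrative),
+// a freshly built context is canonicalised through the cache so that equal
+// contexts share one instance:
+//
+// cache := NewPredictionContextCache()
+// ctx := SingletonBasePredictionContextCreate(BasePredictionContextEMPTY, 42)
+// canonical := cache.add(ctx) // an existing equal context is returned if present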
+
+func (p *PredictionContextCache) Get(ctx *PredictionContext) (*PredictionContext, bool) {
+ pc, exists := p.cache.Get(ctx)
+ return pc, exists
+}
+
+func (p *PredictionContextCache) length() int {
+ return p.cache.Len()
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go b/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go
new file mode 100644
index 000000000..3f85a6a52
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go
@@ -0,0 +1,536 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// This enumeration defines the prediction modes available in ANTLR 4 along with
+// utility methods for analyzing configuration sets for conflicts and/or
+// ambiguities.
+
+const (
+ // PredictionModeSLL represents the SLL(*) prediction mode.
+ // This prediction mode ignores the current
+ // parser context when making predictions. This is the fastest prediction
+ // mode, and provides correct results for many grammars. This prediction
+ // mode is more powerful than the prediction mode provided by ANTLR 3, but
+ // may result in syntax errors for grammar and input combinations which are
+ // not SLL.
+ //
+ // When using this prediction mode, the parser will either return a correct
+ // parse tree (i.e. the same parse tree that would be returned with the
+ // [PredictionModeLL] prediction mode), or it will Report a syntax error. If a
+ // syntax error is encountered when using the SLL prediction mode,
+ // it may be due to either an actual syntax error in the input or indicate
+ // that the particular combination of grammar and input requires the more
+ // powerful LL prediction abilities to complete successfully.
+ //
+ // This prediction mode does not provide any guarantees for prediction
+ // behavior for syntactically-incorrect inputs.
+ //
+ PredictionModeSLL = 0
+
+ // PredictionModeLL represents the LL(*) prediction mode.
+ // This prediction mode allows the current parser
+ // context to be used for resolving SLL conflicts that occur during
+ // prediction. This is the fastest prediction mode that guarantees correct
+ // parse results for all combinations of grammars with syntactically correct
+ // inputs.
+ //
+ // When using this prediction mode, the parser will make correct decisions
+ // for all syntactically-correct grammar and input combinations. However, in
+ // cases where the grammar is truly ambiguous this prediction mode might not
+ // report a precise answer for exactly which alternatives are
+ // ambiguous.
+ //
+ // This prediction mode does not provide any guarantees for prediction
+ // behavior for syntactically-incorrect inputs.
+ //
+ PredictionModeLL = 1
+
+ // PredictionModeLLExactAmbigDetection represents the LL(*) prediction mode
+ // with exact ambiguity detection.
+ //
+ // In addition to the correctness guarantees provided by the [PredictionModeLL] prediction mode,
+ // this prediction mode instructs the prediction algorithm to determine the
+ // complete and exact set of ambiguous alternatives for every ambiguous
+ // decision encountered while parsing.
+ //
+ // This prediction mode may be used for diagnosing ambiguities during
+ // grammar development. Due to the performance overhead of calculating sets
+ // of ambiguous alternatives, this prediction mode should be avoided when
+ // the exact results are not necessary.
+ //
+ // This prediction mode does not provide any guarantees for prediction
+ // behavior for syntactically-incorrect inputs.
+ //
+ PredictionModeLLExactAmbigDetection = 2
+)
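+
+// As a usage sketch (assuming a generated parser value that exposes this
+// runtime's usual GetInterpreter accessor), the prediction mode is selected
+// on the parser's ATN simulator before parsing begins:
+//
+// parser.GetInterpreter().SetPredictionMode(PredictionModeSLL)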
+
+// PredictionModehasSLLConflictTerminatingPrediction computes the SLL prediction termination condition.
+//
+// This method computes the SLL prediction termination condition for both of
+// the following cases:
+//
+// - The usual SLL+LL fallback upon SLL conflict
+// - Pure SLL without LL fallback
+//
+// # Combined SLL+LL Parsing
+//
+// When LL-fallback is enabled upon SLL conflict, correct predictions are
+// ensured regardless of how the termination condition is computed by this
+// method. Due to the substantially higher cost of LL prediction, the
+// prediction should only fall back to LL when the additional lookahead
+// cannot lead to a unique SLL prediction.
+//
+// Assuming combined SLL+LL parsing, an SLL configuration set with only
+// conflicting subsets should fall back to full LL, even if the
+// configuration sets don't resolve to the same alternative, e.g.
+//
+// {1,2} and {3,4}
+//
+// If there is at least one non-conflicting
+// configuration, SLL could continue with the hopes that more lookahead will
+// resolve via one of those non-conflicting configurations.
+//
+// Here's the prediction termination rule then: SLL (for SLL+LL parsing)
+// stops when it sees only conflicting configuration subsets. In contrast,
+// full LL keeps going when there is uncertainty.
+//
+// # Heuristic
+//
+// As a heuristic, we stop prediction when we see any conflicting subset
+// unless we see a state that only has one alternative associated with it.
+// The single-alt-state thing lets prediction continue upon rules like
+// (otherwise, it would admit defeat too soon):
+//
+// s : (ID | ID ID?) ;
+//
+// When the [ATN] simulation reaches the state before ';', it has a
+// [DFA] state that looks like:
+//
+// [12|1|[], 6|2|[], 12|2|[]]
+//
+// Naturally
+//
+// 12|1|[] and 12|2|[]
+//
+// conflict, but we cannot stop processing this node because alternative 2 has another way to continue,
+// via
+//
+// [6|2|[]]
+//
+// It also lets us continue for this rule:
+//
+// a : A | A | A B ;
+//
+// which yields the configuration set:
+//
+// [1|1|[], 1|2|[], 8|3|[]]
+//
+// After Matching input A, we reach the stop state for rule A, state 1.
+// State 8 is the state immediately before B. Clearly alternatives 1 and 2
+// conflict and no amount of further lookahead will separate the two.
+// However, alternative 3 will be able to continue, and so we do not stop
+// working on this state. In the previous example, we're concerned with
+// states associated with the conflicting alternatives. Here alt 3 is not
+// associated with the conflicting configs, but since we can continue
+// looking for input reasonably, don't declare the state done.
+//
+// # Pure SLL Parsing
+//
+// To handle pure SLL parsing, all we have to do is make sure that we
+// combine stack contexts for configurations that differ only by semantic
+// predicate. From there, we can do the usual SLL termination heuristic.
+//
+// # Predicates in SLL+LL Parsing
+//
+// SLL decisions don't evaluate predicates until after they reach [DFA] stop
+// states because they need to create the [DFA] cache that works in all
+// semantic situations. In contrast, full LL evaluates predicates collected
+// during start state computation, so it can ignore predicates thereafter.
+// This means that SLL termination detection can totally ignore semantic
+// predicates.
+//
+// Implementation-wise, [ATNConfigSet] combines stack contexts but not
+// semantic predicate contexts, so we might see two configurations like the
+// following:
+//
+// (s, 1, x, {}), (s, 1, x', {p})
+//
+// Before testing these configurations against others, we have to merge
+// x and x' (without modifying the existing configurations).
+// For example, we test (x+x')==x'' when looking for conflicts in
+// the following configurations:
+//
+// (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})
+//
+// If the configuration set has predicates (as indicated by
+// [ATNConfigSet.hasSemanticContext]), this algorithm makes a copy of
+// the configurations to strip out all the predicates so that a standard
+// [ATNConfigSet] will merge everything ignoring predicates.
+func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs *ATNConfigSet) bool {
+
+ // Configs in rule stop states indicate reaching the end of the decision
+ // rule (local context) or end of start rule (full context). If all
+ // configs meet this condition, then none of the configurations is able
+ // to Match additional input, so we terminate prediction.
+ //
+ if PredictionModeallConfigsInRuleStopStates(configs) {
+ return true
+ }
+
+ // pure SLL mode parsing
+ if mode == PredictionModeSLL {
+ // Don't bother with combining configs from different semantic
+ // contexts if we can fail over to full LL; it costs more time
+ // since we'll often fail over anyway.
+ if configs.hasSemanticContext {
+ // dup configs, tossing out semantic predicates
+ dup := NewATNConfigSet(false)
+ for _, c := range configs.configs {
+
+ // Duplicate c with an empty semantic context.
+ c = NewATNConfig2(c, SemanticContextNone)
+ dup.Add(c, nil)
+ }
+ configs = dup
+ }
+ // now we have combined contexts for configs with dissimilar predicates
+ }
+ // pure SLL or combined SLL+LL mode parsing
+ altsets := PredictionModegetConflictingAltSubsets(configs)
+ return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs)
+}
+
+// PredictionModehasConfigInRuleStopState checks if any configuration in the given configs is in a
+// [RuleStopState]. Configurations meeting this condition have reached
+// the end of the decision rule (local context) or end of start rule (full
+// context).
+//
+// The func returns true if any configuration in the supplied configs is in a [RuleStopState]
+func PredictionModehasConfigInRuleStopState(configs *ATNConfigSet) bool {
+ for _, c := range configs.configs {
+ if _, ok := c.GetState().(*RuleStopState); ok {
+ return true
+ }
+ }
+ return false
+}
+
+// PredictionModeallConfigsInRuleStopStates checks if all configurations in configs are in a
+// [RuleStopState]. Configurations meeting this condition have reached
+// the end of the decision rule (local context) or end of start rule (full
+// context).
+//
+// the func returns true if all configurations in configs are in a
+// [RuleStopState]
+func PredictionModeallConfigsInRuleStopStates(configs *ATNConfigSet) bool {
+
+ for _, c := range configs.configs {
+ if _, ok := c.GetState().(*RuleStopState); !ok {
+ return false
+ }
+ }
+ return true
+}
+
+// PredictionModeresolvesToJustOneViableAlt checks full LL prediction termination.
+//
+// Can we stop looking ahead during [ATN] simulation or is there some
+// uncertainty as to which alternative we will ultimately pick, after
+// consuming more input? Even if there are partial conflicts, we might know
+// that everything is going to resolve to the same minimum alternative. That
+// means we can stop since no more lookahead will change that fact. On the
+// other hand, there might be multiple conflicts that resolve to different
+// minimums. That means we need more look ahead to decide which of those
+// alternatives we should predict.
+//
+// The basic idea is to split the set of configurations 'C', into
+// conflicting subsets (s, _, ctx, _) and singleton subsets with
+// non-conflicting configurations. Two configurations conflict if they have
+// identical [ATNConfig].state and [ATNConfig].context values
+// but a different [ATNConfig].alt value, e.g.
+//
+// (s, i, ctx, _)
+//
+// and
+//
+// (s, j, ctx, _) ; for i != j
+//
+// Reduce these configuration subsets to the set of possible alternatives.
+// You can compute the alternative subsets in one pass as follows:
+//
+// A_s,ctx = {i | (s, i, ctx, _)}
+//
+// for each configuration in C holding s and ctx fixed.
+//
+// Or in pseudo-code:
+//
+// for each configuration c in C:
+// map[c] U= c.ATNConfig.alt // map hash/equals uses s and x, not alt and not pred
+//
+// The values in map are the set of
+//
+// A_s,ctx
+//
+// sets.
+//
+// If
+//
+// |A_s,ctx| = 1
+//
+// then there is no conflict associated with s and ctx.
+//
+// Reduce the subsets to singletons by choosing a minimum of each subset. If
+// the union of these alternative subsets is a singleton, then no amount of
+// further lookahead will help us. We will always pick that alternative. If,
+// however, there is more than one alternative, then we are uncertain which
+// alternative to predict and must continue looking for resolution. We may
+// or may not discover an ambiguity in the future, even if there are no
+// conflicting subsets this round.
+//
+// The biggest sin is to terminate early because it means we've made a
+// decision but were uncertain as to the eventual outcome. We haven't used
+// enough lookahead. On the other hand, announcing a conflict too late is no
+// big deal; you will still have the conflict. It's just inefficient. It
+// might even look until the end of file.
+//
+// No special consideration for semantic predicates is required because
+// predicates are evaluated on-the-fly for full LL prediction, ensuring that
+// no configuration contains a semantic context during the termination
+// check.
+//
+// # Conflicting Configs
+//
+// Two configurations:
+//
+// (s, i, x) and (s, j, x')
+//
+// conflict when i != j but x = x'. Because we merge all
+// (s, i, _) configurations together, that means that there are at
+// most n configurations associated with state s for
+// n possible alternatives in the decision. The merged stacks
+// complicate the comparison of configuration contexts x and x'.
+//
+// Sam checks to see if one is a subset of the other by calling
+// merge and checking to see if the merged result is either x or x'.
+// If the x associated with lowest alternative i
+// is the superset, then i is the only possible prediction since the
+// others resolve to min(i) as well. However, if x is
+// associated with j > i then at least one stack configuration for
+// j is not in conflict with alternative i. The algorithm
+// should keep going, looking for more lookahead due to the uncertainty.
+//
+// For simplicity, I'm doing an equality check between x and
+// x', which lets the algorithm continue to consume lookahead longer
+// than necessary. The reason I like the equality is of course the
+// simplicity but also because that is the test you need to detect the
+// alternatives that are actually in conflict.
+//
+// # Continue/Stop Rule
+//
+// Continue if the union of resolved alternative sets from non-conflicting and
+// conflicting alternative subsets has more than one alternative. We are
+// uncertain about which alternative to predict.
+//
+// The complete set of alternatives,
+//
+// [i for (_, i, _)]
+//
+// tells us which alternatives are still in the running for the amount of input we've
+// consumed at this point. The conflicting sets let us strip away
+// configurations that won't lead to more states because we resolve
+// conflicts to the configuration with a minimum alternate for the
+// conflicting set.
+//
+// Cases
+//
+// - no conflicts and more than 1 alternative in set => continue
+// - (s, 1, x), (s, 2, x), (s, 3, z), (s', 1, y), (s', 2, y) yields non-conflicting set
+// {3} ∪ conflicting sets min({1,2}) ∪ min({1,2}) = {1,3} => continue
+// - (s, 1, x), (s, 2, x), (s', 1, y), (s', 2, y), (s'', 1, z) yields non-conflicting set
+// {1} ∪ conflicting sets min({1,2}) ∪ min({1,2}) = {1} => stop and predict 1
+// - (s, 1, x), (s, 2, x), (s', 1, y), (s', 2, y) yields conflicting, reduced sets
+// {1} ∪ {1} = {1} => stop and predict 1, can announce ambiguity {1,2}
+// - (s, 1, x), (s, 2, x), (s', 2, y), (s', 3, y) yields conflicting, reduced sets
+// {1} ∪ {2} = {1,2} => continue
+// - (s, 1, x), (s, 2, x), (s', 3, y), (s', 4, y) yields conflicting, reduced sets
+// {1} ∪ {3} = {1,3} => continue
+//
+// # Exact Ambiguity Detection
+//
+// If all states report the same conflicting set of alternatives, then we
+// know we have the exact ambiguity set:
+//
+// |A_i| > 1
+//
+// and
+//
+// A_i = A_j ; for all i, j
+//
+// In other words, we continue examining lookahead until all A_i
+// have more than one alternative and all A_i are the same. If
+//
+// A={{1,2}, {1,3}}
+//
+// then regular LL prediction would terminate because the resolved set is {1}.
+// To determine what the real ambiguity is, we have to know whether the ambiguity is between one and
+// two or one and three so we keep going. We can only stop prediction when
+// we need exact ambiguity detection when the sets look like:
+//
+// A={{1,2}}
+//
+// or
+//
+// {{1,2},{1,2}}, etc...
+func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int {
+ return PredictionModegetSingleViableAlt(altsets)
+}
+
+// PredictionModeallSubsetsConflict determines if every alternative subset in altsets contains more
+// than one alternative.
+//
+// The func returns true if every [BitSet] in altsets has
+// a cardinality greater than 1
+func PredictionModeallSubsetsConflict(altsets []*BitSet) bool {
+ return !PredictionModehasNonConflictingAltSet(altsets)
+}
+
+// PredictionModehasNonConflictingAltSet determines if any single alternative subset in altsets contains
+// exactly one alternative.
+//
+// The func returns true if altsets contains at least one [BitSet] with
+// a cardinality of exactly 1
+func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool {
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ if alts.length() == 1 {
+ return true
+ }
+ }
+ return false
+}
+
+// PredictionModehasConflictingAltSet determines if any single alternative subset in altsets contains
+// more than one alternative.
+//
+// The func returns true if altsets contains a [BitSet] with
+// a cardinality greater than 1, otherwise false
+func PredictionModehasConflictingAltSet(altsets []*BitSet) bool {
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ if alts.length() > 1 {
+ return true
+ }
+ }
+ return false
+}
+
+// PredictionModeallSubsetsEqual determines if every alternative subset in altsets is equivalent.
+//
+// The func returns true if every member of altsets is equal to the others.
+func PredictionModeallSubsetsEqual(altsets []*BitSet) bool {
+ var first *BitSet
+
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ if first == nil {
+ first = alts
+ } else if !alts.equals(first) { // compare set contents, not pointers
+ return false
+ }
+ }
+
+ return true
+}
+
+// PredictionModegetUniqueAlt returns the unique alternative predicted by all alternative subsets in
+// altsets. If no such alternative exists, this method returns
+// [ATNInvalidAltNumber].
+//
+// @param altsets a collection of alternative subsets
+func PredictionModegetUniqueAlt(altsets []*BitSet) int {
+ all := PredictionModeGetAlts(altsets)
+ if all.length() == 1 {
+ return all.minValue()
+ }
+
+ return ATNInvalidAltNumber
+}
+
+// PredictionModeGetAlts returns the complete set of represented alternatives for a collection of
+// alternative subsets. This method returns the union of each [BitSet]
+// in altsets, being the set of represented alternatives in altsets.
+func PredictionModeGetAlts(altsets []*BitSet) *BitSet {
+ all := NewBitSet()
+ for _, alts := range altsets {
+ all.or(alts)
+ }
+ return all
+}
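+
+// For example, a small sketch of the union of {1,2} and {3}:
+//
+// s1 := NewBitSet()
+// s1.add(1)
+// s1.add(2)
+// s2 := NewBitSet()
+// s2.add(3)
+// all := PredictionModeGetAlts([]*BitSet{s1, s2}) // {1, 2, 3}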
+
+// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set.
+//
+// for each configuration c in configs:
+// map[c] U= c.ATNConfig.alt // map hash/equals uses s and x, not alt and not pred
+func PredictionModegetConflictingAltSubsets(configs *ATNConfigSet) []*BitSet {
+ configToAlts := NewJMap[*ATNConfig, *BitSet, *ATNAltConfigComparator[*ATNConfig]](atnAltCfgEqInst, AltSetCollection, "PredictionModegetConflictingAltSubsets()")
+
+ for _, c := range configs.configs {
+
+ alts, ok := configToAlts.Get(c)
+ if !ok {
+ alts = NewBitSet()
+ configToAlts.Put(c, alts)
+ }
+ alts.add(c.GetAlt())
+ }
+
+ return configToAlts.Values()
+}
+
+// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set.
+//
+// for each configuration c in configs:
+// map[c.ATNConfig.state] U= c.ATNConfig.alt
+func PredictionModeGetStateToAltMap(configs *ATNConfigSet) *AltDict {
+ m := NewAltDict()
+
+ for _, c := range configs.configs {
+ alts := m.Get(c.GetState().String())
+ if alts == nil {
+ alts = NewBitSet()
+ m.put(c.GetState().String(), alts)
+ }
+ alts.(*BitSet).add(c.GetAlt())
+ }
+ return m
+}
+
+func PredictionModehasStateAssociatedWithOneAlt(configs *ATNConfigSet) bool {
+ values := PredictionModeGetStateToAltMap(configs).values()
+ for i := 0; i < len(values); i++ {
+ if values[i].(*BitSet).length() == 1 {
+ return true
+ }
+ }
+ return false
+}
+
+// PredictionModegetSingleViableAlt gets the single alternative predicted by all alternative subsets in altsets
+// if there is one.
+//
+// TODO: JI - Review this code - it does not seem to do the same thing as the Java code - maybe because [BitSet] is not like the Java utils BitSet
+func PredictionModegetSingleViableAlt(altsets []*BitSet) int {
+ result := ATNInvalidAltNumber
+
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ minAlt := alts.minValue()
+ if result == ATNInvalidAltNumber {
+ result = minAlt
+ } else if result != minAlt { // more than 1 viable alt
+ return ATNInvalidAltNumber
+ }
+ }
+ return result
+}
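+
+// For example, a sketch of the {{1,2},{1,3}} case discussed above, where each
+// subset resolves to its minimum alternative 1:
+//
+// x := NewBitSet()
+// x.add(1)
+// x.add(2)
+// y := NewBitSet()
+// y.add(1)
+// y.add(3)
+// alt := PredictionModegetSingleViableAlt([]*BitSet{x, y}) // 1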
diff --git a/vendor/github.com/antlr4-go/antlr/v4/recognizer.go b/vendor/github.com/antlr4-go/antlr/v4/recognizer.go
new file mode 100644
index 000000000..2e0b504fb
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/recognizer.go
@@ -0,0 +1,241 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+type Recognizer interface {
+ GetLiteralNames() []string
+ GetSymbolicNames() []string
+ GetRuleNames() []string
+
+ Sempred(RuleContext, int, int) bool
+ Precpred(RuleContext, int) bool
+
+ GetState() int
+ SetState(int)
+ Action(RuleContext, int, int)
+ AddErrorListener(ErrorListener)
+ RemoveErrorListeners()
+ GetATN() *ATN
+ GetErrorListenerDispatch() ErrorListener
+ HasError() bool
+ GetError() RecognitionException
+ SetError(RecognitionException)
+}
+
+type BaseRecognizer struct {
+ listeners []ErrorListener
+ state int
+
+ RuleNames []string
+ LiteralNames []string
+ SymbolicNames []string
+ GrammarFileName string
+ SynErr RecognitionException
+}
+
+func NewBaseRecognizer() *BaseRecognizer {
+ rec := new(BaseRecognizer)
+ rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE}
+ rec.state = -1
+ return rec
+}
+
+//goland:noinspection GoUnusedGlobalVariable
+var tokenTypeMapCache = make(map[string]int)
+
+//goland:noinspection GoUnusedGlobalVariable
+var ruleIndexMapCache = make(map[string]int)
+
+func (b *BaseRecognizer) checkVersion(toolVersion string) {
+ runtimeVersion := "4.12.0"
+ if runtimeVersion != toolVersion {
+ fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion)
+ }
+}
+
+func (b *BaseRecognizer) SetError(err RecognitionException) {
+ b.SynErr = err
+}
+
+func (b *BaseRecognizer) HasError() bool {
+ return b.SynErr != nil
+}
+
+func (b *BaseRecognizer) GetError() RecognitionException {
+ return b.SynErr
+}
+
+func (b *BaseRecognizer) Action(_ RuleContext, _, _ int) {
+ panic("action not implemented on Recognizer!")
+}
+
+func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) {
+ b.listeners = append(b.listeners, listener)
+}
+
+func (b *BaseRecognizer) RemoveErrorListeners() {
+ b.listeners = make([]ErrorListener, 0)
+}
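+
+// A common pattern (sketch), where r is any Recognizer, is to swap the default
+// console listener for a diagnostic listener before parsing:
+//
+// r.RemoveErrorListeners()
+// r.AddErrorListener(NewDiagnosticErrorListener(true))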
+
+func (b *BaseRecognizer) GetRuleNames() []string {
+ return b.RuleNames
+}
+
+func (b *BaseRecognizer) GetTokenNames() []string {
+ return b.LiteralNames
+}
+
+func (b *BaseRecognizer) GetSymbolicNames() []string {
+ return b.SymbolicNames
+}
+
+func (b *BaseRecognizer) GetLiteralNames() []string {
+ return b.LiteralNames
+}
+
+func (b *BaseRecognizer) GetState() int {
+ return b.state
+}
+
+func (b *BaseRecognizer) SetState(v int) {
+ b.state = v
+}
+
+//func (b *Recognizer) GetTokenTypeMap() {
+// var tokenNames = b.GetTokenNames()
+// if (tokenNames==nil) {
+// panic("The current recognizer does not provide a list of token names.")
+// }
+// var result = tokenTypeMapCache[tokenNames]
+// if(result==nil) {
+// result = tokenNames.reduce(function(o, k, i) { o[k] = i })
+// result.EOF = TokenEOF
+// tokenTypeMapCache[tokenNames] = result
+// }
+// return result
+//}
+
+// GetRuleIndexMap gets a map from rule names to rule indexes.
+//
+// Used for XPath and tree pattern compilation.
+//
+// TODO: JI This is not yet implemented in the Go runtime. Maybe not needed.
+func (b *BaseRecognizer) GetRuleIndexMap() map[string]int {
+
+ panic("Method not defined!")
+ // var ruleNames = b.GetRuleNames()
+ // if (ruleNames==nil) {
+ // panic("The current recognizer does not provide a list of rule names.")
+ // }
+ //
+ // var result = ruleIndexMapCache[ruleNames]
+ // if(result==nil) {
+ // result = ruleNames.reduce(function(o, k, i) { o[k] = i })
+ // ruleIndexMapCache[ruleNames] = result
+ // }
+ // return result
+}
+
+// GetTokenType gets the token type based upon its name
+func (b *BaseRecognizer) GetTokenType(_ string) int {
+ panic("Method not defined!")
+ // var ttype = b.GetTokenTypeMap()[tokenName]
+ // if (ttype !=nil) {
+ // return ttype
+ // } else {
+ // return TokenInvalidType
+ // }
+}
+
+//func (b *Recognizer) GetTokenTypeMap() map[string]int {
+// Vocabulary vocabulary = getVocabulary()
+//
+// Synchronized (tokenTypeMapCache) {
+// Map result = tokenTypeMapCache.Get(vocabulary)
+// if (result == null) {
+// result = new HashMap()
+// for (int i = 0; i < GetATN().maxTokenType; i++) {
+// String literalName = vocabulary.getLiteralName(i)
+// if (literalName != null) {
+// result.put(literalName, i)
+// }
+//
+// String symbolicName = vocabulary.GetSymbolicName(i)
+// if (symbolicName != null) {
+// result.put(symbolicName, i)
+// }
+// }
+//
+// result.put("EOF", Token.EOF)
+// result = Collections.unmodifiableMap(result)
+// tokenTypeMapCache.put(vocabulary, result)
+// }
+//
+// return result
+// }
+//}
+
+// GetErrorHeader returns the error header, normally line/character position information.
+//
+// Can be overridden in sub structs embedding BaseRecognizer.
+func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string {
+ line := e.GetOffendingToken().GetLine()
+ column := e.GetOffendingToken().GetColumn()
+ return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column)
+}
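+
+// For an offending token at line 3, column 10 this produces (sketch):
+//
+// line 3:10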
+
+// GetTokenErrorDisplay shows how a token should be displayed in an error message.
+//
+// The default is to display just the text, but during development you might
+// want to have a lot of information spit out. Override in that case
+// to use t.String() (which, for CommonToken, dumps everything about
+// the token). This is better than forcing you to override a method in
+// your token objects because you don't have to go modify your lexer
+// so that it creates a new type.
+//
+// Deprecated: This method is not called by the ANTLR 4 Runtime. Specific
+// implementations of [ANTLRErrorStrategy] may provide a similar
+// feature when necessary. For example, see [DefaultErrorStrategy].GetTokenErrorDisplay()
+func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string {
+ if t == nil {
+ return ""
+ }
+ s := t.GetText()
+ if s == "" {
+ if t.GetTokenType() == TokenEOF {
+ s = ""
+ } else {
+ s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
+ }
+ }
+ s = strings.Replace(s, "\t", "\\t", -1)
+ s = strings.Replace(s, "\n", "\\n", -1)
+ s = strings.Replace(s, "\r", "\\r", -1)
+
+ return "'" + s + "'"
+}
+
+func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener {
+ return NewProxyErrorListener(b.listeners)
+}
+
+// Sempred embedding structs need to override this if there are sempreds or actions
+// that the ATN interpreter needs to execute
+func (b *BaseRecognizer) Sempred(_ RuleContext, _ int, _ int) bool {
+ return true
+}
+
+// Precpred embedding structs need to override this if there are precedence predicates
+// that the ATN interpreter needs to execute
+func (b *BaseRecognizer) Precpred(_ RuleContext, _ int) bool {
+ return true
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/rule_context.go b/vendor/github.com/antlr4-go/antlr/v4/rule_context.go
new file mode 100644
index 000000000..f2ad04793
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/rule_context.go
@@ -0,0 +1,40 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// RuleContext is a record of a single rule invocation. It knows
+// which context invoked it, if any. If there is no parent context, then
+// naturally the invoking state is not valid. The parent link
+// provides a chain upwards from the current rule invocation to the root
+// of the invocation tree, forming a stack.
+//
+// We actually carry no information about the rule associated with this context (except
+// when parsing). We keep only the state number of the invoking state from
+// the [ATN] submachine that invoked this. Contrast this with the s
+// pointer inside [ParserRuleContext] that tracks the current state
+// being "executed" for the current rule.
+//
+// The parent contexts are useful for computing lookahead sets and
+// getting error information.
+//
+// These objects are used during parsing and prediction.
+// For the special case of parsers, we use the struct
+// [ParserRuleContext], which embeds a RuleContext.
+//
+// See also [ParserRuleContext].
+type RuleContext interface {
+ RuleNode
+
+ GetInvokingState() int
+ SetInvokingState(int)
+
+ GetRuleIndex() int
+ IsEmpty() bool
+
+ GetAltNumber() int
+ SetAltNumber(altNumber int)
+
+ String([]string, RuleContext) string
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go b/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go
similarity index 92%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go
rename to vendor/github.com/antlr4-go/antlr/v4/semantic_context.go
index a702e99de..68cb9061e 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go
@@ -9,14 +9,13 @@ import (
"strconv"
)
-// A tree structure used to record the semantic context in which
-// an ATN configuration is valid. It's either a single predicate,
-// a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}.
+// SemanticContext is a tree structure used to record the semantic context in
+// which an ATN configuration is valid. It's either a single predicate,
+// a conjunction p1 && p2, or a sum of products p1 || p2.
//
-// I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of
-// {@link SemanticContext} within the scope of this outer class.
+// I have scoped the AND, OR, and Predicate subclasses of
+// [SemanticContext] within the scope of this outer "class".
//
-
type SemanticContext interface {
Equals(other Collectable[SemanticContext]) bool
Hash() int
@@ -80,7 +79,7 @@ func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate {
var SemanticContextNone = NewPredicate(-1, -1, false)
-func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
+func (p *Predicate) evalPrecedence(_ Recognizer, _ RuleContext) SemanticContext {
return p
}
@@ -198,7 +197,7 @@ type AND struct {
func NewAND(a, b SemanticContext) *AND {
- operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst)
+ operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewAND() operands")
if aa, ok := a.(*AND); ok {
for _, o := range aa.opnds {
operands.Put(o)
@@ -230,9 +229,7 @@ func NewAND(a, b SemanticContext) *AND {
vs := operands.Values()
opnds := make([]SemanticContext, len(vs))
- for i, v := range vs {
- opnds[i] = v.(SemanticContext)
- }
+ copy(opnds, vs)
and := new(AND)
and.opnds = opnds
@@ -316,12 +313,12 @@ func (a *AND) Hash() int {
return murmurFinish(h, len(a.opnds))
}
-func (a *OR) Hash() int {
- h := murmurInit(41) // Init with a value different from AND
- for _, op := range a.opnds {
+func (o *OR) Hash() int {
+ h := murmurInit(41) // Init with a value different from AND
+ for _, op := range o.opnds {
h = murmurUpdate(h, op.Hash())
}
- return murmurFinish(h, len(a.opnds))
+ return murmurFinish(h, len(o.opnds))
}
func (a *AND) String() string {
@@ -349,7 +346,7 @@ type OR struct {
func NewOR(a, b SemanticContext) *OR {
- operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst)
+ operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewOR() operands")
if aa, ok := a.(*OR); ok {
for _, o := range aa.opnds {
operands.Put(o)
@@ -382,9 +379,7 @@ func NewOR(a, b SemanticContext) *OR {
vs := operands.Values()
opnds := make([]SemanticContext, len(vs))
- for i, v := range vs {
- opnds[i] = v.(SemanticContext)
- }
+ copy(opnds, vs)
o := new(OR)
o.opnds = opnds
diff --git a/vendor/github.com/antlr4-go/antlr/v4/statistics.go b/vendor/github.com/antlr4-go/antlr/v4/statistics.go
new file mode 100644
index 000000000..70c0673a0
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/statistics.go
@@ -0,0 +1,281 @@
+//go:build antlr.stats
+
+package antlr
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "sync"
+)
+
+// This file allows the user to collect statistics about the run-time behavior of the ANTLR runtime. It is not enabled by default
+// and so incurs no time penalty. To enable it, you must build the runtime with the antlr.stats build tag.
+//
+
+// Tells various components to collect statistics - because it is only true when this file is included, it will
+// allow the compiler to completely eliminate all the code that is only used when collecting statistics.
+const collectStats = true
+
+// goRunStats is a collection of all the various data the ANTLR runtime has collected about a particular run.
+// It is exposed through the exported Statistics variable so that it can be used to look for things that are
+// not already looked for in the runtime statistics.
+type goRunStats struct {
+
+ // jStats is a slice of all the [JStatRec] records that have been created, which is one for EVERY collection created
+ // during a run. New records are added via AddJStatRec and can be mined for things that are not already
+ // looked for within this package.
+ //
+ jStats []*JStatRec
+ jStatsLock sync.RWMutex
+ topN int
+ topNByMax []*JStatRec
+ topNByUsed []*JStatRec
+ unusedCollections map[CollectionSource]int
+ counts map[CollectionSource]int
+}
+
+const (
+ collectionsFile = "collections"
+)
+
+var (
+ Statistics = &goRunStats{
+ topN: 10,
+ }
+)
+
+type statsOption func(*goRunStats) error
+
+// Configure allows the statistics system to be configured as the user wants and override the defaults
+func (s *goRunStats) Configure(options ...statsOption) error {
+ for _, option := range options {
+ err := option(s)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// WithTopN sets the number of things to list in the report when we are concerned with the top N things.
+//
+// For example, if you want to see the top 20 collections by size, you can do:
+//
+// antlr.Statistics.Configure(antlr.WithTopN(20))
+func WithTopN(topN int) statsOption {
+ return func(s *goRunStats) error {
+ s.topN = topN
+ return nil
+ }
+}
+
+// Analyze looks through all the statistical records and computes all the outputs that might be useful to the user.
+//
+// The function gathers and analyzes a number of statistics about any particular run of
+// an ANTLR generated recognizer. In the vast majority of cases, the statistics are only
+// useful to maintainers of ANTLR itself, but they can be useful to users as well. They may be
+// especially useful in tracking down bugs or performance problems when an ANTLR user could
+// supply the output from this package, but cannot supply the grammar file(s) they are using, even
+// privately to the maintainers.
+//
+// The statistics are gathered by the runtime itself, and are not gathered by the parser or lexer, but the user
+// must call this function themselves to analyze the statistics. This is because none of the infrastructure is
+// extant unless the calling program is built with the antlr.stats tag like so:
+//
+// go build -tags antlr.stats .
+//
+// When a program is built with the antlr.stats tag, the Statistics object is created and available outside
+// the package. The user can then call the [Statistics.Analyze] function to analyze the statistics and then call the
+// [Statistics.Report] function to report the statistics.
+//
+// Please forward any questions about this package to the ANTLR discussion groups on GitHub or send them to
+// me [Jim Idle] directly at jimi@idle.ws
+//
+// [Jim Idle]: https://github.com/jim-idle
+func (s *goRunStats) Analyze() {
+
+ // Look for anything that looks strange and record it in our local maps etc for the report to present it
+ //
+ s.CollectionAnomalies()
+ s.TopNCollections()
+}
+
+// TopNCollections looks through all the statistical records and gathers the top N collections by maximum size
+// and by number of accesses.
+func (s *goRunStats) TopNCollections() {
+
+ // Let's sort the stat records by MaxSize
+ //
+ sort.Slice(s.jStats, func(i, j int) bool {
+ return s.jStats[i].MaxSize > s.jStats[j].MaxSize
+ })
+
+ for i := 0; i < len(s.jStats) && i < s.topN; i++ {
+ s.topNByMax = append(s.topNByMax, s.jStats[i])
+ }
+
+ // Sort by the number of times used
+ //
+ sort.Slice(s.jStats, func(i, j int) bool {
+ return s.jStats[i].Gets+s.jStats[i].Puts > s.jStats[j].Gets+s.jStats[j].Puts
+ })
+ for i := 0; i < len(s.jStats) && i < s.topN; i++ {
+ s.topNByUsed = append(s.topNByUsed, s.jStats[i])
+ }
+}
+
+// Report dumps an asciidoc formatted report of all the statistics collected during a run to the given dir output
+// path, which should represent a directory. Generated files will be prefixed with the given prefix and will be
+// given a type name such as `collections` and a .adoc suffix.
+func (s *goRunStats) Report(dir string, prefix string) error {
+
+ isDir, err := isDirectory(dir)
+ switch {
+ case err != nil:
+ return err
+ case !isDir:
+ return fmt.Errorf("output directory `%s` is not a directory", dir)
+ }
+ s.reportCollections(dir, prefix)
+
+ // Clean out any old data in case the user forgets
+ //
+ s.Reset()
+ return nil
+}
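+
+// A usage sketch, with an illustrative directory and prefix, for a binary
+// built with -tags antlr.stats:
+//
+// antlr.Statistics.Analyze()
+// if err := antlr.Statistics.Report("/tmp/stats", "myparser"); err != nil {
+//     log.Fatal(err)
+// }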
+
+func (s *goRunStats) Reset() {
+ s.jStats = nil
+ s.topNByUsed = nil
+ s.topNByMax = nil
+}
+
+func (s *goRunStats) reportCollections(dir, prefix string) {
+ cname := filepath.Join(dir, ".asciidoctor")
+ // If the file doesn't exist, create it, or append to the file
+ f, err := os.OpenFile(cname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+ if err != nil {
+ log.Fatal(err)
+ }
+ _, _ = f.WriteString(`// .asciidoctorconfig
+++++
+
+++++`)
+ _ = f.Close()
+
+ fname := filepath.Join(dir, prefix+"_"+"_"+collectionsFile+"_"+".adoc")
+ // If the file doesn't exist, create it, or append to the file
+ f, err = os.OpenFile(fname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer func(f *os.File) {
+ err := f.Close()
+ if err != nil {
+ log.Fatal(err)
+ }
+ }(f)
+ _, _ = f.WriteString("= Collections for " + prefix + "\n\n")
+
+ _, _ = f.WriteString("== Summary\n")
+
+ if s.unusedCollections != nil {
+ _, _ = f.WriteString("=== Unused Collections\n")
+ _, _ = f.WriteString("Unused collections incur a penalty for allocation that makes them a candidate for either\n")
+ _, _ = f.WriteString(" removal or optimization. If you are using a collection that is not used, you should\n")
+ _, _ = f.WriteString(" consider removing it. If you are using a collection that is used, but not very often,\n")
+ _, _ = f.WriteString(" you should consider using lazy initialization to defer the allocation until it is\n")
+ _, _ = f.WriteString(" actually needed.\n\n")
+
+ _, _ = f.WriteString("\n.Unused collections\n")
+ _, _ = f.WriteString(`[cols="<3,>1"]` + "\n\n")
+ _, _ = f.WriteString("|===\n")
+ _, _ = f.WriteString("| Type | Count\n")
+
+ for k, v := range s.unusedCollections {
+ _, _ = f.WriteString("| " + CollectionDescriptors[k].SybolicName + " | " + strconv.Itoa(v) + "\n")
+ }
+ _, _ = f.WriteString("|===\n\n")
+ }
+
+ _, _ = f.WriteString("\n.Summary of Collections\n")
+ _, _ = f.WriteString(`[cols="<3,>1"]` + "\n\n")
+ _, _ = f.WriteString("|===\n")
+ _, _ = f.WriteString("| Type | Count\n")
+ for k, v := range s.counts {
+ _, _ = f.WriteString("| " + CollectionDescriptors[k].SybolicName + " | " + strconv.Itoa(v) + "\n")
+ }
+ _, _ = f.WriteString("| Total | " + strconv.Itoa(len(s.jStats)) + "\n")
+ _, _ = f.WriteString("|===\n\n")
+
+ _, _ = f.WriteString("\n.Summary of Top " + strconv.Itoa(s.topN) + " Collections by MaxSize\n")
+ _, _ = f.WriteString(`[cols="<1,<3,>1,>1,>1,>1"]` + "\n\n")
+ _, _ = f.WriteString("|===\n")
+ _, _ = f.WriteString("| Source | Description | MaxSize | EndSize | Puts | Gets\n")
+ for _, c := range s.topNByMax {
+ _, _ = f.WriteString("| " + CollectionDescriptors[c.Source].SybolicName + "\n")
+ _, _ = f.WriteString("| " + c.Description + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.MaxSize) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.CurSize) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.Puts) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.Gets) + "\n")
+ _, _ = f.WriteString("\n")
+ }
+ _, _ = f.WriteString("|===\n\n")
+
+ _, _ = f.WriteString("\n.Summary of Top " + strconv.Itoa(s.topN) + " Collections by Access\n")
+ _, _ = f.WriteString(`[cols="<1,<3,>1,>1,>1,>1,>1"]` + "\n\n")
+ _, _ = f.WriteString("|===\n")
+ _, _ = f.WriteString("| Source | Description | MaxSize | EndSize | Puts | Gets | P+G\n")
+ for _, c := range s.topNByUsed {
+ _, _ = f.WriteString("| " + CollectionDescriptors[c.Source].SybolicName + "\n")
+ _, _ = f.WriteString("| " + c.Description + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.MaxSize) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.CurSize) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.Puts) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.Gets) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.Gets+c.Puts) + "\n")
+ _, _ = f.WriteString("\n")
+ }
+ _, _ = f.WriteString("|===\n\n")
+}
+
+// AddJStatRec adds a [JStatRec] record to the [goRunStats] collection when the antlr.stats build tag is enabled.
+func (s *goRunStats) AddJStatRec(rec *JStatRec) {
+ s.jStatsLock.Lock()
+ defer s.jStatsLock.Unlock()
+ s.jStats = append(s.jStats, rec)
+}
+
+// CollectionAnomalies looks through all the statistical records and gathers any anomalies that have been found.
+func (s *goRunStats) CollectionAnomalies() {
+ s.jStatsLock.RLock()
+ defer s.jStatsLock.RUnlock()
+ s.counts = make(map[CollectionSource]int, len(s.jStats))
+ for _, c := range s.jStats {
+
+ // Accumulate raw counts
+ //
+ s.counts[c.Source]++
+
+ // Look for allocated but unused collections and count them
+ if c.MaxSize == 0 && c.Puts == 0 {
+ if s.unusedCollections == nil {
+ s.unusedCollections = make(map[CollectionSource]int)
+ }
+ s.unusedCollections[c.Source]++
+ }
+ if c.MaxSize > 6000 {
+ fmt.Println("Collection ", c.Description, "accumulated a max size of ", c.MaxSize, " - this is probably too large and indicates a poorly formed grammar")
+ }
+ }
+
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/stats_data.go b/vendor/github.com/antlr4-go/antlr/v4/stats_data.go
new file mode 100644
index 000000000..4d9eb94e5
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/stats_data.go
@@ -0,0 +1,23 @@
+package antlr
+
+// A JStatRec is a record of a particular use of a [JStore], [JMap] or [JPCMap] collection. Typically, it will be
+// used to look for unused collections that were allocated anyway, problems with hash bucket clashes, and anomalies
+// such as huge numbers of Gets with no entries found (GetNoEnt). You can refer to the CollectionAnomalies() function
+// for ideas on what can be gleaned from these statistics about collections.
+type JStatRec struct {
+ Source CollectionSource
+ MaxSize int
+ CurSize int
+ Gets int
+ GetHits int
+ GetMisses int
+ GetHashConflicts int
+ GetNoEnt int
+ Puts int
+ PutHits int
+ PutMisses int
+ PutHashConflicts int
+ MaxSlotSize int
+ Description string
+ CreateStack []byte
+}
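+
+// A sketch of how a collection might record itself when the antlr.stats build
+// tag is enabled (field values illustrative):
+//
+// Statistics.AddJStatRec(&JStatRec{
+//     Source:      AltSetCollection,
+//     Description: "temporary alt set",
+//     MaxSize:     42,
+//     Puts:        100,
+//     Gets:        250,
+// })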
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go b/vendor/github.com/antlr4-go/antlr/v4/token.go
similarity index 86%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go
rename to vendor/github.com/antlr4-go/antlr/v4/token.go
index f73b06bc6..9670efb82 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/token.go
@@ -35,6 +35,8 @@ type Token interface {
GetTokenSource() TokenSource
GetInputStream() CharStream
+
+ String() string
}
type BaseToken struct {
@@ -53,7 +55,7 @@ type BaseToken struct {
const (
TokenInvalidType = 0
- // During lookahead operations, this "token" signifies we hit rule end ATN state
+ // TokenEpsilon - during lookahead operations, this "token" signifies we hit the rule end [ATN] state
// and did not follow it despite needing to.
TokenEpsilon = -2
@@ -61,15 +63,16 @@ const (
TokenEOF = -1
- // All tokens go to the parser (unless Skip() is called in that rule)
+ // TokenDefaultChannel is the default channel upon which tokens are sent to the parser.
+ //
+ // All tokens go to the parser (unless [Skip] is called in the lexer rule)
// on a particular "channel". The parser tunes to a particular channel
// so that whitespace etc... can go to the parser on a "hidden" channel.
-
TokenDefaultChannel = 0
- // Anything on different channel than DEFAULT_CHANNEL is not parsed
- // by parser.
-
+ // TokenHiddenChannel defines the normal hidden channel - the parser will not see tokens that are not on [TokenDefaultChannel].
+ //
+ // Anything on a different channel than TokenDefaultChannel is not parsed by parser.
TokenHiddenChannel = 1
)
@@ -118,21 +121,22 @@ func (b *BaseToken) GetInputStream() CharStream {
}
type CommonToken struct {
- *BaseToken
+ BaseToken
}
func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken {
- t := new(CommonToken)
-
- t.BaseToken = new(BaseToken)
+ t := &CommonToken{
+ BaseToken: BaseToken{
+ source: source,
+ tokenType: tokenType,
+ channel: channel,
+ start: start,
+ stop: stop,
+ tokenIndex: -1,
+ },
+ }
- t.source = source
- t.tokenType = tokenType
- t.channel = channel
- t.start = start
- t.stop = stop
- t.tokenIndex = -1
if t.source.tokenSource != nil {
t.line = source.tokenSource.GetLine()
t.column = source.tokenSource.GetCharPositionInLine()
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_source.go b/vendor/github.com/antlr4-go/antlr/v4/token_source.go
similarity index 100%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_source.go
rename to vendor/github.com/antlr4-go/antlr/v4/token_source.go
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go b/vendor/github.com/antlr4-go/antlr/v4/token_stream.go
similarity index 90%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go
rename to vendor/github.com/antlr4-go/antlr/v4/token_stream.go
index 1527d43f6..bf4ff6633 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/token_stream.go
@@ -8,13 +8,14 @@ type TokenStream interface {
IntStream
LT(k int) Token
+ Reset()
Get(index int) Token
GetTokenSource() TokenSource
SetTokenSource(TokenSource)
GetAllText() string
- GetTextFromInterval(*Interval) string
+ GetTextFromInterval(Interval) string
GetTextFromRuleContext(RuleContext) string
GetTextFromTokens(Token, Token) string
}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go b/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go
new file mode 100644
index 000000000..ccf59b465
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go
@@ -0,0 +1,662 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// TokenStreamRewriter is useful for rewriting out a buffered input token stream after doing some
+// augmentation or other manipulations on it.
+//
+// You can insert stuff, replace, and delete chunks. Note that the operations
+// are done lazily - only if you convert the buffer to a string with
+// GetText(). This is very efficient because you are not
+// moving data around all the time. As the buffer of tokens is converted to
+// strings, the GetText() method(s) scan the input token stream and
+// check to see if there is an operation at the current index. If so, the
+// operation is done and then normal string rendering continues on the
+// buffer. This is like having multiple Turing machine instruction streams
+// (programs) operating on a single input tape. :)
+//
+// This rewriter makes no modifications to the token stream. It does not ask the
+// stream to fill itself up nor does it advance the input cursor. The token
+// stream index will return the same value before and after any GetText() call.
+//
+// The rewriter only works on tokens that you have in the buffer and ignores the
+// current input cursor. If you are buffering tokens on-demand, calling
+// GetText() halfway through the input will only do rewrites for those
+// tokens in the first half of the file.
+//
+// Since the operations are done lazily at GetText-time, operations do
+// not screw up the token index values. That is, an insert operation at token
+// index i does not change the index values for tokens i+1..n-1.
+//
+// Because operations never actually alter the buffer, you may always get the
+// original token stream back without undoing anything. Since the instructions
+// are queued up, you can easily simulate transactions and roll back any changes
+// if there is an error just by removing instructions. For example, in Java:
+//
+// CharStream input = new ANTLRFileStream("input");
+// TLexer lex = new TLexer(input);
+// CommonTokenStream tokens = new CommonTokenStream(lex);
+// T parser = new T(tokens);
+// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
+// parser.startRule();
+//
+// Then in the rules, you can execute (assuming rewriter is visible):
+//
+// Token t,u;
+// ...
+// rewriter.insertAfter(t, "text to put after t");
+// rewriter.insertAfter(u, "text after u");
+// System.out.println(rewriter.getText());
+//
+// You can also have multiple "instruction streams" and get multiple rewrites
+// from a single pass over the input. Just name the instruction streams and use
+// that name again when printing the buffer. This could be useful for generating
+// a C file and also its header file - all from the same buffer:
+//
+// rewriter.insertAfter("pass1", t, "text to put after t");
+// rewriter.insertAfter("pass2", u, "text after u");
+// System.out.println(rewriter.getText("pass1"));
+// System.out.println(rewriter.getText("pass2"));
+//
+// If you don't use named rewrite streams, a "default" stream is used as the
+// first example shows.
+
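+// In this Go runtime the calls above might look like the following sketch,
+// where NewTLexer stands in for a generated lexer constructor:
+//
+// input, _ := NewFileStream("input")
+// lex := NewTLexer(input)
+// tokens := NewCommonTokenStream(lex, TokenDefaultChannel)
+// rewriter := NewTokenStreamRewriter(tokens)
+// rewriter.InsertAfterDefault(10, "text to put after token 10")
+// fmt.Println(rewriter.GetTextDefault())
+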
+const (
+ DefaultProgramName = "default"
+ ProgramInitSize = 100
+ MinTokenIndex = 0
+)
+
+// Define the rewrite operation hierarchy
+
+type RewriteOperation interface {
+
+ // Execute the rewrite operation by possibly adding to the buffer.
+ // Return the index of the next token to operate on.
+ Execute(buffer *bytes.Buffer) int
+ String() string
+ GetInstructionIndex() int
+ GetIndex() int
+ GetText() string
+ GetOpName() string
+ GetTokens() TokenStream
+ SetInstructionIndex(val int)
+ SetIndex(int)
+ SetText(string)
+ SetOpName(string)
+ SetTokens(TokenStream)
+}
+
+type BaseRewriteOperation struct {
+ //Current index of rewrites list
+ instructionIndex int
+ //Token buffer index
+ index int
+ //Substitution text
+ text string
+ //Actual operation name
+ opName string
+ //Pointer to token steam
+ tokens TokenStream
+}
+
+func (op *BaseRewriteOperation) GetInstructionIndex() int {
+ return op.instructionIndex
+}
+
+func (op *BaseRewriteOperation) GetIndex() int {
+ return op.index
+}
+
+func (op *BaseRewriteOperation) GetText() string {
+ return op.text
+}
+
+func (op *BaseRewriteOperation) GetOpName() string {
+ return op.opName
+}
+
+func (op *BaseRewriteOperation) GetTokens() TokenStream {
+ return op.tokens
+}
+
+func (op *BaseRewriteOperation) SetInstructionIndex(val int) {
+ op.instructionIndex = val
+}
+
+func (op *BaseRewriteOperation) SetIndex(val int) {
+ op.index = val
+}
+
+func (op *BaseRewriteOperation) SetText(val string) {
+ op.text = val
+}
+
+func (op *BaseRewriteOperation) SetOpName(val string) {
+ op.opName = val
+}
+
+func (op *BaseRewriteOperation) SetTokens(val TokenStream) {
+ op.tokens = val
+}
+
+func (op *BaseRewriteOperation) Execute(_ *bytes.Buffer) int {
+ return op.index
+}
+
+func (op *BaseRewriteOperation) String() string {
+ return fmt.Sprintf("<%s@%d:\"%s\">",
+ op.opName,
+ op.tokens.Get(op.GetIndex()),
+ op.text,
+ )
+
+}
+
+type InsertBeforeOp struct {
+ BaseRewriteOperation
+}
+
+func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp {
+ return &InsertBeforeOp{BaseRewriteOperation: BaseRewriteOperation{
+ index: index,
+ text: text,
+ opName: "InsertBeforeOp",
+ tokens: stream,
+ }}
+}
+
+func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int {
+ buffer.WriteString(op.text)
+ if op.tokens.Get(op.index).GetTokenType() != TokenEOF {
+ buffer.WriteString(op.tokens.Get(op.index).GetText())
+ }
+ return op.index + 1
+}
+
+func (op *InsertBeforeOp) String() string {
+ return op.BaseRewriteOperation.String()
+}
+
+// InsertAfterOp distinguishes between insert after/before to do the "insert after" instructions
+// first and then the "insert before" instructions at same index. Implementation
+// of "insert after" is "insert before index+1".
+type InsertAfterOp struct {
+ BaseRewriteOperation
+}
+
+func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp {
+ return &InsertAfterOp{
+ BaseRewriteOperation: BaseRewriteOperation{
+ index: index + 1, // "insert after" is implemented as "insert before index+1"
+ text: text,
+ opName: "InsertAfterOp",
+ tokens: stream,
+ },
+ }
+}
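+
+// Sketch: an "insert after token 5" is stored as an insert before index 6,
+// given some TokenStream stream:
+//
+// op := NewInsertAfterOp(5, "x", stream)
+// idx := op.GetIndex() // 6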
+
+func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int {
+ buffer.WriteString(op.text)
+ if op.tokens.Get(op.index).GetTokenType() != TokenEOF {
+ buffer.WriteString(op.tokens.Get(op.index).GetText())
+ }
+ return op.index + 1
+}
+
+func (op *InsertAfterOp) String() string {
+ return op.BaseRewriteOperation.String()
+}
+
+// ReplaceOp tries to replace range from x..y with (y-x)+1 ReplaceOp
+// instructions.
+type ReplaceOp struct {
+ BaseRewriteOperation
+ LastIndex int
+}
+
+func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp {
+ return &ReplaceOp{
+ BaseRewriteOperation: BaseRewriteOperation{
+ index: from,
+ text: text,
+ opName: "ReplaceOp",
+ tokens: stream,
+ },
+ LastIndex: to,
+ }
+}
+
+func (op *ReplaceOp) Execute(buffer *bytes.Buffer) int {
+ if op.text != "" {
+ buffer.WriteString(op.text)
+ }
+ return op.LastIndex + 1
+}
+
+func (op *ReplaceOp) String() string {
+ if op.text == "" {
+ return fmt.Sprintf("",
+ op.tokens.Get(op.index), op.tokens.Get(op.LastIndex))
+ }
+ return fmt.Sprintf("",
+ op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text)
+}
+
+type TokenStreamRewriter struct {
+ //Our source stream
+ tokens TokenStream
+ // You may have multiple, named streams of rewrite operations.
+ // I'm calling these things "programs."
+ // Maps String (name) → rewrite (List)
+ programs map[string][]RewriteOperation
+ lastRewriteTokenIndexes map[string]int
+}
+
+func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter {
+ return &TokenStreamRewriter{
+ tokens: tokens,
+ programs: map[string][]RewriteOperation{
+ DefaultProgramName: make([]RewriteOperation, 0, ProgramInitSize),
+ },
+ lastRewriteTokenIndexes: map[string]int{},
+ }
+}
+
+func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream {
+ return tsr.tokens
+}
+
+// Rollback the instruction stream for a program so that
+// the indicated instruction (via instructionIndex) is no
+// longer in the stream. UNTESTED!
+func (tsr *TokenStreamRewriter) Rollback(programName string, instructionIndex int) {
+ is, ok := tsr.programs[programName]
+ if ok {
+ tsr.programs[programName] = is[MinTokenIndex:instructionIndex]
+ }
+}
+
+func (tsr *TokenStreamRewriter) RollbackDefault(instructionIndex int) {
+ tsr.Rollback(DefaultProgramName, instructionIndex)
+}
+
+// DeleteProgram resets the program so that no instructions exist
+func (tsr *TokenStreamRewriter) DeleteProgram(programName string) {
+ tsr.Rollback(programName, MinTokenIndex) //TODO: double test on that cause lower bound is not included
+}
+
+func (tsr *TokenStreamRewriter) DeleteProgramDefault() {
+ tsr.DeleteProgram(DefaultProgramName)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfter(programName string, index int, text string) {
+ // to insert after, just insert before next index (even if past end)
+ var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens)
+ rewrites := tsr.GetProgram(programName)
+ op.SetInstructionIndex(len(rewrites))
+ tsr.AddToProgram(programName, op)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string) {
+ tsr.InsertAfter(DefaultProgramName, index, text)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfterToken(programName string, token Token, text string) {
+ tsr.InsertAfter(programName, token.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter) InsertBefore(programName string, index int, text string) {
+ var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens)
+ rewrites := tsr.GetProgram(programName)
+ op.SetInstructionIndex(len(rewrites))
+ tsr.AddToProgram(programName, op)
+}
+
+func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string) {
+ tsr.InsertBefore(DefaultProgramName, index, text)
+}
+
+func (tsr *TokenStreamRewriter) InsertBeforeToken(programName string, token Token, text string) {
+ tsr.InsertBefore(programName, token.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter) Replace(programName string, from, to int, text string) {
+ if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size() {
+ panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)",
+ from, to, tsr.tokens.Size()))
+ }
+ var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens)
+ rewrites := tsr.GetProgram(programName)
+ op.SetInstructionIndex(len(rewrites))
+ tsr.AddToProgram(programName, op)
+}
+
+func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string) {
+ tsr.Replace(DefaultProgramName, from, to, text)
+}
+
+func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string) {
+ tsr.ReplaceDefault(index, index, text)
+}
+
+func (tsr *TokenStreamRewriter) ReplaceToken(programName string, from, to Token, text string) {
+ tsr.Replace(programName, from.GetTokenIndex(), to.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string) {
+ tsr.ReplaceToken(DefaultProgramName, from, to, text)
+}
+
+func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string) {
+ tsr.ReplaceTokenDefault(index, index, text)
+}
+
+func (tsr *TokenStreamRewriter) Delete(programName string, from, to int) {
+ tsr.Replace(programName, from, to, "")
+}
+
+func (tsr *TokenStreamRewriter) DeleteDefault(from, to int) {
+ tsr.Delete(DefaultProgramName, from, to)
+}
+
+func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int) {
+ tsr.DeleteDefault(index, index)
+}
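+
+// Sketch: deleting token 3 is just a replace with empty text:
+//
+// tsr.DeleteDefaultPos(3) // equivalent to tsr.ReplaceDefault(3, 3, "")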
+
+func (tsr *TokenStreamRewriter) DeleteToken(programName string, from, to Token) {
+ tsr.ReplaceToken(programName, from, to, "")
+}
+
+func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token) {
+ tsr.DeleteToken(DefaultProgramName, from, to)
+}
+
+func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(programName string) int {
+ i, ok := tsr.lastRewriteTokenIndexes[programName]
+ if !ok {
+ return -1
+ }
+ return i
+}
+
+func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int {
+ return tsr.GetLastRewriteTokenIndex(DefaultProgramName)
+}
+
+func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(programName string, i int) {
+ tsr.lastRewriteTokenIndexes[programName] = i
+}
+
+func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation {
+ is := make([]RewriteOperation, 0, ProgramInitSize)
+ tsr.programs[name] = is
+ return is
+}
+
+func (tsr *TokenStreamRewriter) AddToProgram(name string, op RewriteOperation) {
+ is := tsr.GetProgram(name)
+ is = append(is, op)
+ tsr.programs[name] = is
+}
+
+func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation {
+ is, ok := tsr.programs[name]
+ if !ok {
+ is = tsr.InitializeProgram(name)
+ }
+ return is
+}
+
+// GetTextDefault returns the text from the original tokens altered per the
+// instructions given to this rewriter.
+func (tsr *TokenStreamRewriter) GetTextDefault() string {
+ return tsr.GetText(
+ DefaultProgramName,
+ NewInterval(0, tsr.tokens.Size()-1))
+}
+
+// GetText returns the text from the original tokens altered per the
+// instructions given to this rewriter.
+func (tsr *TokenStreamRewriter) GetText(programName string, interval Interval) string {
+ rewrites := tsr.programs[programName]
+ start := interval.Start
+ stop := interval.Stop
+ // ensure start/end are in range
+ stop = min(stop, tsr.tokens.Size()-1)
+ start = max(start, 0)
+ if len(rewrites) == 0 {
+ return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute
+ }
+ buf := bytes.Buffer{}
+ // First, optimize instruction stream
+ indexToOp := reduceToSingleOperationPerIndex(rewrites)
+ // Walk buffer, executing instructions and emitting tokens
+ for i := start; i <= stop && i < tsr.tokens.Size(); {
+ op := indexToOp[i]
+ delete(indexToOp, i) // remove so any left have index size-1
+ t := tsr.tokens.Get(i)
+ if op == nil {
+ // no operation at that index, just dump token
+ if t.GetTokenType() != TokenEOF {
+ buf.WriteString(t.GetText())
+ }
+ i++ // move to next token
+ } else {
+ i = op.Execute(&buf) // execute operation and skip
+ }
+ }
+ // include stuff after end if it's last index in buffer
+ // So, if they did an insertAfter(lastValidIndex, "foo"), include
+ // foo if end==lastValidIndex.
+ if stop == tsr.tokens.Size()-1 {
+ // Scan any remaining operations after last token
+ // should be included (they will be inserts).
+ for _, op := range indexToOp {
+ if op.GetIndex() >= tsr.tokens.Size()-1 {
+ buf.WriteString(op.GetText())
+ }
+ }
+ }
+ return buf.String()
+}
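+
+// Sketch: rendering only tokens 2..5 of the default program:
+//
+// text := tsr.GetText(DefaultProgramName, NewInterval(2, 5))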
+
+// reduceToSingleOperationPerIndex combines operations and reports invalid operations (like
+// overlapping replaces that are not completely nested). Inserts to the
+// same index need to be combined, etc.
+//
+// Here are the cases:
+//
+// I.i.u I.j.v leave alone, non-overlapping
+// I.i.u I.i.v combine: Iivu
+//
+// R.i-j.u R.x-y.v | i-j in x-y delete first R
+// R.i-j.u R.i-j.v delete first R
+// R.i-j.u R.x-y.v | x-y in i-j ERROR
+// R.i-j.u R.x-y.v | boundaries overlap ERROR
+//
+// Delete special case of replace (text==null):
+// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
+//
+// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before
+// we're not deleting i)
+// I.i.u R.x-y.v | i not in (x+1)-y leave alone, non-overlapping
+// R.x-y.v I.i.u | i in x-y ERROR
+// R.x-y.v I.x.u R.x-y.uv (combine, delete I)
+// R.x-y.v I.i.u | i not in x-y leave alone, non-overlapping
+//
+// I.i.u = insert u before op @ index i
+// R.x-y.u = replace x-y indexed tokens with u
+//
+// First we need to examine replaces. For any replace op:
+//
+// 1. wipe out any insertions before op within that range.
+// 2. Drop any replace op before that is contained completely within
+// that range.
+// 3. Throw exception upon boundary overlap with any previous replace.
+//
+// Then we can deal with inserts:
+//
+// 1. for any inserts to same index, combine even if not adjacent.
+// 2. for any prior replace with same left boundary, combine this
+// insert with replace and delete this 'replace'.
+// 3. throw exception if index in same range as previous replace
+//
+// Don't actually delete; make op null in list. Easier to walk list.
+// Later we can throw as we add to index → op map.
+//
+// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
+// inserted stuff would be before the 'replace' range. But, if you
+// add tokens in front of a method body '{' and then delete the method
+// body, I think the stuff before the '{' you added should disappear too.
+//
+// The func returns a map from token index to operation.
+func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation {
+ // WALK REPLACES
+ for i := 0; i < len(rewrites); i++ {
+ op := rewrites[i]
+ if op == nil {
+ continue
+ }
+ rop, ok := op.(*ReplaceOp)
+ if !ok {
+ continue
+ }
+ // Wipe prior inserts within range
+ for j := 0; j < i && j < len(rewrites); j++ {
+ if iop, ok := rewrites[j].(*InsertBeforeOp); ok {
+ if iop.index == rop.index {
+ // E.g., insert before 2, delete 2..2; update replace
+ // text to include insert before, kill insert
+ rewrites[iop.instructionIndex] = nil
+ if rop.text != "" {
+ rop.text = iop.text + rop.text
+ } else {
+ rop.text = iop.text
+ }
+ } else if iop.index > rop.index && iop.index <= rop.LastIndex {
+ // delete insert as it's a no-op.
+ rewrites[iop.instructionIndex] = nil
+ }
+ }
+ }
+ // Drop any prior replaces contained within
+ for j := 0; j < i && j < len(rewrites); j++ {
+ if prevop, ok := rewrites[j].(*ReplaceOp); ok {
+ if prevop.index >= rop.index && prevop.LastIndex <= rop.LastIndex {
+ // delete replace as it's a no-op.
+ rewrites[prevop.instructionIndex] = nil
+ continue
+ }
+ // throw exception unless disjoint or identical
+ disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex
+ // Delete special case of replace (text==null):
+ // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
+ if prevop.text == "" && rop.text == "" && !disjoint {
+ rewrites[prevop.instructionIndex] = nil
+ rop.index = min(prevop.index, rop.index)
+ rop.LastIndex = max(prevop.LastIndex, rop.LastIndex)
+ } else if !disjoint {
+ panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String())
+ }
+ }
+ }
+ }
+ // WALK INSERTS
+ for i := 0; i < len(rewrites); i++ {
+ op := rewrites[i]
+ if op == nil {
+ continue
+ }
+ //hack to replicate inheritance in composition
+ _, iok := rewrites[i].(*InsertBeforeOp)
+ _, aok := rewrites[i].(*InsertAfterOp)
+ if !iok && !aok {
+ continue
+ }
+ iop := rewrites[i]
+ // combine current insert with prior if any at same index
+ // deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic
+ for j := 0; j < i && j < len(rewrites); j++ {
+ if nextIop, ok := rewrites[j].(*InsertAfterOp); ok {
+ if nextIop.index == iop.GetIndex() {
+ iop.SetText(nextIop.text + iop.GetText())
+ rewrites[j] = nil
+ }
+ }
+ if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok {
+ if prevIop.index == iop.GetIndex() {
+ iop.SetText(iop.GetText() + prevIop.text)
+ rewrites[prevIop.instructionIndex] = nil
+ }
+ }
+ }
+ // look for replaces where iop.index is in range; error
+ for j := 0; j < i && j < len(rewrites); j++ {
+ if rop, ok := rewrites[j].(*ReplaceOp); ok {
+ if iop.GetIndex() == rop.index {
+ rop.text = iop.GetText() + rop.text
+ rewrites[i] = nil
+ continue
+ }
+ if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex {
+ panic("insert op " + iop.String() + " within boundaries of previous " + rop.String())
+ }
+ }
+ }
+ }
+ m := map[int]RewriteOperation{}
+ for i := 0; i < len(rewrites); i++ {
+ op := rewrites[i]
+ if op == nil {
+ continue
+ }
+ if _, ok := m[op.GetIndex()]; ok {
+ panic("should only be one op per index")
+ }
+ m[op.GetIndex()] = op
+ }
+ return m
+}
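+
+// Sketch: an insert-before and a replace at the same index collapse into a
+// single ReplaceOp whose text is the concatenation:
+//
+// tsr.InsertBeforeDefault(2, "a")
+// tsr.ReplaceDefault(2, 2, "b")
+// // GetTextDefault() now renders "ab" in place of token 2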
+
+/*
+ Quick fixing Go lack of overloads
+*/
+
+func max(a, b int) int {
+ if a > b {
+ return a
+ } else {
+ return b
+ }
+}
+func min(a, b int) int {
+ if a < b {
+ return a
+ } else {
+ return b
+ }
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trace_listener.go b/vendor/github.com/antlr4-go/antlr/v4/trace_listener.go
similarity index 100%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trace_listener.go
rename to vendor/github.com/antlr4-go/antlr/v4/trace_listener.go
diff --git a/vendor/github.com/antlr4-go/antlr/v4/transition.go b/vendor/github.com/antlr4-go/antlr/v4/transition.go
new file mode 100644
index 000000000..313b0fc12
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/transition.go
@@ -0,0 +1,439 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// atom, set, epsilon, action, predicate, rule transitions.
+//
+// This is a one way link. It emanates from a state (usually via a list of
+// transitions) and has a target state.
+//
+// Since we never have to change the ATN transitions once we construct it,
+// we can fix these transitions as specific classes. The DFA transitions, on the
+// other hand, need to update the labels as they add transitions to
+// the states. We'll use the term Edge for the DFA to distinguish them from
+// ATN transitions.
+
+type Transition interface {
+ getTarget() ATNState
+ setTarget(ATNState)
+ getIsEpsilon() bool
+ getLabel() *IntervalSet
+ getSerializationType() int
+ Matches(int, int, int) bool
+}
+
+type BaseTransition struct {
+ target ATNState
+ isEpsilon bool
+ label int
+ intervalSet *IntervalSet
+ serializationType int
+}
+
+func NewBaseTransition(target ATNState) *BaseTransition {
+
+ if target == nil {
+ panic("target cannot be nil.")
+ }
+
+ t := new(BaseTransition)
+
+ t.target = target
+ // Are we epsilon, action, sempred?
+ t.isEpsilon = false
+ t.intervalSet = nil
+
+ return t
+}
+
+func (t *BaseTransition) getTarget() ATNState {
+ return t.target
+}
+
+func (t *BaseTransition) setTarget(s ATNState) {
+ t.target = s
+}
+
+func (t *BaseTransition) getIsEpsilon() bool {
+ return t.isEpsilon
+}
+
+func (t *BaseTransition) getLabel() *IntervalSet {
+ return t.intervalSet
+}
+
+func (t *BaseTransition) getSerializationType() int {
+ return t.serializationType
+}
+
+func (t *BaseTransition) Matches(_, _, _ int) bool {
+ panic("Not implemented")
+}
+
+const (
+ TransitionEPSILON = 1
+ TransitionRANGE = 2
+ TransitionRULE = 3
+ TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}?
+ TransitionATOM = 5
+ TransitionACTION = 6
+ TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2
+ TransitionNOTSET = 8
+ TransitionWILDCARD = 9
+ TransitionPRECEDENCE = 10
+)
+
+//goland:noinspection GoUnusedGlobalVariable
+var TransitionserializationNames = []string{
+ "INVALID",
+ "EPSILON",
+ "RANGE",
+ "RULE",
+ "PREDICATE",
+ "ATOM",
+ "ACTION",
+ "SET",
+ "NOT_SET",
+ "WILDCARD",
+ "PRECEDENCE",
+}
+
+//var TransitionserializationTypes struct {
+// EpsilonTransition int
+// RangeTransition int
+// RuleTransition int
+// PredicateTransition int
+// AtomTransition int
+// ActionTransition int
+// SetTransition int
+// NotSetTransition int
+// WildcardTransition int
+// PrecedencePredicateTransition int
+//}{
+// TransitionEPSILON,
+// TransitionRANGE,
+// TransitionRULE,
+// TransitionPREDICATE,
+// TransitionATOM,
+// TransitionACTION,
+// TransitionSET,
+// TransitionNOTSET,
+// TransitionWILDCARD,
+// TransitionPRECEDENCE
+//}
+
+// AtomTransition
+// TODO: make all transitions sets? no, should remove set edges
+type AtomTransition struct {
+ BaseTransition
+}
+
+func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition {
+ t := &AtomTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionATOM,
+ label: intervalSet,
+ isEpsilon: false,
+ },
+ }
+ t.intervalSet = t.makeLabel()
+
+ return t
+}
+
+func (t *AtomTransition) makeLabel() *IntervalSet {
+ s := NewIntervalSet()
+ s.addOne(t.label)
+ return s
+}
+
+func (t *AtomTransition) Matches(symbol, _, _ int) bool {
+ return t.label == symbol
+}
+
+func (t *AtomTransition) String() string {
+ return strconv.Itoa(t.label)
+}
+
+type RuleTransition struct {
+ BaseTransition
+ followState ATNState
+ ruleIndex, precedence int
+}
+
+func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition {
+ return &RuleTransition{
+ BaseTransition: BaseTransition{
+ target: ruleStart,
+ isEpsilon: true,
+ serializationType: TransitionRULE,
+ },
+ ruleIndex: ruleIndex,
+ precedence: precedence,
+ followState: followState,
+ }
+}
+
+func (t *RuleTransition) Matches(_, _, _ int) bool {
+ return false
+}
+
+type EpsilonTransition struct {
+ BaseTransition
+ outermostPrecedenceReturn int
+}
+
+func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition {
+ return &EpsilonTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionEPSILON,
+ isEpsilon: true,
+ },
+ outermostPrecedenceReturn: outermostPrecedenceReturn,
+ }
+}
+
+func (t *EpsilonTransition) Matches(_, _, _ int) bool {
+ return false
+}
+
+func (t *EpsilonTransition) String() string {
+ return "epsilon"
+}
+
+type RangeTransition struct {
+ BaseTransition
+ start, stop int
+}
+
+func NewRangeTransition(target ATNState, start, stop int) *RangeTransition {
+ t := &RangeTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionRANGE,
+ isEpsilon: false,
+ },
+ start: start,
+ stop: stop,
+ }
+ t.intervalSet = t.makeLabel()
+ return t
+}
+
+func (t *RangeTransition) makeLabel() *IntervalSet {
+ s := NewIntervalSet()
+ s.addRange(t.start, t.stop)
+ return s
+}
+
+func (t *RangeTransition) Matches(symbol, _, _ int) bool {
+ return symbol >= t.start && symbol <= t.stop
+}
+
+func (t *RangeTransition) String() string {
+ var sb strings.Builder
+ sb.WriteByte('\'')
+ sb.WriteRune(rune(t.start))
+ sb.WriteString("'..'")
+ sb.WriteRune(rune(t.stop))
+ sb.WriteByte('\'')
+ return sb.String()
+}
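+
+// Sketch, where target is some ATNState: a transition matching 'a'..'z':
+//
+// t := NewRangeTransition(target, 'a', 'z')
+// ok := t.Matches('m', 0, 0xFFFF) // true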
+
+type AbstractPredicateTransition interface {
+ Transition
+ IAbstractPredicateTransitionFoo()
+}
+
+type BaseAbstractPredicateTransition struct {
+ BaseTransition
+}
+
+func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition {
+ return &BaseAbstractPredicateTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ },
+ }
+}
+
+func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {}
+
+type PredicateTransition struct {
+ BaseAbstractPredicateTransition
+ isCtxDependent bool
+ ruleIndex, predIndex int
+}
+
+func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition {
+ return &PredicateTransition{
+ BaseAbstractPredicateTransition: BaseAbstractPredicateTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionPREDICATE,
+ isEpsilon: true,
+ },
+ },
+ isCtxDependent: isCtxDependent,
+ ruleIndex: ruleIndex,
+ predIndex: predIndex,
+ }
+}
+
+func (t *PredicateTransition) Matches(_, _, _ int) bool {
+ return false
+}
+
+func (t *PredicateTransition) getPredicate() *Predicate {
+ return NewPredicate(t.ruleIndex, t.predIndex, t.isCtxDependent)
+}
+
+func (t *PredicateTransition) String() string {
+ return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex)
+}
+
+type ActionTransition struct {
+ BaseTransition
+ isCtxDependent bool
+ ruleIndex, actionIndex, predIndex int
+}
+
+func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition {
+ return &ActionTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionACTION,
+ isEpsilon: true,
+ },
+ isCtxDependent: isCtxDependent,
+ ruleIndex: ruleIndex,
+ actionIndex: actionIndex,
+ }
+}
+
+func (t *ActionTransition) Matches(_, _, _ int) bool {
+ return false
+}
+
+func (t *ActionTransition) String() string {
+ return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)
+}
+
+type SetTransition struct {
+ BaseTransition
+}
+
+func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition {
+ t := &SetTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionSET,
+ },
+ }
+
+ if set != nil {
+ t.intervalSet = set
+ } else {
+ t.intervalSet = NewIntervalSet()
+ t.intervalSet.addOne(TokenInvalidType)
+ }
+ return t
+}
+
+func (t *SetTransition) Matches(symbol, _, _ int) bool {
+ return t.intervalSet.contains(symbol)
+}
+
+func (t *SetTransition) String() string {
+ return t.intervalSet.String()
+}
+
+type NotSetTransition struct {
+ SetTransition
+}
+
+func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition {
+ t := &NotSetTransition{
+ SetTransition: SetTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionNOTSET,
+ },
+ },
+ }
+ if set != nil {
+ t.intervalSet = set
+ } else {
+ t.intervalSet = NewIntervalSet()
+ t.intervalSet.addOne(TokenInvalidType)
+ }
+
+ return t
+}
+
+func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol)
+}
+
+func (t *NotSetTransition) String() string {
+ return "~" + t.intervalSet.String()
+}
+
+type WildcardTransition struct {
+ BaseTransition
+}
+
+func NewWildcardTransition(target ATNState) *WildcardTransition {
+ return &WildcardTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionWILDCARD,
+ },
+ }
+}
+
+func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return symbol >= minVocabSymbol && symbol <= maxVocabSymbol
+}
+
+func (t *WildcardTransition) String() string {
+ return "."
+}
+
+type PrecedencePredicateTransition struct {
+ BaseAbstractPredicateTransition
+ precedence int
+}
+
+func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition {
+ return &PrecedencePredicateTransition{
+ BaseAbstractPredicateTransition: BaseAbstractPredicateTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionPRECEDENCE,
+ isEpsilon: true,
+ },
+ },
+ precedence: precedence,
+ }
+}
+
+func (t *PrecedencePredicateTransition) Matches(_, _, _ int) bool {
+ return false
+}
+
+func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate {
+ return NewPrecedencePredicate(t.precedence)
+}
+
+func (t *PrecedencePredicateTransition) String() string {
+ return fmt.Sprint(t.precedence) + " >= _p"
+}
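Taken together, these transition types form a small matching protocol: label-bearing transitions (atom, range, set, not-set, wildcard) answer Matches against an input symbol, while epsilon-like transitions (epsilon, rule, predicate, action, precedence) always report false and are followed without consuming input. A minimal sketch of that contract, assuming package-internal access; the nil targets are purely illustrative, since Matches never consults the target state:

```go
package antlr

import "fmt"

// Illustrative only: nil ATNState targets are acceptable here because
// Matches never dereferences the target.
func matchesSketch() {
	atom := NewAtomTransition(nil, 'a')      // matches exactly 'a'
	rng := NewRangeTransition(nil, '0', '9') // matches '0'..'9' inclusive
	eps := NewEpsilonTransition(nil, -1)     // epsilon edges never match input

	fmt.Println(atom.Matches('a', 0, 127)) // true
	fmt.Println(rng.Matches('5', 0, 127))  // true
	fmt.Println(eps.Matches('a', 0, 127))  // false: traversed for free instead
}
```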
diff --git a/vendor/github.com/antlr4-go/antlr/v4/tree.go b/vendor/github.com/antlr4-go/antlr/v4/tree.go
new file mode 100644
index 000000000..c288420fb
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/tree.go
@@ -0,0 +1,304 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// The basic notion of a tree is a node with a parent, a payload, and a list of children.
+// It is the most abstract interface for all the trees used by ANTLR.
+
+var TreeInvalidInterval = NewInterval(-1, -2)
+
+type Tree interface {
+ GetParent() Tree
+ SetParent(Tree)
+ GetPayload() interface{}
+ GetChild(i int) Tree
+ GetChildCount() int
+ GetChildren() []Tree
+}
+
+type SyntaxTree interface {
+ Tree
+ GetSourceInterval() Interval
+}
+
+type ParseTree interface {
+ SyntaxTree
+ Accept(Visitor ParseTreeVisitor) interface{}
+ GetText() string
+ ToStringTree([]string, Recognizer) string
+}
+
+type RuleNode interface {
+ ParseTree
+ GetRuleContext() RuleContext
+}
+
+type TerminalNode interface {
+ ParseTree
+ GetSymbol() Token
+}
+
+type ErrorNode interface {
+ TerminalNode
+
+ errorNode()
+}
+
+type ParseTreeVisitor interface {
+ Visit(tree ParseTree) interface{}
+ VisitChildren(node RuleNode) interface{}
+ VisitTerminal(node TerminalNode) interface{}
+ VisitErrorNode(node ErrorNode) interface{}
+}
+
+type BaseParseTreeVisitor struct{}
+
+var _ ParseTreeVisitor = &BaseParseTreeVisitor{}
+
+func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return tree.Accept(v) }
+func (v *BaseParseTreeVisitor) VisitChildren(_ RuleNode) interface{} { return nil }
+func (v *BaseParseTreeVisitor) VisitTerminal(_ TerminalNode) interface{} { return nil }
+func (v *BaseParseTreeVisitor) VisitErrorNode(_ ErrorNode) interface{} { return nil }
+
+// TODO: Implement this?
+//func (this ParseTreeVisitor) Visit(ctx) {
+// if (Utils.isArray(ctx)) {
+// self := this
+// return ctx.map(function(child) { return VisitAtom(self, child)})
+// } else {
+// return VisitAtom(this, ctx)
+// }
+//}
+//
+//func VisitAtom(Visitor, ctx) {
+// if (ctx.parser == nil) { //is terminal
+// return
+// }
+//
+// name := ctx.parser.ruleNames[ctx.ruleIndex]
+// funcName := "Visit" + Utils.titleCase(name)
+//
+// return Visitor[funcName](ctx)
+//}
+
+type ParseTreeListener interface {
+ VisitTerminal(node TerminalNode)
+ VisitErrorNode(node ErrorNode)
+ EnterEveryRule(ctx ParserRuleContext)
+ ExitEveryRule(ctx ParserRuleContext)
+}
+
+type BaseParseTreeListener struct{}
+
+var _ ParseTreeListener = &BaseParseTreeListener{}
+
+func (l *BaseParseTreeListener) VisitTerminal(_ TerminalNode) {}
+func (l *BaseParseTreeListener) VisitErrorNode(_ ErrorNode) {}
+func (l *BaseParseTreeListener) EnterEveryRule(_ ParserRuleContext) {}
+func (l *BaseParseTreeListener) ExitEveryRule(_ ParserRuleContext) {}
+
+type TerminalNodeImpl struct {
+ parentCtx RuleContext
+ symbol Token
+}
+
+var _ TerminalNode = &TerminalNodeImpl{}
+
+func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl {
+ tn := new(TerminalNodeImpl)
+
+ tn.parentCtx = nil
+ tn.symbol = symbol
+
+ return tn
+}
+
+func (t *TerminalNodeImpl) GetChild(_ int) Tree {
+ return nil
+}
+
+func (t *TerminalNodeImpl) GetChildren() []Tree {
+ return nil
+}
+
+func (t *TerminalNodeImpl) SetChildren(_ []Tree) {
+ panic("Cannot set children on terminal node")
+}
+
+func (t *TerminalNodeImpl) GetSymbol() Token {
+ return t.symbol
+}
+
+func (t *TerminalNodeImpl) GetParent() Tree {
+ return t.parentCtx
+}
+
+func (t *TerminalNodeImpl) SetParent(tree Tree) {
+ t.parentCtx = tree.(RuleContext)
+}
+
+func (t *TerminalNodeImpl) GetPayload() interface{} {
+ return t.symbol
+}
+
+func (t *TerminalNodeImpl) GetSourceInterval() Interval {
+ if t.symbol == nil {
+ return TreeInvalidInterval
+ }
+ tokenIndex := t.symbol.GetTokenIndex()
+ return NewInterval(tokenIndex, tokenIndex)
+}
+
+func (t *TerminalNodeImpl) GetChildCount() int {
+ return 0
+}
+
+func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} {
+ return v.VisitTerminal(t)
+}
+
+func (t *TerminalNodeImpl) GetText() string {
+ return t.symbol.GetText()
+}
+
+func (t *TerminalNodeImpl) String() string {
+ if t.symbol.GetTokenType() == TokenEOF {
+ return ""
+ }
+
+ return t.symbol.GetText()
+}
+
+func (t *TerminalNodeImpl) ToStringTree(_ []string, _ Recognizer) string {
+ return t.String()
+}
+
+// Represents a token that was consumed during re-synchronization
+// rather than during a valid Match operation. For example,
+// we create this kind of node during single-token insertion
+// and deletion, as well as during "consume until error recovery set"
+// upon no-viable-alternative exceptions.
+
+type ErrorNodeImpl struct {
+ *TerminalNodeImpl
+}
+
+var _ ErrorNode = &ErrorNodeImpl{}
+
+func NewErrorNodeImpl(token Token) *ErrorNodeImpl {
+ en := new(ErrorNodeImpl)
+ en.TerminalNodeImpl = NewTerminalNodeImpl(token)
+ return en
+}
+
+func (e *ErrorNodeImpl) errorNode() {}
+
+func (e *ErrorNodeImpl) Accept(v ParseTreeVisitor) interface{} {
+ return v.VisitErrorNode(e)
+}
+
+type ParseTreeWalker struct {
+}
+
+func NewParseTreeWalker() *ParseTreeWalker {
+ return new(ParseTreeWalker)
+}
+
+// Walk performs a walk on the given parse tree starting at the root and going down recursively
+// with depth-first search. On each node, [EnterRule] is called before
+// recursively walking down into child nodes, then [ExitRule] is called after the recursive call to wind up.
+func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) {
+ switch tt := t.(type) {
+ case ErrorNode:
+ listener.VisitErrorNode(tt)
+ case TerminalNode:
+ listener.VisitTerminal(tt)
+ default:
+ p.EnterRule(listener, t.(RuleNode))
+ for i := 0; i < t.GetChildCount(); i++ {
+ child := t.GetChild(i)
+ p.Walk(listener, child)
+ }
+ p.ExitRule(listener, t.(RuleNode))
+ }
+}
+
+// EnterRule enters a grammar rule by first triggering the generic event [ParseTreeListener].[EnterEveryRule]
+// then by triggering the event specific to the given parse tree node
+func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) {
+ ctx := r.GetRuleContext().(ParserRuleContext)
+ listener.EnterEveryRule(ctx)
+ ctx.EnterRule(listener)
+}
+
+// ExitRule exits a grammar rule by first triggering the event specific to the given parse tree node
+// then by triggering the generic event [ParseTreeListener].ExitEveryRule
+func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) {
+ ctx := r.GetRuleContext().(ParserRuleContext)
+ ctx.ExitRule(listener)
+ listener.ExitEveryRule(ctx)
+}
+
+//goland:noinspection GoUnusedGlobalVariable
+var ParseTreeWalkerDefault = NewParseTreeWalker()
+
+type IterativeParseTreeWalker struct {
+ *ParseTreeWalker
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewIterativeParseTreeWalker() *IterativeParseTreeWalker {
+ return new(IterativeParseTreeWalker)
+}
+
+func (i *IterativeParseTreeWalker) Walk(listener ParseTreeListener, t Tree) {
+ var stack []Tree
+ var indexStack []int
+ currentNode := t
+ currentIndex := 0
+
+ for currentNode != nil {
+ // pre-order visit
+ switch tt := currentNode.(type) {
+ case ErrorNode:
+ listener.VisitErrorNode(tt)
+ case TerminalNode:
+ listener.VisitTerminal(tt)
+ default:
+ i.EnterRule(listener, currentNode.(RuleNode))
+ }
+ // Move down to first child, if exists
+ if currentNode.GetChildCount() > 0 {
+ stack = append(stack, currentNode)
+ indexStack = append(indexStack, currentIndex)
+ currentIndex = 0
+ currentNode = currentNode.GetChild(0)
+ continue
+ }
+
+ for {
+ // post-order visit
+ if ruleNode, ok := currentNode.(RuleNode); ok {
+ i.ExitRule(listener, ruleNode)
+ }
+ // No parent, so no siblings
+ if len(stack) == 0 {
+ currentNode = nil
+ currentIndex = 0
+ break
+ }
+ // Move to next sibling if possible
+ currentIndex++
+ if stack[len(stack)-1].GetChildCount() > currentIndex {
+ currentNode = stack[len(stack)-1].GetChild(currentIndex)
+ break
+ }
+ // No next sibling, so move up
+ currentNode, stack = stack[len(stack)-1], stack[:len(stack)-1]
+ currentIndex, indexStack = indexStack[len(indexStack)-1], indexStack[:len(indexStack)-1]
+ }
+ }
+}
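The two walkers are interchangeable in behavior: ParseTreeWalker recurses, while IterativeParseTreeWalker drives the same Enter/Exit and Visit callbacks with an explicit stack, so it cannot blow the goroutine stack on pathologically deep trees. A hedged usage sketch follows; the parse tree would come from an ANTLR-generated parser, which is assumed here rather than shown:

```go
package main

import "github.com/antlr4-go/antlr/v4"

// countingListener embeds BaseParseTreeListener so it inherits no-op
// defaults and only overrides the callback it cares about.
type countingListener struct {
	antlr.BaseParseTreeListener
	rules int
}

func (l *countingListener) EnterEveryRule(_ antlr.ParserRuleContext) { l.rules++ }

// countRules walks tree depth-first. tree is the hypothetical output of a
// generated parser; for very deep inputs, substituting
// antlr.NewIterativeParseTreeWalker() avoids unbounded recursion.
func countRules(tree antlr.ParseTree) int {
	l := &countingListener{}
	antlr.ParseTreeWalkerDefault.Walk(l, tree)
	return l.rules
}
```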
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trees.go b/vendor/github.com/antlr4-go/antlr/v4/trees.go
similarity index 81%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trees.go
rename to vendor/github.com/antlr4-go/antlr/v4/trees.go
index d7dbb0322..f44c05d81 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trees.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/trees.go
@@ -8,10 +8,8 @@ import "fmt"
/** A set of utility routines useful for all kinds of ANTLR trees. */
-// Print out a whole tree in LISP form. {@link //getNodeText} is used on the
-//
-// node payloads to get the text for the nodes. Detect
-// parse trees and extract data appropriately.
+// TreesStringTree prints out a whole tree in LISP form. [getNodeText] is used on the
+// node payloads to get the text for the nodes. Detects parse trees and extracts data appropriately.
func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string {
if recog != nil {
@@ -32,7 +30,7 @@ func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string {
}
for i := 1; i < c; i++ {
s = TreesStringTree(tree.GetChild(i), ruleNames, nil)
- res += (" " + s)
+ res += " " + s
}
res += ")"
return res
@@ -62,7 +60,7 @@ func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string {
}
}
- // no recog for rule names
+ // no recognizer available for rule names
payload := t.GetPayload()
if p2, ok := payload.(Token); ok {
return p2.GetText()
@@ -71,7 +69,9 @@ func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string {
return fmt.Sprint(t.GetPayload())
}
-// Return ordered list of all children of this node
+// TreesGetChildren returns an ordered list of all children of this node
+//
+//goland:noinspection GoUnusedExportedFunction
func TreesGetChildren(t Tree) []Tree {
list := make([]Tree, 0)
for i := 0; i < t.GetChildCount(); i++ {
@@ -80,9 +80,10 @@ func TreesGetChildren(t Tree) []Tree {
return list
}
-// Return a list of all ancestors of this node. The first node of
+// TreesgetAncestors returns a list of all ancestors of this node. The first node of the list
+// is the root and the last is the parent of this node.
//
-// list is the root and the last is the parent of this node.
+//goland:noinspection GoUnusedExportedFunction
func TreesgetAncestors(t Tree) []Tree {
ancestors := make([]Tree, 0)
t = t.GetParent()
@@ -94,10 +95,12 @@ func TreesgetAncestors(t Tree) []Tree {
return ancestors
}
+//goland:noinspection GoUnusedExportedFunction
func TreesFindAllTokenNodes(t ParseTree, ttype int) []ParseTree {
return TreesfindAllNodes(t, ttype, true)
}
+//goland:noinspection GoUnusedExportedFunction
func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree {
return TreesfindAllNodes(t, ruleIndex, false)
}
@@ -129,6 +132,7 @@ func treesFindAllNodes(t ParseTree, index int, findTokens bool, nodes *[]ParseTr
}
}
+//goland:noinspection GoUnusedExportedFunction
func TreesDescendants(t ParseTree) []ParseTree {
nodes := []ParseTree{t}
for i := 0; i < t.GetChildCount(); i++ {
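TreesStringTree is primarily a debugging aid: for a parse of `3+4` against a toy expression grammar it would render something like `(expr (term 3) + (term 4))`. A small hedged sketch; the recognizer and tree stand in for the outputs of a hypothetical generated parser:

```go
package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

// printLisp renders a parse tree in LISP form. With a non-nil Recognizer,
// TreesStringTree pulls rule names from it; with nil, it falls back to the
// supplied name slice. p and tree are placeholders for generated-parser output.
func printLisp(p antlr.Recognizer, tree antlr.Tree) {
	fmt.Println(antlr.TreesStringTree(tree, nil, p))
}
```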
diff --git a/vendor/github.com/antlr4-go/antlr/v4/utils.go b/vendor/github.com/antlr4-go/antlr/v4/utils.go
new file mode 100644
index 000000000..733d7df9d
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/utils.go
@@ -0,0 +1,328 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "math/bits"
+ "os"
+ "strconv"
+ "strings"
+ "syscall"
+)
+
+func intMin(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func intMax(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+// A simple integer stack
+
+type IntStack []int
+
+var ErrEmptyStack = errors.New("stack is empty")
+
+func (s *IntStack) Pop() (int, error) {
+ l := len(*s) - 1
+ if l < 0 {
+ return 0, ErrEmptyStack
+ }
+ v := (*s)[l]
+ *s = (*s)[0:l]
+ return v, nil
+}
+
+func (s *IntStack) Push(e int) {
+ *s = append(*s, e)
+}
+
+const bitsPerWord = 64
+
+func indexForBit(bit int) int {
+ return bit / bitsPerWord
+}
+
+//goland:noinspection GoUnusedExportedFunction,GoUnusedFunction
+func wordForBit(data []uint64, bit int) uint64 {
+ idx := indexForBit(bit)
+ if idx >= len(data) {
+ return 0
+ }
+ return data[idx]
+}
+
+func maskForBit(bit int) uint64 {
+ return uint64(1) << (bit % bitsPerWord)
+}
+
+func wordsNeeded(bit int) int {
+ return indexForBit(bit) + 1
+}
+
+type BitSet struct {
+ data []uint64
+}
+
+// NewBitSet creates a new bit set
+// TODO: See if we can replace with the standard library's BitSet
+func NewBitSet() *BitSet {
+ return &BitSet{}
+}
+
+func (b *BitSet) add(value int) {
+ idx := indexForBit(value)
+ if idx >= len(b.data) {
+ size := wordsNeeded(value)
+ data := make([]uint64, size)
+ copy(data, b.data)
+ b.data = data
+ }
+ b.data[idx] |= maskForBit(value)
+}
+
+func (b *BitSet) clear(index int) {
+ idx := indexForBit(index)
+ if idx >= len(b.data) {
+ return
+ }
+ b.data[idx] &= ^maskForBit(index)
+}
+
+func (b *BitSet) or(set *BitSet) {
+ // Get min size necessary to represent the bits in both sets.
+ bLen := b.minLen()
+ setLen := set.minLen()
+ maxLen := intMax(bLen, setLen)
+ if maxLen > len(b.data) {
+ // Increase the size of len(b.data) to represent the bits in both sets.
+ data := make([]uint64, maxLen)
+ copy(data, b.data)
+ b.data = data
+ }
+ // len(b.data) is at least setLen.
+ for i := 0; i < setLen; i++ {
+ b.data[i] |= set.data[i]
+ }
+}
+
+func (b *BitSet) remove(value int) {
+ b.clear(value)
+}
+
+func (b *BitSet) contains(value int) bool {
+ idx := indexForBit(value)
+ if idx >= len(b.data) {
+ return false
+ }
+ return (b.data[idx] & maskForBit(value)) != 0
+}
+
+func (b *BitSet) minValue() int {
+ for i, v := range b.data {
+ if v == 0 {
+ continue
+ }
+ return i*bitsPerWord + bits.TrailingZeros64(v)
+ }
+ return 2147483647 // math.MaxInt32: the set is empty
+}
+
+func (b *BitSet) equals(other interface{}) bool {
+ otherBitSet, ok := other.(*BitSet)
+ if !ok {
+ return false
+ }
+
+ if b == otherBitSet {
+ return true
+ }
+
+ // We only compare set bits, so we cannot rely on the two slices having the same size. It's
+ // possible for two BitSets to have different slice lengths but the same set bits. So we only
+ // compare the relevant words and ignore the trailing zeros.
+ bLen := b.minLen()
+ otherLen := otherBitSet.minLen()
+
+ if bLen != otherLen {
+ return false
+ }
+
+ for i := 0; i < bLen; i++ {
+ if b.data[i] != otherBitSet.data[i] {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (b *BitSet) minLen() int {
+ for i := len(b.data); i > 0; i-- {
+ if b.data[i-1] != 0 {
+ return i
+ }
+ }
+ return 0
+}
+
+func (b *BitSet) length() int {
+ cnt := 0
+ for _, val := range b.data {
+ cnt += bits.OnesCount64(val)
+ }
+ return cnt
+}
+
+func (b *BitSet) String() string {
+ vals := make([]string, 0, b.length())
+
+ for i, v := range b.data {
+ for v != 0 {
+ n := bits.TrailingZeros64(v)
+ vals = append(vals, strconv.Itoa(i*bitsPerWord+n))
+ v &= ^(uint64(1) << n)
+ }
+ }
+
+ return "{" + strings.Join(vals, ", ") + "}"
+}
+
+type AltDict struct {
+ data map[string]interface{}
+}
+
+func NewAltDict() *AltDict {
+ d := new(AltDict)
+ d.data = make(map[string]interface{})
+ return d
+}
+
+func (a *AltDict) Get(key string) interface{} {
+ key = "k-" + key
+ return a.data[key]
+}
+
+func (a *AltDict) put(key string, value interface{}) {
+ key = "k-" + key
+ a.data[key] = value
+}
+
+func (a *AltDict) values() []interface{} {
+ vs := make([]interface{}, len(a.data))
+ i := 0
+ for _, v := range a.data {
+ vs[i] = v
+ i++
+ }
+ return vs
+}
+
+func EscapeWhitespace(s string, escapeSpaces bool) string {
+ s = strings.Replace(s, "\t", "\\t", -1)
+ s = strings.Replace(s, "\n", "\\n", -1)
+ s = strings.Replace(s, "\r", "\\r", -1)
+ if escapeSpaces {
+ s = strings.Replace(s, " ", "\u00B7", -1)
+ }
+ return s
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func TerminalNodeToStringArray(sa []TerminalNode) []string {
+ st := make([]string, len(sa))
+
+ for i, s := range sa {
+ st[i] = fmt.Sprintf("%v", s)
+ }
+
+ return st
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func PrintArrayJavaStyle(sa []string) string {
+ var buffer bytes.Buffer
+
+ buffer.WriteString("[")
+
+ for i, s := range sa {
+ buffer.WriteString(s)
+ if i != len(sa)-1 {
+ buffer.WriteString(", ")
+ }
+ }
+
+ buffer.WriteString("]")
+
+ return buffer.String()
+}
+
+// MurmurHash3 (32-bit): seed initialization, per-word mixing, and finalization
+func murmurInit(seed int) int {
+ return seed
+}
+
+func murmurUpdate(h int, value int) int {
+ const c1 uint32 = 0xCC9E2D51
+ const c2 uint32 = 0x1B873593
+ const r1 uint32 = 15
+ const r2 uint32 = 13
+ const m uint32 = 5
+ const n uint32 = 0xE6546B64
+
+ k := uint32(value)
+ k *= c1
+ k = (k << r1) | (k >> (32 - r1))
+ k *= c2
+
+ hash := uint32(h) ^ k
+ hash = (hash << r2) | (hash >> (32 - r2))
+ hash = hash*m + n
+ return int(hash)
+}
+
+func murmurFinish(h int, numberOfWords int) int {
+ var hash = uint32(h)
+ hash ^= uint32(numberOfWords) << 2
+ hash ^= hash >> 16
+ hash *= 0x85ebca6b
+ hash ^= hash >> 13
+ hash *= 0xc2b2ae35
+ hash ^= hash >> 16
+
+ return int(hash)
+}
+
+func isDirectory(dir string) (bool, error) {
+ fileInfo, err := os.Stat(dir)
+ if err != nil {
+ switch {
+ case errors.Is(err, syscall.ENOENT):
+ // The given directory does not exist, so try to create it
+ err = os.MkdirAll(dir, 0755)
+ if err != nil {
+ return false, err
+ }
+
+ return true, nil
+ case err != nil:
+ // Any other Stat error is returned to the caller
+ return false, err
+ default:
+ }
+ }
+ return fileInfo.IsDir(), err
+}
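The BitSet above is a grow-on-write word array, and equals deliberately compares only up to minLen so that trailing zero words never make otherwise-identical sets unequal. Because its methods are unexported, a sketch of the semantics has to live inside the package; a hypothetical internal test might look like this:

```go
package antlr

import "testing"

// Hypothetical package-internal test: BitSet grows on demand and
// compares by set bits, so slice length alone never decides equality.
func TestBitSetSketch(t *testing.T) {
	a, b := NewBitSet(), NewBitSet()
	a.add(3)
	a.add(200) // bit 200 lives in word 3, so a.data grows to 4 words
	b.or(a)    // b now holds {3, 200} in its own storage

	if !b.equals(a) || b.length() != 2 {
		t.Fatalf("expected b == a with 2 bits, got %v", b)
	}
	b.remove(3)
	if b.equals(a) { // the sets now differ in actual bits
		t.Fatal("sets should differ after remove")
	}
}
```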
diff --git a/vendor/github.com/asaskevich/govalidator/.travis.yml b/vendor/github.com/asaskevich/govalidator/.travis.yml
deleted file mode 100644
index e29f8eef5..000000000
--- a/vendor/github.com/asaskevich/govalidator/.travis.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-language: go
-
-go:
- - 1.1
- - 1.2
- - 1.3
- - 1.4
- - 1.5
- - 1.6
- - tip
-
-notifications:
- email:
- - bwatas@gmail.com
diff --git a/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md b/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md
deleted file mode 100644
index f0f7e3a8a..000000000
--- a/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md
+++ /dev/null
@@ -1,63 +0,0 @@
-#### Support
-If you do have a contribution to the package, feel free to create a Pull Request or an Issue.
-
-#### What to contribute
-If you don't know what to do, there are some features and functions that need to be done
-
-- [ ] Refactor code
-- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
-- [ ] Create actual list of contributors and projects that currently using this package
-- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
-- [ ] Update actual [list of functions](https://github.com/asaskevich/govalidator#list-of-functions)
-- [ ] Update [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that available for `ValidateStruct` and add new
-- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc
-- [ ] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
-- [ ] Implement fuzzing testing
-- [ ] Implement some struct/map/array utilities
-- [ ] Implement map/array validation
-- [ ] Implement benchmarking
-- [ ] Implement batch of examples
-- [ ] Look at forks for new features and fixes
-
-#### Advice
-Feel free to create what you want, but keep in mind when you implement new features:
-- Code must be clear and readable, names of variables/constants clearly describes what they are doing
-- Public functions must be documented and described in source file and added to README.md to the list of available functions
-- There are must be unit-tests for any new functions and improvements
-
-## Financial contributions
-
-We also welcome financial contributions in full transparency on our [open collective](https://opencollective.com/govalidator).
-Anyone can file an expense. If the expense makes sense for the development of the community, it will be "merged" in the ledger of our open collective by the core contributors and the person who filed the expense will be reimbursed.
-
-
-## Credits
-
-
-### Contributors
-
-Thank you to all the people who have already contributed to govalidator!
-
-
-
-### Backers
-
-Thank you to all our backers! [[Become a backer](https://opencollective.com/govalidator#backer)]
-
-
-
-
-### Sponsors
-
-Thank you to all our sponsors! (please ask your company to also support this open source project by [becoming a sponsor](https://opencollective.com/govalidator#sponsor))
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/vendor/github.com/asaskevich/govalidator/README.md b/vendor/github.com/asaskevich/govalidator/README.md
deleted file mode 100644
index 40f9a8781..000000000
--- a/vendor/github.com/asaskevich/govalidator/README.md
+++ /dev/null
@@ -1,507 +0,0 @@
-govalidator
-===========
-[](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [](https://godoc.org/github.com/asaskevich/govalidator) [](https://coveralls.io/r/asaskevich/govalidator?branch=master) [](https://app.wercker.com/project/bykey/1ec990b09ea86c910d5f08b0e02c6043)
-[](https://travis-ci.org/asaskevich/govalidator) [](https://goreportcard.com/report/github.com/asaskevich/govalidator) [](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) [](#backers) [](#sponsors) [](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_shield)
-
-A package of validators and sanitizers for strings, structs and collections. Based on [validator.js](https://github.com/chriso/validator.js).
-
-#### Installation
-Make sure that Go is installed on your computer.
-Type the following command in your terminal:
-
- go get github.com/asaskevich/govalidator
-
-or you can get specified release of the package with `gopkg.in`:
-
- go get gopkg.in/asaskevich/govalidator.v4
-
-After it the package is ready to use.
-
-
-#### Import package in your project
-Add following line in your `*.go` file:
-```go
-import "github.com/asaskevich/govalidator"
-```
-If you are unhappy to use long `govalidator`, you can do something like this:
-```go
-import (
- valid "github.com/asaskevich/govalidator"
-)
-```
-
-#### Activate behavior to require all fields have a validation tag by default
-`SetFieldsRequiredByDefault` causes validation to fail when struct fields do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). A good place to activate this is a package init function or the main() function.
-
-`SetNilPtrAllowedByRequired` causes validation to pass when struct fields marked by `required` are set to nil. This is disabled by default for consistency, but some packages that need to be able to determine between `nil` and `zero value` state can use this. If disabled, both `nil` and `zero` values cause validation errors.
-
-```go
-import "github.com/asaskevich/govalidator"
-
-func init() {
- govalidator.SetFieldsRequiredByDefault(true)
-}
-```
-
-Here's some code to explain it:
-```go
-// this struct definition will fail govalidator.ValidateStruct() (and the field values do not matter):
-type exampleStruct struct {
- Name string ``
- Email string `valid:"email"`
-}
-
-// this, however, will only fail when Email is empty or an invalid email address:
-type exampleStruct2 struct {
- Name string `valid:"-"`
- Email string `valid:"email"`
-}
-
-// lastly, this will only fail when Email is an invalid email address but not when it's empty:
-type exampleStruct2 struct {
- Name string `valid:"-"`
- Email string `valid:"email,optional"`
-}
-```
-
-#### Recent breaking changes (see [#123](https://github.com/asaskevich/govalidator/pull/123))
-##### Custom validator function signature
-A context was added as the second parameter, for structs this is the object being validated – this makes dependent validation possible.
-```go
-import "github.com/asaskevich/govalidator"
-
-// old signature
-func(i interface{}) bool
-
-// new signature
-func(i interface{}, o interface{}) bool
-```
-
-##### Adding a custom validator
-This was changed to prevent data races when accessing custom validators.
-```go
-import "github.com/asaskevich/govalidator"
-
-// before
-govalidator.CustomTypeTagMap["customByteArrayValidator"] = CustomTypeValidator(func(i interface{}, o interface{}) bool {
- // ...
-})
-
-// after
-govalidator.CustomTypeTagMap.Set("customByteArrayValidator", CustomTypeValidator(func(i interface{}, o interface{}) bool {
- // ...
-}))
-```
-
-#### List of functions:
-```go
-func Abs(value float64) float64
-func BlackList(str, chars string) string
-func ByteLength(str string, params ...string) bool
-func CamelCaseToUnderscore(str string) string
-func Contains(str, substring string) bool
-func Count(array []interface{}, iterator ConditionIterator) int
-func Each(array []interface{}, iterator Iterator)
-func ErrorByField(e error, field string) string
-func ErrorsByField(e error) map[string]string
-func Filter(array []interface{}, iterator ConditionIterator) []interface{}
-func Find(array []interface{}, iterator ConditionIterator) interface{}
-func GetLine(s string, index int) (string, error)
-func GetLines(s string) []string
-func InRange(value, left, right float64) bool
-func IsASCII(str string) bool
-func IsAlpha(str string) bool
-func IsAlphanumeric(str string) bool
-func IsBase64(str string) bool
-func IsByteLength(str string, min, max int) bool
-func IsCIDR(str string) bool
-func IsCreditCard(str string) bool
-func IsDNSName(str string) bool
-func IsDataURI(str string) bool
-func IsDialString(str string) bool
-func IsDivisibleBy(str, num string) bool
-func IsEmail(str string) bool
-func IsFilePath(str string) (bool, int)
-func IsFloat(str string) bool
-func IsFullWidth(str string) bool
-func IsHalfWidth(str string) bool
-func IsHexadecimal(str string) bool
-func IsHexcolor(str string) bool
-func IsHost(str string) bool
-func IsIP(str string) bool
-func IsIPv4(str string) bool
-func IsIPv6(str string) bool
-func IsISBN(str string, version int) bool
-func IsISBN10(str string) bool
-func IsISBN13(str string) bool
-func IsISO3166Alpha2(str string) bool
-func IsISO3166Alpha3(str string) bool
-func IsISO693Alpha2(str string) bool
-func IsISO693Alpha3b(str string) bool
-func IsISO4217(str string) bool
-func IsIn(str string, params ...string) bool
-func IsInt(str string) bool
-func IsJSON(str string) bool
-func IsLatitude(str string) bool
-func IsLongitude(str string) bool
-func IsLowerCase(str string) bool
-func IsMAC(str string) bool
-func IsMongoID(str string) bool
-func IsMultibyte(str string) bool
-func IsNatural(value float64) bool
-func IsNegative(value float64) bool
-func IsNonNegative(value float64) bool
-func IsNonPositive(value float64) bool
-func IsNull(str string) bool
-func IsNumeric(str string) bool
-func IsPort(str string) bool
-func IsPositive(value float64) bool
-func IsPrintableASCII(str string) bool
-func IsRFC3339(str string) bool
-func IsRFC3339WithoutZone(str string) bool
-func IsRGBcolor(str string) bool
-func IsRequestURI(rawurl string) bool
-func IsRequestURL(rawurl string) bool
-func IsSSN(str string) bool
-func IsSemver(str string) bool
-func IsTime(str string, format string) bool
-func IsURL(str string) bool
-func IsUTFDigit(str string) bool
-func IsUTFLetter(str string) bool
-func IsUTFLetterNumeric(str string) bool
-func IsUTFNumeric(str string) bool
-func IsUUID(str string) bool
-func IsUUIDv3(str string) bool
-func IsUUIDv4(str string) bool
-func IsUUIDv5(str string) bool
-func IsUpperCase(str string) bool
-func IsVariableWidth(str string) bool
-func IsWhole(value float64) bool
-func LeftTrim(str, chars string) string
-func Map(array []interface{}, iterator ResultIterator) []interface{}
-func Matches(str, pattern string) bool
-func NormalizeEmail(str string) (string, error)
-func PadBoth(str string, padStr string, padLen int) string
-func PadLeft(str string, padStr string, padLen int) string
-func PadRight(str string, padStr string, padLen int) string
-func Range(str string, params ...string) bool
-func RemoveTags(s string) string
-func ReplacePattern(str, pattern, replace string) string
-func Reverse(s string) string
-func RightTrim(str, chars string) string
-func RuneLength(str string, params ...string) bool
-func SafeFileName(str string) string
-func SetFieldsRequiredByDefault(value bool)
-func Sign(value float64) float64
-func StringLength(str string, params ...string) bool
-func StringMatches(s string, params ...string) bool
-func StripLow(str string, keepNewLines bool) string
-func ToBoolean(str string) (bool, error)
-func ToFloat(str string) (float64, error)
-func ToInt(str string) (int64, error)
-func ToJSON(obj interface{}) (string, error)
-func ToString(obj interface{}) string
-func Trim(str, chars string) string
-func Truncate(str string, length int, ending string) string
-func UnderscoreToCamelCase(s string) string
-func ValidateStruct(s interface{}) (bool, error)
-func WhiteList(str, chars string) string
-type ConditionIterator
-type CustomTypeValidator
-type Error
-func (e Error) Error() string
-type Errors
-func (es Errors) Error() string
-func (es Errors) Errors() []error
-type ISO3166Entry
-type Iterator
-type ParamValidator
-type ResultIterator
-type UnsupportedTypeError
-func (e *UnsupportedTypeError) Error() string
-type Validator
-```
-
-#### Examples
-###### IsURL
-```go
-println(govalidator.IsURL(`http://user@pass:domain.com/path/page`))
-```
-###### ToString
-```go
-type User struct {
- FirstName string
- LastName string
-}
-
-str := govalidator.ToString(&User{"John", "Juan"})
-println(str)
-```
-###### Each, Map, Filter, Count for slices
-Each iterates over the slice/array and calls Iterator for every item
-```go
-data := []interface{}{1, 2, 3, 4, 5}
-var fn govalidator.Iterator = func(value interface{}, index int) {
- println(value.(int))
-}
-govalidator.Each(data, fn)
-```
-```go
-data := []interface{}{1, 2, 3, 4, 5}
-var fn govalidator.ResultIterator = func(value interface{}, index int) interface{} {
- return value.(int) * 3
-}
-_ = govalidator.Map(data, fn) // result = []interface{}{1, 6, 9, 12, 15}
-```
-```go
-data := []interface{}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
-var fn govalidator.ConditionIterator = func(value interface{}, index int) bool {
- return value.(int)%2 == 0
-}
-_ = govalidator.Filter(data, fn) // result = []interface{}{2, 4, 6, 8, 10}
-_ = govalidator.Count(data, fn) // result = 5
-```
-###### ValidateStruct [#2](https://github.com/asaskevich/govalidator/pull/2)
-If you want to validate structs, you can use tag `valid` for any field in your structure. All validators used with this field in one tag are separated by comma. If you want to skip validation, place `-` in your tag. If you need a validator that is not on the list below, you can add it like this:
-```go
-govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
- return str == "duck"
-})
-```
-For completely custom validators (interface-based), see below.
-
-Here is a list of available validators for struct fields (validator - used function):
-```go
-"email": IsEmail,
-"url": IsURL,
-"dialstring": IsDialString,
-"requrl": IsRequestURL,
-"requri": IsRequestURI,
-"alpha": IsAlpha,
-"utfletter": IsUTFLetter,
-"alphanum": IsAlphanumeric,
-"utfletternum": IsUTFLetterNumeric,
-"numeric": IsNumeric,
-"utfnumeric": IsUTFNumeric,
-"utfdigit": IsUTFDigit,
-"hexadecimal": IsHexadecimal,
-"hexcolor": IsHexcolor,
-"rgbcolor": IsRGBcolor,
-"lowercase": IsLowerCase,
-"uppercase": IsUpperCase,
-"int": IsInt,
-"float": IsFloat,
-"null": IsNull,
-"uuid": IsUUID,
-"uuidv3": IsUUIDv3,
-"uuidv4": IsUUIDv4,
-"uuidv5": IsUUIDv5,
-"creditcard": IsCreditCard,
-"isbn10": IsISBN10,
-"isbn13": IsISBN13,
-"json": IsJSON,
-"multibyte": IsMultibyte,
-"ascii": IsASCII,
-"printableascii": IsPrintableASCII,
-"fullwidth": IsFullWidth,
-"halfwidth": IsHalfWidth,
-"variablewidth": IsVariableWidth,
-"base64": IsBase64,
-"datauri": IsDataURI,
-"ip": IsIP,
-"port": IsPort,
-"ipv4": IsIPv4,
-"ipv6": IsIPv6,
-"dns": IsDNSName,
-"host": IsHost,
-"mac": IsMAC,
-"latitude": IsLatitude,
-"longitude": IsLongitude,
-"ssn": IsSSN,
-"semver": IsSemver,
-"rfc3339": IsRFC3339,
-"rfc3339WithoutZone": IsRFC3339WithoutZone,
-"ISO3166Alpha2": IsISO3166Alpha2,
-"ISO3166Alpha3": IsISO3166Alpha3,
-```
-Validators with parameters
-
-```go
-"range(min|max)": Range,
-"length(min|max)": ByteLength,
-"runelength(min|max)": RuneLength,
-"stringlength(min|max)": StringLength,
-"matches(pattern)": StringMatches,
-"in(string1|string2|...|stringN)": IsIn,
-"rsapub(keylength)" : IsRsaPub,
-```
-
-And here is small example of usage:
-```go
-type Post struct {
- Title string `valid:"alphanum,required"`
- Message string `valid:"duck,ascii"`
- Message2 string `valid:"animal(dog)"`
- AuthorIP string `valid:"ipv4"`
- Date string `valid:"-"`
-}
-post := &Post{
- Title: "My Example Post",
- Message: "duck",
- Message2: "dog",
- AuthorIP: "123.234.54.3",
-}
-
-// Add your own struct validation tags
-govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
- return str == "duck"
-})
-
-// Add your own struct validation tags with parameter
-govalidator.ParamTagMap["animal"] = govalidator.ParamValidator(func(str string, params ...string) bool {
- species := params[0]
- return str == species
-})
-govalidator.ParamTagRegexMap["animal"] = regexp.MustCompile("^animal\\((\\w+)\\)$")
-
-result, err := govalidator.ValidateStruct(post)
-if err != nil {
- println("error: " + err.Error())
-}
-println(result)
-```
-###### WhiteList
-```go
-// Remove all characters from string ignoring characters between "a" and "z"
-println(govalidator.WhiteList("a3a43a5a4a3a2a23a4a5a4a3a4", "a-z") == "aaaaaaaaaaaa")
-```
-
-###### Custom validation functions
-Custom validation using your own domain specific validators is also available - here's an example of how to use it:
-```go
-import "github.com/asaskevich/govalidator"
-
-type CustomByteArray [6]byte // custom types are supported and can be validated
-
-type StructWithCustomByteArray struct {
- ID CustomByteArray `valid:"customByteArrayValidator,customMinLengthValidator"` // multiple custom validators are possible as well and will be evaluated in sequence
- Email string `valid:"email"`
- CustomMinLength int `valid:"-"`
-}
-
-govalidator.CustomTypeTagMap.Set("customByteArrayValidator", CustomTypeValidator(func(i interface{}, context interface{}) bool {
- switch v := context.(type) { // you can type switch on the context interface being validated
- case StructWithCustomByteArray:
- // you can check and validate against some other field in the context,
- // return early or not validate against the context at all – your choice
- case SomeOtherType:
- // ...
- default:
- // expecting some other type? Throw/panic here or continue
- }
-
- switch v := i.(type) { // type switch on the struct field being validated
- case CustomByteArray:
- for _, e := range v { // this validator checks that the byte array is not empty, i.e. not all zeroes
- if e != 0 {
- return true
- }
- }
- }
- return false
-}))
-govalidator.CustomTypeTagMap.Set("customMinLengthValidator", CustomTypeValidator(func(i interface{}, context interface{}) bool {
- switch v := context.(type) { // this validates a field against the value in another field, i.e. dependent validation
- case StructWithCustomByteArray:
- return len(v.ID) >= v.CustomMinLength
- }
- return false
-}))
-```
-
-###### Custom error messages
-Custom error messages are supported via annotations by adding the `~` separator - here's an example of how to use it:
-```go
-type Ticket struct {
- Id int64 `json:"id"`
- FirstName string `json:"firstname" valid:"required~First name is blank"`
-}
-```
-
-#### Notes
-Documentation is available here: [godoc.org](https://godoc.org/github.com/asaskevich/govalidator).
-Full information about code coverage is also available here: [govalidator on gocover.io](http://gocover.io/github.com/asaskevich/govalidator).
-
-#### Support
-If you do have a contribution to the package, feel free to create a Pull Request or an Issue.
-
-#### What to contribute
-If you don't know what to do, there are some features and functions that need to be done
-
-- [ ] Refactor code
-- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
-- [ ] Create actual list of contributors and projects that currently using this package
-- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
-- [ ] Update actual [list of functions](https://github.com/asaskevich/govalidator#list-of-functions)
-- [ ] Update [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that available for `ValidateStruct` and add new
-- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc
-- [ ] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
-- [ ] Implement fuzzing testing
-- [ ] Implement some struct/map/array utilities
-- [ ] Implement map/array validation
-- [ ] Implement benchmarking
-- [ ] Implement batch of examples
-- [ ] Look at forks for new features and fixes
-
-#### Advice
-Feel free to create what you want, but keep in mind when you implement new features:
-- Code must be clear and readable, names of variables/constants clearly describes what they are doing
-- Public functions must be documented and described in source file and added to README.md to the list of available functions
-- There are must be unit-tests for any new functions and improvements
-
-## Credits
-### Contributors
-
-This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)].
-
-#### Special thanks to [contributors](https://github.com/asaskevich/govalidator/graphs/contributors)
-* [Daniel Lohse](https://github.com/annismckenzie)
-* [Attila Oláh](https://github.com/attilaolah)
-* [Daniel Korner](https://github.com/Dadie)
-* [Steven Wilkin](https://github.com/stevenwilkin)
-* [Deiwin Sarjas](https://github.com/deiwin)
-* [Noah Shibley](https://github.com/slugmobile)
-* [Nathan Davies](https://github.com/nathj07)
-* [Matt Sanford](https://github.com/mzsanford)
-* [Simon ccl1115](https://github.com/ccl1115)
-
-
-
-
-### Backers
-
-Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/govalidator#backer)]
-
-
-
-
-### Sponsors
-
-Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/govalidator#sponsor)]
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-## License
-[](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_large)
\ No newline at end of file
diff --git a/vendor/github.com/asaskevich/govalidator/arrays.go b/vendor/github.com/asaskevich/govalidator/arrays.go
deleted file mode 100644
index 5bace2654..000000000
--- a/vendor/github.com/asaskevich/govalidator/arrays.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package govalidator
-
-// Iterator is the function that accepts element of slice/array and its index
-type Iterator func(interface{}, int)
-
-// ResultIterator is the function that accepts element of slice/array and its index and returns any result
-type ResultIterator func(interface{}, int) interface{}
-
-// ConditionIterator is the function that accepts element of slice/array and its index and returns boolean
-type ConditionIterator func(interface{}, int) bool
-
-// Each iterates over the slice and apply Iterator to every item
-func Each(array []interface{}, iterator Iterator) {
- for index, data := range array {
- iterator(data, index)
- }
-}
-
-// Map iterates over the slice and apply ResultIterator to every item. Returns new slice as a result.
-func Map(array []interface{}, iterator ResultIterator) []interface{} {
- var result = make([]interface{}, len(array))
- for index, data := range array {
- result[index] = iterator(data, index)
- }
- return result
-}
-
-// Find iterates over the slice and apply ConditionIterator to every item. Returns first item that meet ConditionIterator or nil otherwise.
-func Find(array []interface{}, iterator ConditionIterator) interface{} {
- for index, data := range array {
- if iterator(data, index) {
- return data
- }
- }
- return nil
-}
-
-// Filter iterates over the slice and apply ConditionIterator to every item. Returns new slice.
-func Filter(array []interface{}, iterator ConditionIterator) []interface{} {
- var result = make([]interface{}, 0)
- for index, data := range array {
- if iterator(data, index) {
- result = append(result, data)
- }
- }
- return result
-}
-
-// Count iterates over the slice and apply ConditionIterator to every item. Returns count of items that meets ConditionIterator.
-func Count(array []interface{}, iterator ConditionIterator) int {
- count := 0
- for index, data := range array {
- if iterator(data, index) {
- count = count + 1
- }
- }
- return count
-}
diff --git a/vendor/github.com/asaskevich/govalidator/converter.go b/vendor/github.com/asaskevich/govalidator/converter.go
deleted file mode 100644
index cf1e5d569..000000000
--- a/vendor/github.com/asaskevich/govalidator/converter.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package govalidator
-
-import (
- "encoding/json"
- "fmt"
- "reflect"
- "strconv"
-)
-
-// ToString convert the input to a string.
-func ToString(obj interface{}) string {
- res := fmt.Sprintf("%v", obj)
- return string(res)
-}
-
-// ToJSON convert the input to a valid JSON string
-func ToJSON(obj interface{}) (string, error) {
- res, err := json.Marshal(obj)
- if err != nil {
- res = []byte("")
- }
- return string(res), err
-}
-
-// ToFloat convert the input string to a float, or 0.0 if the input is not a float.
-func ToFloat(str string) (float64, error) {
- res, err := strconv.ParseFloat(str, 64)
- if err != nil {
- res = 0.0
- }
- return res, err
-}
-
-// ToInt convert the input string or any int type to an integer type 64, or 0 if the input is not an integer.
-func ToInt(value interface{}) (res int64, err error) {
- val := reflect.ValueOf(value)
-
- switch value.(type) {
- case int, int8, int16, int32, int64:
- res = val.Int()
- case uint, uint8, uint16, uint32, uint64:
- res = int64(val.Uint())
- case string:
- if IsInt(val.String()) {
- res, err = strconv.ParseInt(val.String(), 0, 64)
- if err != nil {
- res = 0
- }
- } else {
- err = fmt.Errorf("math: square root of negative number %g", value)
- res = 0
- }
- default:
- err = fmt.Errorf("math: square root of negative number %g", value)
- res = 0
- }
-
- return
-}
-
-// ToBoolean convert the input string to a boolean.
-func ToBoolean(str string) (bool, error) {
- return strconv.ParseBool(str)
-}
diff --git a/vendor/github.com/asaskevich/govalidator/error.go b/vendor/github.com/asaskevich/govalidator/error.go
deleted file mode 100644
index 655b750cb..000000000
--- a/vendor/github.com/asaskevich/govalidator/error.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package govalidator
-
-import "strings"
-
-// Errors is an array of multiple errors and conforms to the error interface.
-type Errors []error
-
-// Errors returns itself.
-func (es Errors) Errors() []error {
- return es
-}
-
-func (es Errors) Error() string {
- var errs []string
- for _, e := range es {
- errs = append(errs, e.Error())
- }
- return strings.Join(errs, ";")
-}
-
-// Error encapsulates a name, an error and whether there's a custom error message or not.
-type Error struct {
- Name string
- Err error
- CustomErrorMessageExists bool
-
- // Validator indicates the name of the validator that failed
- Validator string
- Path []string
-}
-
-func (e Error) Error() string {
- if e.CustomErrorMessageExists {
- return e.Err.Error()
- }
-
- errName := e.Name
- if len(e.Path) > 0 {
- errName = strings.Join(append(e.Path, e.Name), ".")
- }
-
- return errName + ": " + e.Err.Error()
-}
diff --git a/vendor/github.com/asaskevich/govalidator/numerics.go b/vendor/github.com/asaskevich/govalidator/numerics.go
deleted file mode 100644
index 7e6c652e1..000000000
--- a/vendor/github.com/asaskevich/govalidator/numerics.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package govalidator
-
-import (
- "math"
- "reflect"
-)
-
-// Abs returns absolute value of number
-func Abs(value float64) float64 {
- return math.Abs(value)
-}
-
-// Sign returns signum of number: 1 in case of value > 0, -1 in case of value < 0, 0 otherwise
-func Sign(value float64) float64 {
- if value > 0 {
- return 1
- } else if value < 0 {
- return -1
- } else {
- return 0
- }
-}
-
-// IsNegative returns true if value < 0
-func IsNegative(value float64) bool {
- return value < 0
-}
-
-// IsPositive returns true if value > 0
-func IsPositive(value float64) bool {
- return value > 0
-}
-
-// IsNonNegative returns true if value >= 0
-func IsNonNegative(value float64) bool {
- return value >= 0
-}
-
-// IsNonPositive returns true if value <= 0
-func IsNonPositive(value float64) bool {
- return value <= 0
-}
-
-// InRange returns true if value lies between left and right border
-func InRangeInt(value, left, right interface{}) bool {
- value64, _ := ToInt(value)
- left64, _ := ToInt(left)
- right64, _ := ToInt(right)
- if left64 > right64 {
- left64, right64 = right64, left64
- }
- return value64 >= left64 && value64 <= right64
-}
-
-// InRange returns true if value lies between left and right border
-func InRangeFloat32(value, left, right float32) bool {
- if left > right {
- left, right = right, left
- }
- return value >= left && value <= right
-}
-
-// InRange returns true if value lies between left and right border
-func InRangeFloat64(value, left, right float64) bool {
- if left > right {
- left, right = right, left
- }
- return value >= left && value <= right
-}
-
-// InRange returns true if value lies between left and right border, generic type to handle int, float32 or float64, all types must the same type
-func InRange(value interface{}, left interface{}, right interface{}) bool {
-
- reflectValue := reflect.TypeOf(value).Kind()
- reflectLeft := reflect.TypeOf(left).Kind()
- reflectRight := reflect.TypeOf(right).Kind()
-
- if reflectValue == reflect.Int && reflectLeft == reflect.Int && reflectRight == reflect.Int {
- return InRangeInt(value.(int), left.(int), right.(int))
- } else if reflectValue == reflect.Float32 && reflectLeft == reflect.Float32 && reflectRight == reflect.Float32 {
- return InRangeFloat32(value.(float32), left.(float32), right.(float32))
- } else if reflectValue == reflect.Float64 && reflectLeft == reflect.Float64 && reflectRight == reflect.Float64 {
- return InRangeFloat64(value.(float64), left.(float64), right.(float64))
- } else {
- return false
- }
-}
-
-// IsWhole returns true if value is whole number
-func IsWhole(value float64) bool {
- return math.Remainder(value, 1) == 0
-}
-
-// IsNatural returns true if value is natural number (positive and whole)
-func IsNatural(value float64) bool {
- return IsWhole(value) && IsPositive(value)
-}
diff --git a/vendor/github.com/asaskevich/govalidator/patterns.go b/vendor/github.com/asaskevich/govalidator/patterns.go
deleted file mode 100644
index 61a05d438..000000000
--- a/vendor/github.com/asaskevich/govalidator/patterns.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package govalidator
-
-import "regexp"
-
-// Basic regular expressions for validating strings
-const (
- Email string = "^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$"
- CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$"
- ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$"
- ISBN13 string = "^(?:[0-9]{13})$"
- UUID3 string = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$"
- UUID4 string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
- UUID5 string = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
- UUID string = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"
- Alpha string = "^[a-zA-Z]+$"
- Alphanumeric string = "^[a-zA-Z0-9]+$"
- Numeric string = "^[0-9]+$"
- Int string = "^(?:[-+]?(?:0|[1-9][0-9]*))$"
- Float string = "^(?:[-+]?(?:[0-9]+))?(?:\\.[0-9]*)?(?:[eE][\\+\\-]?(?:[0-9]+))?$"
- Hexadecimal string = "^[0-9a-fA-F]+$"
- Hexcolor string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$"
- RGBcolor string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$"
- ASCII string = "^[\x00-\x7F]+$"
- Multibyte string = "[^\x00-\x7F]"
- FullWidth string = "[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]"
- HalfWidth string = "[\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]"
- Base64 string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$"
- PrintableASCII string = "^[\x20-\x7E]+$"
- DataURI string = "^data:.+\\/(.+);base64$"
- Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$"
- Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$"
- DNSName string = `^([a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*[\._]?$`
- IP string = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))`
- URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)`
- URLUsername string = `(\S+(:\S*)?@)`
- URLPath string = `((\/|\?|#)[^\s]*)`
- URLPort string = `(:(\d{1,5}))`
- URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3])(\.(1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-4]))`
- URLSubdomain string = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))`
- URL string = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$`
- SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$`
- WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$`
- UnixPath string = `^(/[^/\x00]*)+/?$`
- Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$"
- tagName string = "valid"
- hasLowerCase string = ".*[[:lower:]]"
- hasUpperCase string = ".*[[:upper:]]"
- hasWhitespace string = ".*[[:space:]]"
- hasWhitespaceOnly string = "^[[:space:]]+$"
-)
-
-// Used by IsFilePath func
-const (
- // Unknown is unresolved OS type
- Unknown = iota
- // Win is Windows type
- Win
- // Unix is *nix OS types
- Unix
-)
-
-var (
- userRegexp = regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$")
- hostRegexp = regexp.MustCompile("^[^\\s]+\\.[^\\s]+$")
- userDotRegexp = regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})")
- rxEmail = regexp.MustCompile(Email)
- rxCreditCard = regexp.MustCompile(CreditCard)
- rxISBN10 = regexp.MustCompile(ISBN10)
- rxISBN13 = regexp.MustCompile(ISBN13)
- rxUUID3 = regexp.MustCompile(UUID3)
- rxUUID4 = regexp.MustCompile(UUID4)
- rxUUID5 = regexp.MustCompile(UUID5)
- rxUUID = regexp.MustCompile(UUID)
- rxAlpha = regexp.MustCompile(Alpha)
- rxAlphanumeric = regexp.MustCompile(Alphanumeric)
- rxNumeric = regexp.MustCompile(Numeric)
- rxInt = regexp.MustCompile(Int)
- rxFloat = regexp.MustCompile(Float)
- rxHexadecimal = regexp.MustCompile(Hexadecimal)
- rxHexcolor = regexp.MustCompile(Hexcolor)
- rxRGBcolor = regexp.MustCompile(RGBcolor)
- rxASCII = regexp.MustCompile(ASCII)
- rxPrintableASCII = regexp.MustCompile(PrintableASCII)
- rxMultibyte = regexp.MustCompile(Multibyte)
- rxFullWidth = regexp.MustCompile(FullWidth)
- rxHalfWidth = regexp.MustCompile(HalfWidth)
- rxBase64 = regexp.MustCompile(Base64)
- rxDataURI = regexp.MustCompile(DataURI)
- rxLatitude = regexp.MustCompile(Latitude)
- rxLongitude = regexp.MustCompile(Longitude)
- rxDNSName = regexp.MustCompile(DNSName)
- rxURL = regexp.MustCompile(URL)
- rxSSN = regexp.MustCompile(SSN)
- rxWinPath = regexp.MustCompile(WinPath)
- rxUnixPath = regexp.MustCompile(UnixPath)
- rxSemver = regexp.MustCompile(Semver)
- rxHasLowerCase = regexp.MustCompile(hasLowerCase)
- rxHasUpperCase = regexp.MustCompile(hasUpperCase)
- rxHasWhitespace = regexp.MustCompile(hasWhitespace)
- rxHasWhitespaceOnly = regexp.MustCompile(hasWhitespaceOnly)
-)
diff --git a/vendor/github.com/asaskevich/govalidator/types.go b/vendor/github.com/asaskevich/govalidator/types.go
deleted file mode 100644
index 4f7e9274a..000000000
--- a/vendor/github.com/asaskevich/govalidator/types.go
+++ /dev/null
@@ -1,636 +0,0 @@
-package govalidator
-
-import (
- "reflect"
- "regexp"
- "sort"
- "sync"
-)
-
-// Validator is a wrapper for a validator function that returns bool and accepts string.
-type Validator func(str string) bool
-
-// CustomTypeValidator is a wrapper for validator functions that return bool and accept any type.
-// The second parameter should be the context (in the case of validating a struct: the whole object being validated).
-type CustomTypeValidator func(i interface{}, o interface{}) bool
-
-// ParamValidator is a wrapper for validator functions that accepts additional parameters.
-type ParamValidator func(str string, params ...string) bool
-type tagOptionsMap map[string]tagOption
-
-func (t tagOptionsMap) orderedKeys() []string {
- var keys []string
- for k := range t {
- keys = append(keys, k)
- }
-
- sort.Slice(keys, func(a, b int) bool {
- return t[keys[a]].order < t[keys[b]].order
- })
-
- return keys
-}
-
-type tagOption struct {
- name string
- customErrorMessage string
- order int
-}
-
-// UnsupportedTypeError is a wrapper for reflect.Type
-type UnsupportedTypeError struct {
- Type reflect.Type
-}
-
-// stringValues is a slice of reflect.Value holding string values.
-// It implements the methods to sort by string.
-type stringValues []reflect.Value
-
-// ParamTagMap is a map of validator functions that accept additional parameters.
-var ParamTagMap = map[string]ParamValidator{
- "length": ByteLength,
- "range": Range,
- "runelength": RuneLength,
- "stringlength": StringLength,
- "matches": StringMatches,
- "in": isInRaw,
- "rsapub": IsRsaPub,
-}
-
-// ParamTagRegexMap maps param tags to their respective regexes.
-var ParamTagRegexMap = map[string]*regexp.Regexp{
- "range": regexp.MustCompile("^range\\((\\d+)\\|(\\d+)\\)$"),
- "length": regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"),
- "runelength": regexp.MustCompile("^runelength\\((\\d+)\\|(\\d+)\\)$"),
- "stringlength": regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"),
- "in": regexp.MustCompile(`^in\((.*)\)`),
- "matches": regexp.MustCompile(`^matches\((.+)\)$`),
- "rsapub": regexp.MustCompile("^rsapub\\((\\d+)\\)$"),
-}
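
For illustration, a tag such as `length(2|10)` is first matched against its entry in ParamTagRegexMap to pull out the bounds, which are then handed to the matching ParamValidator. A minimal sketch of that flow (exampleLength is a hypothetical helper, not part of the library):

// exampleLength is a hypothetical helper showing how a param tag is resolved.
func exampleLength(value, tag string) bool {
	// FindStringSubmatch on "length(2|10)" yields {"length(2|10)", "2", "10"}.
	m := ParamTagRegexMap["length"].FindStringSubmatch(tag)
	if m == nil {
		return false
	}
	// ByteLength (registered in ParamTagMap above) then checks 2 <= len(value) <= 10.
	return ParamTagMap["length"](value, m[1:]...)
}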
-
-type customTypeTagMap struct {
- validators map[string]CustomTypeValidator
-
- sync.RWMutex
-}
-
-func (tm *customTypeTagMap) Get(name string) (CustomTypeValidator, bool) {
- tm.RLock()
- defer tm.RUnlock()
- v, ok := tm.validators[name]
- return v, ok
-}
-
-func (tm *customTypeTagMap) Set(name string, ctv CustomTypeValidator) {
- tm.Lock()
- defer tm.Unlock()
- tm.validators[name] = ctv
-}
-
-// CustomTypeTagMap is a map of functions that can be used as tags for ValidateStruct function.
-// Use this to validate compound or custom types that need to be handled as a whole, e.g.
-// `type UUID [16]byte` (this would be handled as an array of bytes).
-var CustomTypeTagMap = &customTypeTagMap{validators: make(map[string]CustomTypeValidator)}
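
A sketch of how a custom validator is typically registered (the "customMinutes" tag and the Duration type are illustrative, not part of the library):

// Duration is a hypothetical compound type used only for illustration.
type Duration struct{ Minutes int }

func init() {
	// After this call, a Duration field can be tagged `valid:"customMinutes"`.
	CustomTypeTagMap.Set("customMinutes", func(i interface{}, o interface{}) bool {
		d, ok := i.(Duration)
		return ok && d.Minutes >= 0
	})
}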
-
-// TagMap is a map of validator functions that can be used as tags for the ValidateStruct function.
-var TagMap = map[string]Validator{
- "email": IsEmail,
- "url": IsURL,
- "dialstring": IsDialString,
- "requrl": IsRequestURL,
- "requri": IsRequestURI,
- "alpha": IsAlpha,
- "utfletter": IsUTFLetter,
- "alphanum": IsAlphanumeric,
- "utfletternum": IsUTFLetterNumeric,
- "numeric": IsNumeric,
- "utfnumeric": IsUTFNumeric,
- "utfdigit": IsUTFDigit,
- "hexadecimal": IsHexadecimal,
- "hexcolor": IsHexcolor,
- "rgbcolor": IsRGBcolor,
- "lowercase": IsLowerCase,
- "uppercase": IsUpperCase,
- "int": IsInt,
- "float": IsFloat,
- "null": IsNull,
- "uuid": IsUUID,
- "uuidv3": IsUUIDv3,
- "uuidv4": IsUUIDv4,
- "uuidv5": IsUUIDv5,
- "creditcard": IsCreditCard,
- "isbn10": IsISBN10,
- "isbn13": IsISBN13,
- "json": IsJSON,
- "multibyte": IsMultibyte,
- "ascii": IsASCII,
- "printableascii": IsPrintableASCII,
- "fullwidth": IsFullWidth,
- "halfwidth": IsHalfWidth,
- "variablewidth": IsVariableWidth,
- "base64": IsBase64,
- "datauri": IsDataURI,
- "ip": IsIP,
- "port": IsPort,
- "ipv4": IsIPv4,
- "ipv6": IsIPv6,
- "dns": IsDNSName,
- "host": IsHost,
- "mac": IsMAC,
- "latitude": IsLatitude,
- "longitude": IsLongitude,
- "ssn": IsSSN,
- "semver": IsSemver,
- "rfc3339": IsRFC3339,
- "rfc3339WithoutZone": IsRFC3339WithoutZone,
- "ISO3166Alpha2": IsISO3166Alpha2,
- "ISO3166Alpha3": IsISO3166Alpha3,
- "ISO4217": IsISO4217,
-}
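
These names are what ValidateStruct (defined in validator.go) resolves against `valid:"..."` struct tags. A minimal usage sketch (signupForm and exampleValidateStruct are illustrative):

// exampleValidateStruct is a hypothetical helper showing tag-driven validation.
func exampleValidateStruct() (bool, error) {
	type signupForm struct {
		Email string `valid:"email"`
		Site  string `valid:"url,optional"`
	}
	// Each tag name is looked up in TagMap; "optional" lets an empty Site pass.
	return ValidateStruct(signupForm{Email: "user@example.com"})
}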
-
-// ISO3166Entry stores country codes
-type ISO3166Entry struct {
- EnglishShortName string
- FrenchShortName string
- Alpha2Code string
- Alpha3Code string
- Numeric string
-}
-
-// ISO3166List is based on https://www.iso.org/obp/ui/#search/code/ Code Type "Officially Assigned Codes"
-var ISO3166List = []ISO3166Entry{
- {"Afghanistan", "Afghanistan (l')", "AF", "AFG", "004"},
- {"Albania", "Albanie (l')", "AL", "ALB", "008"},
- {"Antarctica", "Antarctique (l')", "AQ", "ATA", "010"},
- {"Algeria", "Algérie (l')", "DZ", "DZA", "012"},
- {"American Samoa", "Samoa américaines (les)", "AS", "ASM", "016"},
- {"Andorra", "Andorre (l')", "AD", "AND", "020"},
- {"Angola", "Angola (l')", "AO", "AGO", "024"},
- {"Antigua and Barbuda", "Antigua-et-Barbuda", "AG", "ATG", "028"},
- {"Azerbaijan", "Azerbaïdjan (l')", "AZ", "AZE", "031"},
- {"Argentina", "Argentine (l')", "AR", "ARG", "032"},
- {"Australia", "Australie (l')", "AU", "AUS", "036"},
- {"Austria", "Autriche (l')", "AT", "AUT", "040"},
- {"Bahamas (the)", "Bahamas (les)", "BS", "BHS", "044"},
- {"Bahrain", "Bahreïn", "BH", "BHR", "048"},
- {"Bangladesh", "Bangladesh (le)", "BD", "BGD", "050"},
- {"Armenia", "Arménie (l')", "AM", "ARM", "051"},
- {"Barbados", "Barbade (la)", "BB", "BRB", "052"},
- {"Belgium", "Belgique (la)", "BE", "BEL", "056"},
- {"Bermuda", "Bermudes (les)", "BM", "BMU", "060"},
- {"Bhutan", "Bhoutan (le)", "BT", "BTN", "064"},
- {"Bolivia (Plurinational State of)", "Bolivie (État plurinational de)", "BO", "BOL", "068"},
- {"Bosnia and Herzegovina", "Bosnie-Herzégovine (la)", "BA", "BIH", "070"},
- {"Botswana", "Botswana (le)", "BW", "BWA", "072"},
- {"Bouvet Island", "Bouvet (l'Île)", "BV", "BVT", "074"},
- {"Brazil", "Brésil (le)", "BR", "BRA", "076"},
- {"Belize", "Belize (le)", "BZ", "BLZ", "084"},
- {"British Indian Ocean Territory (the)", "Indien (le Territoire britannique de l'océan)", "IO", "IOT", "086"},
- {"Solomon Islands", "Salomon (Îles)", "SB", "SLB", "090"},
- {"Virgin Islands (British)", "Vierges britanniques (les Îles)", "VG", "VGB", "092"},
- {"Brunei Darussalam", "Brunéi Darussalam (le)", "BN", "BRN", "096"},
- {"Bulgaria", "Bulgarie (la)", "BG", "BGR", "100"},
- {"Myanmar", "Myanmar (le)", "MM", "MMR", "104"},
- {"Burundi", "Burundi (le)", "BI", "BDI", "108"},
- {"Belarus", "Bélarus (le)", "BY", "BLR", "112"},
- {"Cambodia", "Cambodge (le)", "KH", "KHM", "116"},
- {"Cameroon", "Cameroun (le)", "CM", "CMR", "120"},
- {"Canada", "Canada (le)", "CA", "CAN", "124"},
- {"Cabo Verde", "Cabo Verde", "CV", "CPV", "132"},
- {"Cayman Islands (the)", "Caïmans (les Îles)", "KY", "CYM", "136"},
- {"Central African Republic (the)", "République centrafricaine (la)", "CF", "CAF", "140"},
- {"Sri Lanka", "Sri Lanka", "LK", "LKA", "144"},
- {"Chad", "Tchad (le)", "TD", "TCD", "148"},
- {"Chile", "Chili (le)", "CL", "CHL", "152"},
- {"China", "Chine (la)", "CN", "CHN", "156"},
- {"Taiwan (Province of China)", "Taïwan (Province de Chine)", "TW", "TWN", "158"},
- {"Christmas Island", "Christmas (l'Île)", "CX", "CXR", "162"},
- {"Cocos (Keeling) Islands (the)", "Cocos (les Îles)/ Keeling (les Îles)", "CC", "CCK", "166"},
- {"Colombia", "Colombie (la)", "CO", "COL", "170"},
- {"Comoros (the)", "Comores (les)", "KM", "COM", "174"},
- {"Mayotte", "Mayotte", "YT", "MYT", "175"},
- {"Congo (the)", "Congo (le)", "CG", "COG", "178"},
- {"Congo (the Democratic Republic of the)", "Congo (la République démocratique du)", "CD", "COD", "180"},
- {"Cook Islands (the)", "Cook (les Îles)", "CK", "COK", "184"},
- {"Costa Rica", "Costa Rica (le)", "CR", "CRI", "188"},
- {"Croatia", "Croatie (la)", "HR", "HRV", "191"},
- {"Cuba", "Cuba", "CU", "CUB", "192"},
- {"Cyprus", "Chypre", "CY", "CYP", "196"},
- {"Czech Republic (the)", "tchèque (la République)", "CZ", "CZE", "203"},
- {"Benin", "Bénin (le)", "BJ", "BEN", "204"},
- {"Denmark", "Danemark (le)", "DK", "DNK", "208"},
- {"Dominica", "Dominique (la)", "DM", "DMA", "212"},
- {"Dominican Republic (the)", "dominicaine (la République)", "DO", "DOM", "214"},
- {"Ecuador", "Équateur (l')", "EC", "ECU", "218"},
- {"El Salvador", "El Salvador", "SV", "SLV", "222"},
- {"Equatorial Guinea", "Guinée équatoriale (la)", "GQ", "GNQ", "226"},
- {"Ethiopia", "Éthiopie (l')", "ET", "ETH", "231"},
- {"Eritrea", "Érythrée (l')", "ER", "ERI", "232"},
- {"Estonia", "Estonie (l')", "EE", "EST", "233"},
- {"Faroe Islands (the)", "Féroé (les Îles)", "FO", "FRO", "234"},
- {"Falkland Islands (the) [Malvinas]", "Falkland (les Îles)/Malouines (les Îles)", "FK", "FLK", "238"},
- {"South Georgia and the South Sandwich Islands", "Géorgie du Sud-et-les Îles Sandwich du Sud (la)", "GS", "SGS", "239"},
- {"Fiji", "Fidji (les)", "FJ", "FJI", "242"},
- {"Finland", "Finlande (la)", "FI", "FIN", "246"},
- {"Åland Islands", "Åland(les Îles)", "AX", "ALA", "248"},
- {"France", "France (la)", "FR", "FRA", "250"},
- {"French Guiana", "Guyane française (la )", "GF", "GUF", "254"},
- {"French Polynesia", "Polynésie française (la)", "PF", "PYF", "258"},
- {"French Southern Territories (the)", "Terres australes françaises (les)", "TF", "ATF", "260"},
- {"Djibouti", "Djibouti", "DJ", "DJI", "262"},
- {"Gabon", "Gabon (le)", "GA", "GAB", "266"},
- {"Georgia", "Géorgie (la)", "GE", "GEO", "268"},
- {"Gambia (the)", "Gambie (la)", "GM", "GMB", "270"},
- {"Palestine, State of", "Palestine, État de", "PS", "PSE", "275"},
- {"Germany", "Allemagne (l')", "DE", "DEU", "276"},
- {"Ghana", "Ghana (le)", "GH", "GHA", "288"},
- {"Gibraltar", "Gibraltar", "GI", "GIB", "292"},
- {"Kiribati", "Kiribati", "KI", "KIR", "296"},
- {"Greece", "Grèce (la)", "GR", "GRC", "300"},
- {"Greenland", "Groenland (le)", "GL", "GRL", "304"},
- {"Grenada", "Grenade (la)", "GD", "GRD", "308"},
- {"Guadeloupe", "Guadeloupe (la)", "GP", "GLP", "312"},
- {"Guam", "Guam", "GU", "GUM", "316"},
- {"Guatemala", "Guatemala (le)", "GT", "GTM", "320"},
- {"Guinea", "Guinée (la)", "GN", "GIN", "324"},
- {"Guyana", "Guyana (le)", "GY", "GUY", "328"},
- {"Haiti", "Haïti", "HT", "HTI", "332"},
- {"Heard Island and McDonald Islands", "Heard-et-Îles MacDonald (l'Île)", "HM", "HMD", "334"},
- {"Holy See (the)", "Saint-Siège (le)", "VA", "VAT", "336"},
- {"Honduras", "Honduras (le)", "HN", "HND", "340"},
- {"Hong Kong", "Hong Kong", "HK", "HKG", "344"},
- {"Hungary", "Hongrie (la)", "HU", "HUN", "348"},
- {"Iceland", "Islande (l')", "IS", "ISL", "352"},
- {"India", "Inde (l')", "IN", "IND", "356"},
- {"Indonesia", "Indonésie (l')", "ID", "IDN", "360"},
- {"Iran (Islamic Republic of)", "Iran (République Islamique d')", "IR", "IRN", "364"},
- {"Iraq", "Iraq (l')", "IQ", "IRQ", "368"},
- {"Ireland", "Irlande (l')", "IE", "IRL", "372"},
- {"Israel", "Israël", "IL", "ISR", "376"},
- {"Italy", "Italie (l')", "IT", "ITA", "380"},
- {"Côte d'Ivoire", "Côte d'Ivoire (la)", "CI", "CIV", "384"},
- {"Jamaica", "Jamaïque (la)", "JM", "JAM", "388"},
- {"Japan", "Japon (le)", "JP", "JPN", "392"},
- {"Kazakhstan", "Kazakhstan (le)", "KZ", "KAZ", "398"},
- {"Jordan", "Jordanie (la)", "JO", "JOR", "400"},
- {"Kenya", "Kenya (le)", "KE", "KEN", "404"},
- {"Korea (the Democratic People's Republic of)", "Corée (la République populaire démocratique de)", "KP", "PRK", "408"},
- {"Korea (the Republic of)", "Corée (la République de)", "KR", "KOR", "410"},
- {"Kuwait", "Koweït (le)", "KW", "KWT", "414"},
- {"Kyrgyzstan", "Kirghizistan (le)", "KG", "KGZ", "417"},
- {"Lao People's Democratic Republic (the)", "Lao, République démocratique populaire", "LA", "LAO", "418"},
- {"Lebanon", "Liban (le)", "LB", "LBN", "422"},
- {"Lesotho", "Lesotho (le)", "LS", "LSO", "426"},
- {"Latvia", "Lettonie (la)", "LV", "LVA", "428"},
- {"Liberia", "Libéria (le)", "LR", "LBR", "430"},
- {"Libya", "Libye (la)", "LY", "LBY", "434"},
- {"Liechtenstein", "Liechtenstein (le)", "LI", "LIE", "438"},
- {"Lithuania", "Lituanie (la)", "LT", "LTU", "440"},
- {"Luxembourg", "Luxembourg (le)", "LU", "LUX", "442"},
- {"Macao", "Macao", "MO", "MAC", "446"},
- {"Madagascar", "Madagascar", "MG", "MDG", "450"},
- {"Malawi", "Malawi (le)", "MW", "MWI", "454"},
- {"Malaysia", "Malaisie (la)", "MY", "MYS", "458"},
- {"Maldives", "Maldives (les)", "MV", "MDV", "462"},
- {"Mali", "Mali (le)", "ML", "MLI", "466"},
- {"Malta", "Malte", "MT", "MLT", "470"},
- {"Martinique", "Martinique (la)", "MQ", "MTQ", "474"},
- {"Mauritania", "Mauritanie (la)", "MR", "MRT", "478"},
- {"Mauritius", "Maurice", "MU", "MUS", "480"},
- {"Mexico", "Mexique (le)", "MX", "MEX", "484"},
- {"Monaco", "Monaco", "MC", "MCO", "492"},
- {"Mongolia", "Mongolie (la)", "MN", "MNG", "496"},
- {"Moldova (the Republic of)", "Moldova , République de", "MD", "MDA", "498"},
- {"Montenegro", "Monténégro (le)", "ME", "MNE", "499"},
- {"Montserrat", "Montserrat", "MS", "MSR", "500"},
- {"Morocco", "Maroc (le)", "MA", "MAR", "504"},
- {"Mozambique", "Mozambique (le)", "MZ", "MOZ", "508"},
- {"Oman", "Oman", "OM", "OMN", "512"},
- {"Namibia", "Namibie (la)", "NA", "NAM", "516"},
- {"Nauru", "Nauru", "NR", "NRU", "520"},
- {"Nepal", "Népal (le)", "NP", "NPL", "524"},
- {"Netherlands (the)", "Pays-Bas (les)", "NL", "NLD", "528"},
- {"Curaçao", "Curaçao", "CW", "CUW", "531"},
- {"Aruba", "Aruba", "AW", "ABW", "533"},
- {"Sint Maarten (Dutch part)", "Saint-Martin (partie néerlandaise)", "SX", "SXM", "534"},
- {"Bonaire, Sint Eustatius and Saba", "Bonaire, Saint-Eustache et Saba", "BQ", "BES", "535"},
- {"New Caledonia", "Nouvelle-Calédonie (la)", "NC", "NCL", "540"},
- {"Vanuatu", "Vanuatu (le)", "VU", "VUT", "548"},
- {"New Zealand", "Nouvelle-Zélande (la)", "NZ", "NZL", "554"},
- {"Nicaragua", "Nicaragua (le)", "NI", "NIC", "558"},
- {"Niger (the)", "Niger (le)", "NE", "NER", "562"},
- {"Nigeria", "Nigéria (le)", "NG", "NGA", "566"},
- {"Niue", "Niue", "NU", "NIU", "570"},
- {"Norfolk Island", "Norfolk (l'Île)", "NF", "NFK", "574"},
- {"Norway", "Norvège (la)", "NO", "NOR", "578"},
- {"Northern Mariana Islands (the)", "Mariannes du Nord (les Îles)", "MP", "MNP", "580"},
- {"United States Minor Outlying Islands (the)", "Îles mineures éloignées des États-Unis (les)", "UM", "UMI", "581"},
- {"Micronesia (Federated States of)", "Micronésie (États fédérés de)", "FM", "FSM", "583"},
- {"Marshall Islands (the)", "Marshall (Îles)", "MH", "MHL", "584"},
- {"Palau", "Palaos (les)", "PW", "PLW", "585"},
- {"Pakistan", "Pakistan (le)", "PK", "PAK", "586"},
- {"Panama", "Panama (le)", "PA", "PAN", "591"},
- {"Papua New Guinea", "Papouasie-Nouvelle-Guinée (la)", "PG", "PNG", "598"},
- {"Paraguay", "Paraguay (le)", "PY", "PRY", "600"},
- {"Peru", "Pérou (le)", "PE", "PER", "604"},
- {"Philippines (the)", "Philippines (les)", "PH", "PHL", "608"},
- {"Pitcairn", "Pitcairn", "PN", "PCN", "612"},
- {"Poland", "Pologne (la)", "PL", "POL", "616"},
- {"Portugal", "Portugal (le)", "PT", "PRT", "620"},
- {"Guinea-Bissau", "Guinée-Bissau (la)", "GW", "GNB", "624"},
- {"Timor-Leste", "Timor-Leste (le)", "TL", "TLS", "626"},
- {"Puerto Rico", "Porto Rico", "PR", "PRI", "630"},
- {"Qatar", "Qatar (le)", "QA", "QAT", "634"},
- {"Réunion", "Réunion (La)", "RE", "REU", "638"},
- {"Romania", "Roumanie (la)", "RO", "ROU", "642"},
- {"Russian Federation (the)", "Russie (la Fédération de)", "RU", "RUS", "643"},
- {"Rwanda", "Rwanda (le)", "RW", "RWA", "646"},
- {"Saint Barthélemy", "Saint-Barthélemy", "BL", "BLM", "652"},
- {"Saint Helena, Ascension and Tristan da Cunha", "Sainte-Hélène, Ascension et Tristan da Cunha", "SH", "SHN", "654"},
- {"Saint Kitts and Nevis", "Saint-Kitts-et-Nevis", "KN", "KNA", "659"},
- {"Anguilla", "Anguilla", "AI", "AIA", "660"},
- {"Saint Lucia", "Sainte-Lucie", "LC", "LCA", "662"},
- {"Saint Martin (French part)", "Saint-Martin (partie française)", "MF", "MAF", "663"},
- {"Saint Pierre and Miquelon", "Saint-Pierre-et-Miquelon", "PM", "SPM", "666"},
- {"Saint Vincent and the Grenadines", "Saint-Vincent-et-les Grenadines", "VC", "VCT", "670"},
- {"San Marino", "Saint-Marin", "SM", "SMR", "674"},
- {"Sao Tome and Principe", "Sao Tomé-et-Principe", "ST", "STP", "678"},
- {"Saudi Arabia", "Arabie saoudite (l')", "SA", "SAU", "682"},
- {"Senegal", "Sénégal (le)", "SN", "SEN", "686"},
- {"Serbia", "Serbie (la)", "RS", "SRB", "688"},
- {"Seychelles", "Seychelles (les)", "SC", "SYC", "690"},
- {"Sierra Leone", "Sierra Leone (la)", "SL", "SLE", "694"},
- {"Singapore", "Singapour", "SG", "SGP", "702"},
- {"Slovakia", "Slovaquie (la)", "SK", "SVK", "703"},
- {"Viet Nam", "Viet Nam (le)", "VN", "VNM", "704"},
- {"Slovenia", "Slovénie (la)", "SI", "SVN", "705"},
- {"Somalia", "Somalie (la)", "SO", "SOM", "706"},
- {"South Africa", "Afrique du Sud (l')", "ZA", "ZAF", "710"},
- {"Zimbabwe", "Zimbabwe (le)", "ZW", "ZWE", "716"},
- {"Spain", "Espagne (l')", "ES", "ESP", "724"},
- {"South Sudan", "Soudan du Sud (le)", "SS", "SSD", "728"},
- {"Sudan (the)", "Soudan (le)", "SD", "SDN", "729"},
- {"Western Sahara*", "Sahara occidental (le)*", "EH", "ESH", "732"},
- {"Suriname", "Suriname (le)", "SR", "SUR", "740"},
- {"Svalbard and Jan Mayen", "Svalbard et l'Île Jan Mayen (le)", "SJ", "SJM", "744"},
- {"Swaziland", "Swaziland (le)", "SZ", "SWZ", "748"},
- {"Sweden", "Suède (la)", "SE", "SWE", "752"},
- {"Switzerland", "Suisse (la)", "CH", "CHE", "756"},
- {"Syrian Arab Republic", "République arabe syrienne (la)", "SY", "SYR", "760"},
- {"Tajikistan", "Tadjikistan (le)", "TJ", "TJK", "762"},
- {"Thailand", "Thaïlande (la)", "TH", "THA", "764"},
- {"Togo", "Togo (le)", "TG", "TGO", "768"},
- {"Tokelau", "Tokelau (les)", "TK", "TKL", "772"},
- {"Tonga", "Tonga (les)", "TO", "TON", "776"},
- {"Trinidad and Tobago", "Trinité-et-Tobago (la)", "TT", "TTO", "780"},
- {"United Arab Emirates (the)", "Émirats arabes unis (les)", "AE", "ARE", "784"},
- {"Tunisia", "Tunisie (la)", "TN", "TUN", "788"},
- {"Turkey", "Turquie (la)", "TR", "TUR", "792"},
- {"Turkmenistan", "Turkménistan (le)", "TM", "TKM", "795"},
- {"Turks and Caicos Islands (the)", "Turks-et-Caïcos (les Îles)", "TC", "TCA", "796"},
- {"Tuvalu", "Tuvalu (les)", "TV", "TUV", "798"},
- {"Uganda", "Ouganda (l')", "UG", "UGA", "800"},
- {"Ukraine", "Ukraine (l')", "UA", "UKR", "804"},
- {"Macedonia (the former Yugoslav Republic of)", "Macédoine (l'ex‑République yougoslave de)", "MK", "MKD", "807"},
- {"Egypt", "Égypte (l')", "EG", "EGY", "818"},
- {"United Kingdom of Great Britain and Northern Ireland (the)", "Royaume-Uni de Grande-Bretagne et d'Irlande du Nord (le)", "GB", "GBR", "826"},
- {"Guernsey", "Guernesey", "GG", "GGY", "831"},
- {"Jersey", "Jersey", "JE", "JEY", "832"},
- {"Isle of Man", "Île de Man", "IM", "IMN", "833"},
- {"Tanzania, United Republic of", "Tanzanie, République-Unie de", "TZ", "TZA", "834"},
- {"United States of America (the)", "États-Unis d'Amérique (les)", "US", "USA", "840"},
- {"Virgin Islands (U.S.)", "Vierges des États-Unis (les Îles)", "VI", "VIR", "850"},
- {"Burkina Faso", "Burkina Faso (le)", "BF", "BFA", "854"},
- {"Uruguay", "Uruguay (l')", "UY", "URY", "858"},
- {"Uzbekistan", "Ouzbékistan (l')", "UZ", "UZB", "860"},
- {"Venezuela (Bolivarian Republic of)", "Venezuela (République bolivarienne du)", "VE", "VEN", "862"},
- {"Wallis and Futuna", "Wallis-et-Futuna", "WF", "WLF", "876"},
- {"Samoa", "Samoa (le)", "WS", "WSM", "882"},
- {"Yemen", "Yémen (le)", "YE", "YEM", "887"},
- {"Zambia", "Zambie (la)", "ZM", "ZMB", "894"},
-}
-
-// ISO4217List is the list of ISO currency codes
-var ISO4217List = []string{
- "AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZN",
- "BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BOV", "BRL", "BSD", "BTN", "BWP", "BYN", "BZD",
- "CAD", "CDF", "CHE", "CHF", "CHW", "CLF", "CLP", "CNY", "COP", "COU", "CRC", "CUC", "CUP", "CVE", "CZK",
- "DJF", "DKK", "DOP", "DZD",
- "EGP", "ERN", "ETB", "EUR",
- "FJD", "FKP",
- "GBP", "GEL", "GHS", "GIP", "GMD", "GNF", "GTQ", "GYD",
- "HKD", "HNL", "HRK", "HTG", "HUF",
- "IDR", "ILS", "INR", "IQD", "IRR", "ISK",
- "JMD", "JOD", "JPY",
- "KES", "KGS", "KHR", "KMF", "KPW", "KRW", "KWD", "KYD", "KZT",
- "LAK", "LBP", "LKR", "LRD", "LSL", "LYD",
- "MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP", "MRO", "MUR", "MVR", "MWK", "MXN", "MXV", "MYR", "MZN",
- "NAD", "NGN", "NIO", "NOK", "NPR", "NZD",
- "OMR",
- "PAB", "PEN", "PGK", "PHP", "PKR", "PLN", "PYG",
- "QAR",
- "RON", "RSD", "RUB", "RWF",
- "SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "SSP", "STD", "SVC", "SYP", "SZL",
- "THB", "TJS", "TMT", "TND", "TOP", "TRY", "TTD", "TWD", "TZS",
- "UAH", "UGX", "USD", "USN", "UYI", "UYU", "UZS",
- "VEF", "VND", "VUV",
- "WST",
- "XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XOF", "XPD", "XPF", "XPT", "XSU", "XTS", "XUA", "XXX",
- "YER",
- "ZAR", "ZMW", "ZWL",
-}
-
-// ISO693Entry stores ISO language codes
-type ISO693Entry struct {
- Alpha3bCode string
- Alpha2Code string
- English string
-}
-
-// ISO693List is based on http://data.okfn.org/data/core/language-codes/r/language-codes-3b2.json
-var ISO693List = []ISO693Entry{
- {Alpha3bCode: "aar", Alpha2Code: "aa", English: "Afar"},
- {Alpha3bCode: "abk", Alpha2Code: "ab", English: "Abkhazian"},
- {Alpha3bCode: "afr", Alpha2Code: "af", English: "Afrikaans"},
- {Alpha3bCode: "aka", Alpha2Code: "ak", English: "Akan"},
- {Alpha3bCode: "alb", Alpha2Code: "sq", English: "Albanian"},
- {Alpha3bCode: "amh", Alpha2Code: "am", English: "Amharic"},
- {Alpha3bCode: "ara", Alpha2Code: "ar", English: "Arabic"},
- {Alpha3bCode: "arg", Alpha2Code: "an", English: "Aragonese"},
- {Alpha3bCode: "arm", Alpha2Code: "hy", English: "Armenian"},
- {Alpha3bCode: "asm", Alpha2Code: "as", English: "Assamese"},
- {Alpha3bCode: "ava", Alpha2Code: "av", English: "Avaric"},
- {Alpha3bCode: "ave", Alpha2Code: "ae", English: "Avestan"},
- {Alpha3bCode: "aym", Alpha2Code: "ay", English: "Aymara"},
- {Alpha3bCode: "aze", Alpha2Code: "az", English: "Azerbaijani"},
- {Alpha3bCode: "bak", Alpha2Code: "ba", English: "Bashkir"},
- {Alpha3bCode: "bam", Alpha2Code: "bm", English: "Bambara"},
- {Alpha3bCode: "baq", Alpha2Code: "eu", English: "Basque"},
- {Alpha3bCode: "bel", Alpha2Code: "be", English: "Belarusian"},
- {Alpha3bCode: "ben", Alpha2Code: "bn", English: "Bengali"},
- {Alpha3bCode: "bih", Alpha2Code: "bh", English: "Bihari languages"},
- {Alpha3bCode: "bis", Alpha2Code: "bi", English: "Bislama"},
- {Alpha3bCode: "bos", Alpha2Code: "bs", English: "Bosnian"},
- {Alpha3bCode: "bre", Alpha2Code: "br", English: "Breton"},
- {Alpha3bCode: "bul", Alpha2Code: "bg", English: "Bulgarian"},
- {Alpha3bCode: "bur", Alpha2Code: "my", English: "Burmese"},
- {Alpha3bCode: "cat", Alpha2Code: "ca", English: "Catalan; Valencian"},
- {Alpha3bCode: "cha", Alpha2Code: "ch", English: "Chamorro"},
- {Alpha3bCode: "che", Alpha2Code: "ce", English: "Chechen"},
- {Alpha3bCode: "chi", Alpha2Code: "zh", English: "Chinese"},
- {Alpha3bCode: "chu", Alpha2Code: "cu", English: "Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic"},
- {Alpha3bCode: "chv", Alpha2Code: "cv", English: "Chuvash"},
- {Alpha3bCode: "cor", Alpha2Code: "kw", English: "Cornish"},
- {Alpha3bCode: "cos", Alpha2Code: "co", English: "Corsican"},
- {Alpha3bCode: "cre", Alpha2Code: "cr", English: "Cree"},
- {Alpha3bCode: "cze", Alpha2Code: "cs", English: "Czech"},
- {Alpha3bCode: "dan", Alpha2Code: "da", English: "Danish"},
- {Alpha3bCode: "div", Alpha2Code: "dv", English: "Divehi; Dhivehi; Maldivian"},
- {Alpha3bCode: "dut", Alpha2Code: "nl", English: "Dutch; Flemish"},
- {Alpha3bCode: "dzo", Alpha2Code: "dz", English: "Dzongkha"},
- {Alpha3bCode: "eng", Alpha2Code: "en", English: "English"},
- {Alpha3bCode: "epo", Alpha2Code: "eo", English: "Esperanto"},
- {Alpha3bCode: "est", Alpha2Code: "et", English: "Estonian"},
- {Alpha3bCode: "ewe", Alpha2Code: "ee", English: "Ewe"},
- {Alpha3bCode: "fao", Alpha2Code: "fo", English: "Faroese"},
- {Alpha3bCode: "fij", Alpha2Code: "fj", English: "Fijian"},
- {Alpha3bCode: "fin", Alpha2Code: "fi", English: "Finnish"},
- {Alpha3bCode: "fre", Alpha2Code: "fr", English: "French"},
- {Alpha3bCode: "fry", Alpha2Code: "fy", English: "Western Frisian"},
- {Alpha3bCode: "ful", Alpha2Code: "ff", English: "Fulah"},
- {Alpha3bCode: "geo", Alpha2Code: "ka", English: "Georgian"},
- {Alpha3bCode: "ger", Alpha2Code: "de", English: "German"},
- {Alpha3bCode: "gla", Alpha2Code: "gd", English: "Gaelic; Scottish Gaelic"},
- {Alpha3bCode: "gle", Alpha2Code: "ga", English: "Irish"},
- {Alpha3bCode: "glg", Alpha2Code: "gl", English: "Galician"},
- {Alpha3bCode: "glv", Alpha2Code: "gv", English: "Manx"},
- {Alpha3bCode: "gre", Alpha2Code: "el", English: "Greek, Modern (1453-)"},
- {Alpha3bCode: "grn", Alpha2Code: "gn", English: "Guarani"},
- {Alpha3bCode: "guj", Alpha2Code: "gu", English: "Gujarati"},
- {Alpha3bCode: "hat", Alpha2Code: "ht", English: "Haitian; Haitian Creole"},
- {Alpha3bCode: "hau", Alpha2Code: "ha", English: "Hausa"},
- {Alpha3bCode: "heb", Alpha2Code: "he", English: "Hebrew"},
- {Alpha3bCode: "her", Alpha2Code: "hz", English: "Herero"},
- {Alpha3bCode: "hin", Alpha2Code: "hi", English: "Hindi"},
- {Alpha3bCode: "hmo", Alpha2Code: "ho", English: "Hiri Motu"},
- {Alpha3bCode: "hrv", Alpha2Code: "hr", English: "Croatian"},
- {Alpha3bCode: "hun", Alpha2Code: "hu", English: "Hungarian"},
- {Alpha3bCode: "ibo", Alpha2Code: "ig", English: "Igbo"},
- {Alpha3bCode: "ice", Alpha2Code: "is", English: "Icelandic"},
- {Alpha3bCode: "ido", Alpha2Code: "io", English: "Ido"},
- {Alpha3bCode: "iii", Alpha2Code: "ii", English: "Sichuan Yi; Nuosu"},
- {Alpha3bCode: "iku", Alpha2Code: "iu", English: "Inuktitut"},
- {Alpha3bCode: "ile", Alpha2Code: "ie", English: "Interlingue; Occidental"},
- {Alpha3bCode: "ina", Alpha2Code: "ia", English: "Interlingua (International Auxiliary Language Association)"},
- {Alpha3bCode: "ind", Alpha2Code: "id", English: "Indonesian"},
- {Alpha3bCode: "ipk", Alpha2Code: "ik", English: "Inupiaq"},
- {Alpha3bCode: "ita", Alpha2Code: "it", English: "Italian"},
- {Alpha3bCode: "jav", Alpha2Code: "jv", English: "Javanese"},
- {Alpha3bCode: "jpn", Alpha2Code: "ja", English: "Japanese"},
- {Alpha3bCode: "kal", Alpha2Code: "kl", English: "Kalaallisut; Greenlandic"},
- {Alpha3bCode: "kan", Alpha2Code: "kn", English: "Kannada"},
- {Alpha3bCode: "kas", Alpha2Code: "ks", English: "Kashmiri"},
- {Alpha3bCode: "kau", Alpha2Code: "kr", English: "Kanuri"},
- {Alpha3bCode: "kaz", Alpha2Code: "kk", English: "Kazakh"},
- {Alpha3bCode: "khm", Alpha2Code: "km", English: "Central Khmer"},
- {Alpha3bCode: "kik", Alpha2Code: "ki", English: "Kikuyu; Gikuyu"},
- {Alpha3bCode: "kin", Alpha2Code: "rw", English: "Kinyarwanda"},
- {Alpha3bCode: "kir", Alpha2Code: "ky", English: "Kirghiz; Kyrgyz"},
- {Alpha3bCode: "kom", Alpha2Code: "kv", English: "Komi"},
- {Alpha3bCode: "kon", Alpha2Code: "kg", English: "Kongo"},
- {Alpha3bCode: "kor", Alpha2Code: "ko", English: "Korean"},
- {Alpha3bCode: "kua", Alpha2Code: "kj", English: "Kuanyama; Kwanyama"},
- {Alpha3bCode: "kur", Alpha2Code: "ku", English: "Kurdish"},
- {Alpha3bCode: "lao", Alpha2Code: "lo", English: "Lao"},
- {Alpha3bCode: "lat", Alpha2Code: "la", English: "Latin"},
- {Alpha3bCode: "lav", Alpha2Code: "lv", English: "Latvian"},
- {Alpha3bCode: "lim", Alpha2Code: "li", English: "Limburgan; Limburger; Limburgish"},
- {Alpha3bCode: "lin", Alpha2Code: "ln", English: "Lingala"},
- {Alpha3bCode: "lit", Alpha2Code: "lt", English: "Lithuanian"},
- {Alpha3bCode: "ltz", Alpha2Code: "lb", English: "Luxembourgish; Letzeburgesch"},
- {Alpha3bCode: "lub", Alpha2Code: "lu", English: "Luba-Katanga"},
- {Alpha3bCode: "lug", Alpha2Code: "lg", English: "Ganda"},
- {Alpha3bCode: "mac", Alpha2Code: "mk", English: "Macedonian"},
- {Alpha3bCode: "mah", Alpha2Code: "mh", English: "Marshallese"},
- {Alpha3bCode: "mal", Alpha2Code: "ml", English: "Malayalam"},
- {Alpha3bCode: "mao", Alpha2Code: "mi", English: "Maori"},
- {Alpha3bCode: "mar", Alpha2Code: "mr", English: "Marathi"},
- {Alpha3bCode: "may", Alpha2Code: "ms", English: "Malay"},
- {Alpha3bCode: "mlg", Alpha2Code: "mg", English: "Malagasy"},
- {Alpha3bCode: "mlt", Alpha2Code: "mt", English: "Maltese"},
- {Alpha3bCode: "mon", Alpha2Code: "mn", English: "Mongolian"},
- {Alpha3bCode: "nau", Alpha2Code: "na", English: "Nauru"},
- {Alpha3bCode: "nav", Alpha2Code: "nv", English: "Navajo; Navaho"},
- {Alpha3bCode: "nbl", Alpha2Code: "nr", English: "Ndebele, South; South Ndebele"},
- {Alpha3bCode: "nde", Alpha2Code: "nd", English: "Ndebele, North; North Ndebele"},
- {Alpha3bCode: "ndo", Alpha2Code: "ng", English: "Ndonga"},
- {Alpha3bCode: "nep", Alpha2Code: "ne", English: "Nepali"},
- {Alpha3bCode: "nno", Alpha2Code: "nn", English: "Norwegian Nynorsk; Nynorsk, Norwegian"},
- {Alpha3bCode: "nob", Alpha2Code: "nb", English: "Bokmål, Norwegian; Norwegian Bokmål"},
- {Alpha3bCode: "nor", Alpha2Code: "no", English: "Norwegian"},
- {Alpha3bCode: "nya", Alpha2Code: "ny", English: "Chichewa; Chewa; Nyanja"},
- {Alpha3bCode: "oci", Alpha2Code: "oc", English: "Occitan (post 1500); Provençal"},
- {Alpha3bCode: "oji", Alpha2Code: "oj", English: "Ojibwa"},
- {Alpha3bCode: "ori", Alpha2Code: "or", English: "Oriya"},
- {Alpha3bCode: "orm", Alpha2Code: "om", English: "Oromo"},
- {Alpha3bCode: "oss", Alpha2Code: "os", English: "Ossetian; Ossetic"},
- {Alpha3bCode: "pan", Alpha2Code: "pa", English: "Panjabi; Punjabi"},
- {Alpha3bCode: "per", Alpha2Code: "fa", English: "Persian"},
- {Alpha3bCode: "pli", Alpha2Code: "pi", English: "Pali"},
- {Alpha3bCode: "pol", Alpha2Code: "pl", English: "Polish"},
- {Alpha3bCode: "por", Alpha2Code: "pt", English: "Portuguese"},
- {Alpha3bCode: "pus", Alpha2Code: "ps", English: "Pushto; Pashto"},
- {Alpha3bCode: "que", Alpha2Code: "qu", English: "Quechua"},
- {Alpha3bCode: "roh", Alpha2Code: "rm", English: "Romansh"},
- {Alpha3bCode: "rum", Alpha2Code: "ro", English: "Romanian; Moldavian; Moldovan"},
- {Alpha3bCode: "run", Alpha2Code: "rn", English: "Rundi"},
- {Alpha3bCode: "rus", Alpha2Code: "ru", English: "Russian"},
- {Alpha3bCode: "sag", Alpha2Code: "sg", English: "Sango"},
- {Alpha3bCode: "san", Alpha2Code: "sa", English: "Sanskrit"},
- {Alpha3bCode: "sin", Alpha2Code: "si", English: "Sinhala; Sinhalese"},
- {Alpha3bCode: "slo", Alpha2Code: "sk", English: "Slovak"},
- {Alpha3bCode: "slv", Alpha2Code: "sl", English: "Slovenian"},
- {Alpha3bCode: "sme", Alpha2Code: "se", English: "Northern Sami"},
- {Alpha3bCode: "smo", Alpha2Code: "sm", English: "Samoan"},
- {Alpha3bCode: "sna", Alpha2Code: "sn", English: "Shona"},
- {Alpha3bCode: "snd", Alpha2Code: "sd", English: "Sindhi"},
- {Alpha3bCode: "som", Alpha2Code: "so", English: "Somali"},
- {Alpha3bCode: "sot", Alpha2Code: "st", English: "Sotho, Southern"},
- {Alpha3bCode: "spa", Alpha2Code: "es", English: "Spanish; Castilian"},
- {Alpha3bCode: "srd", Alpha2Code: "sc", English: "Sardinian"},
- {Alpha3bCode: "srp", Alpha2Code: "sr", English: "Serbian"},
- {Alpha3bCode: "ssw", Alpha2Code: "ss", English: "Swati"},
- {Alpha3bCode: "sun", Alpha2Code: "su", English: "Sundanese"},
- {Alpha3bCode: "swa", Alpha2Code: "sw", English: "Swahili"},
- {Alpha3bCode: "swe", Alpha2Code: "sv", English: "Swedish"},
- {Alpha3bCode: "tah", Alpha2Code: "ty", English: "Tahitian"},
- {Alpha3bCode: "tam", Alpha2Code: "ta", English: "Tamil"},
- {Alpha3bCode: "tat", Alpha2Code: "tt", English: "Tatar"},
- {Alpha3bCode: "tel", Alpha2Code: "te", English: "Telugu"},
- {Alpha3bCode: "tgk", Alpha2Code: "tg", English: "Tajik"},
- {Alpha3bCode: "tgl", Alpha2Code: "tl", English: "Tagalog"},
- {Alpha3bCode: "tha", Alpha2Code: "th", English: "Thai"},
- {Alpha3bCode: "tib", Alpha2Code: "bo", English: "Tibetan"},
- {Alpha3bCode: "tir", Alpha2Code: "ti", English: "Tigrinya"},
- {Alpha3bCode: "ton", Alpha2Code: "to", English: "Tonga (Tonga Islands)"},
- {Alpha3bCode: "tsn", Alpha2Code: "tn", English: "Tswana"},
- {Alpha3bCode: "tso", Alpha2Code: "ts", English: "Tsonga"},
- {Alpha3bCode: "tuk", Alpha2Code: "tk", English: "Turkmen"},
- {Alpha3bCode: "tur", Alpha2Code: "tr", English: "Turkish"},
- {Alpha3bCode: "twi", Alpha2Code: "tw", English: "Twi"},
- {Alpha3bCode: "uig", Alpha2Code: "ug", English: "Uighur; Uyghur"},
- {Alpha3bCode: "ukr", Alpha2Code: "uk", English: "Ukrainian"},
- {Alpha3bCode: "urd", Alpha2Code: "ur", English: "Urdu"},
- {Alpha3bCode: "uzb", Alpha2Code: "uz", English: "Uzbek"},
- {Alpha3bCode: "ven", Alpha2Code: "ve", English: "Venda"},
- {Alpha3bCode: "vie", Alpha2Code: "vi", English: "Vietnamese"},
- {Alpha3bCode: "vol", Alpha2Code: "vo", English: "Volapük"},
- {Alpha3bCode: "wel", Alpha2Code: "cy", English: "Welsh"},
- {Alpha3bCode: "wln", Alpha2Code: "wa", English: "Walloon"},
- {Alpha3bCode: "wol", Alpha2Code: "wo", English: "Wolof"},
- {Alpha3bCode: "xho", Alpha2Code: "xh", English: "Xhosa"},
- {Alpha3bCode: "yid", Alpha2Code: "yi", English: "Yiddish"},
- {Alpha3bCode: "yor", Alpha2Code: "yo", English: "Yoruba"},
- {Alpha3bCode: "zha", Alpha2Code: "za", English: "Zhuang; Chuang"},
- {Alpha3bCode: "zul", Alpha2Code: "zu", English: "Zulu"},
-}
diff --git a/vendor/github.com/asaskevich/govalidator/utils.go b/vendor/github.com/asaskevich/govalidator/utils.go
deleted file mode 100644
index a0b706a74..000000000
--- a/vendor/github.com/asaskevich/govalidator/utils.go
+++ /dev/null
@@ -1,270 +0,0 @@
-package govalidator
-
-import (
- "errors"
- "fmt"
- "html"
- "math"
- "path"
- "regexp"
- "strings"
- "unicode"
- "unicode/utf8"
-)
-
-// Contains checks if the string contains the substring.
-func Contains(str, substring string) bool {
- return strings.Contains(str, substring)
-}
-
-// Matches checks if the string matches the pattern (the pattern is a regular expression).
-// It returns false if the pattern fails to compile.
-func Matches(str, pattern string) bool {
- match, _ := regexp.MatchString(pattern, str)
- return match
-}
-
-// LeftTrim trims characters from the left side of the input.
-// If the second argument is empty, leading whitespace is removed.
-func LeftTrim(str, chars string) string {
- if chars == "" {
- return strings.TrimLeftFunc(str, unicode.IsSpace)
- }
- r, _ := regexp.Compile("^[" + chars + "]+")
- return r.ReplaceAllString(str, "")
-}
-
-// RightTrim trims characters from the right side of the input.
-// If the second argument is empty, trailing whitespace is removed.
-func RightTrim(str, chars string) string {
- if chars == "" {
- return strings.TrimRightFunc(str, unicode.IsSpace)
- }
- r, _ := regexp.Compile("[" + chars + "]+$")
- return r.ReplaceAllString(str, "")
-}
-
-// Trim trims characters from both sides of the input.
-// If the second argument is empty, surrounding whitespace is removed.
-func Trim(str, chars string) string {
- return LeftTrim(RightTrim(str, chars), chars)
-}
-
-// WhiteList removes characters that do not appear in the whitelist.
-func WhiteList(str, chars string) string {
- pattern := "[^" + chars + "]+"
- r, _ := regexp.Compile(pattern)
- return r.ReplaceAllString(str, "")
-}
-
-// BlackList removes characters that appear in the blacklist.
-func BlackList(str, chars string) string {
- pattern := "[" + chars + "]+"
- r, _ := regexp.Compile(pattern)
- return r.ReplaceAllString(str, "")
-}
-
-// StripLow removes characters with a numerical value < 32 or equal to 127, mostly control characters.
-// If keepNewLines is true, newline characters are preserved (\n and \r, hex 0xA and 0xD).
-func StripLow(str string, keepNewLines bool) string {
- chars := ""
- if keepNewLines {
- chars = "\x00-\x09\x0B\x0C\x0E-\x1F\x7F"
- } else {
- chars = "\x00-\x1F\x7F"
- }
- return BlackList(str, chars)
-}
-
-// ReplacePattern replaces all matches of the regular expression pattern in the string.
-func ReplacePattern(str, pattern, replace string) string {
- r, _ := regexp.Compile(pattern)
- return r.ReplaceAllString(str, replace)
-}
-
-// Escape replaces <, >, & and " with HTML entities.
-var Escape = html.EscapeString
-
-func addSegment(inrune, segment []rune) []rune {
- if len(segment) == 0 {
- return inrune
- }
- if len(inrune) != 0 {
- inrune = append(inrune, '_')
- }
- inrune = append(inrune, segment...)
- return inrune
-}
-
-// UnderscoreToCamelCase converts from underscore separated form to camel case form.
-// Ex.: my_func => MyFunc
-func UnderscoreToCamelCase(s string) string {
- return strings.Replace(strings.Title(strings.Replace(strings.ToLower(s), "_", " ", -1)), " ", "", -1)
-}
-
-// CamelCaseToUnderscore converts from camel case form to underscore separated form.
-// Ex.: MyFunc => my_func
-func CamelCaseToUnderscore(str string) string {
- var output []rune
- var segment []rune
- for _, r := range str {
-
- // do not treat a number as the start of a new segment
- if !unicode.IsLower(r) && string(r) != "_" && !unicode.IsNumber(r) {
- output = addSegment(output, segment)
- segment = nil
- }
- segment = append(segment, unicode.ToLower(r))
- }
- output = addSegment(output, segment)
- return string(output)
-}
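
Because digits never start a new segment, a trailing version number stays attached to the letter that introduces it. A quick sketch of the expected round trip (exampleCasing is a hypothetical helper):

// exampleCasing is a hypothetical helper for illustration.
func exampleCasing() {
	fmt.Println(CamelCaseToUnderscore("MyFuncV2")) // prints "my_func_v2"
	fmt.Println(UnderscoreToCamelCase("my_func"))  // prints "MyFunc"
}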
-
-// Reverse returns the reversed string.
-func Reverse(s string) string {
- r := []rune(s)
- for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 {
- r[i], r[j] = r[j], r[i]
- }
- return string(r)
-}
-
-// GetLines splits the string by "\n" and returns a slice of lines.
-func GetLines(s string) []string {
- return strings.Split(s, "\n")
-}
-
-// GetLine returns the specified line of a multiline string.
-func GetLine(s string, index int) (string, error) {
- lines := GetLines(s)
- if index < 0 || index >= len(lines) {
- return "", errors.New("line index out of bounds")
- }
- return lines[index], nil
-}
-
-// RemoveTags removes all tags from an HTML string.
-func RemoveTags(s string) string {
- return ReplacePattern(s, "<[^>]*>", "")
-}
-
-// SafeFileName returns a safe string that can be used in file names.
-func SafeFileName(str string) string {
- name := strings.ToLower(str)
- name = path.Clean(path.Base(name))
- name = strings.Trim(name, " ")
- separators, err := regexp.Compile(`[ &_=+:]`)
- if err == nil {
- name = separators.ReplaceAllString(name, "-")
- }
- legal, err := regexp.Compile(`[^[:alnum:]-.]`)
- if err == nil {
- name = legal.ReplaceAllString(name, "")
- }
- for strings.Contains(name, "--") {
- name = strings.Replace(name, "--", "-", -1)
- }
- return name
-}
-
-// NormalizeEmail canonicalizes an email address.
-// Both the local part and the hostname are lowercased.
-// Normalization follows special rules for known providers: currently, Gmail addresses have dots removed in the local part,
-// are stripped of tags (e.g. some.one+tag@gmail.com becomes someone@gmail.com), and all @googlemail.com addresses are
-// normalized to @gmail.com.
-func NormalizeEmail(str string) (string, error) {
- if !IsEmail(str) {
- return "", fmt.Errorf("%s is not an email", str)
- }
- parts := strings.Split(str, "@")
- parts[0] = strings.ToLower(parts[0])
- parts[1] = strings.ToLower(parts[1])
- if parts[1] == "gmail.com" || parts[1] == "googlemail.com" {
- parts[1] = "gmail.com"
- parts[0] = strings.Split(ReplacePattern(parts[0], `\.`, ""), "+")[0]
- }
- return strings.Join(parts, "@"), nil
-}
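
A brief sketch of the Gmail-specific rules described above (exampleNormalizeEmail is a hypothetical helper):

// exampleNormalizeEmail is a hypothetical helper for illustration.
func exampleNormalizeEmail() {
	// Dots and "+tag" suffixes are stripped, and googlemail.com folds into gmail.com.
	out, _ := NormalizeEmail("some.one+news@googlemail.com")
	fmt.Println(out) // prints "someone@gmail.com"
}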
-
-// Truncate truncates a string to the closest word boundary near the given length and appends ending.
-func Truncate(str string, length int, ending string) string {
- var aftstr, befstr string
- if len(str) > length {
- words := strings.Fields(str)
- before, present := 0, 0
- for i := range words {
- befstr = aftstr
- before = present
- aftstr = aftstr + words[i] + " "
- present = len(aftstr)
- if present > length && i != 0 {
- if (length - before) < (present - length) {
- return Trim(befstr, " /\\.,\"'#!?&@+-") + ending
- }
- return Trim(aftstr, " /\\.,\"'#!?&@+-") + ending
- }
- }
- }
-
- return str
-}
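
For instance, with a limit of 25 the cut falls on whichever word boundary is closer to the limit, and the ending is appended afterwards (exampleTruncate is a hypothetical helper):

// exampleTruncate is a hypothetical helper for illustration.
func exampleTruncate() {
	s := Truncate("The quick brown fox jumps over the lazy dog", 25, "...")
	fmt.Println(s) // prints "The quick brown fox jumps..."
}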
-
-// PadLeft pads the left side of the string if its length is less than the indicated pad length.
-func PadLeft(str string, padStr string, padLen int) string {
- return buildPadStr(str, padStr, padLen, true, false)
-}
-
-// PadRight pads the right side of the string if its length is less than the indicated pad length.
-func PadRight(str string, padStr string, padLen int) string {
- return buildPadStr(str, padStr, padLen, false, true)
-}
-
-// PadBoth pads both sides of the string if its length is less than the indicated pad length.
-func PadBoth(str string, padStr string, padLen int) string {
- return buildPadStr(str, padStr, padLen, true, true)
-}
-
-// buildPadStr pads the string on the left, the right, or both sides; note that the padding string
-// can be Unicode and longer than one character.
-func buildPadStr(str string, padStr string, padLen int, padLeft bool, padRight bool) string {
-
- // When the requested pad length is less than the string's current rune count
- if padLen < utf8.RuneCountInString(str) {
- return str
- }
-
- padLen -= utf8.RuneCountInString(str)
-
- targetLen := padLen
-
- targetLenLeft := targetLen
- targetLenRight := targetLen
- if padLeft && padRight {
- targetLenLeft = padLen / 2
- targetLenRight = padLen - targetLenLeft
- }
-
- strToRepeatLen := utf8.RuneCountInString(padStr)
-
- repeatTimes := int(math.Ceil(float64(targetLen) / float64(strToRepeatLen)))
- repeatedString := strings.Repeat(padStr, repeatTimes)
-
- leftSide := ""
- if padLeft {
- leftSide = repeatedString[0:targetLenLeft]
- }
-
- rightSide := ""
- if padRight {
- rightSide = repeatedString[0:targetLenRight]
- }
-
- return leftSide + str + rightSide
-}
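
To make the left/right split concrete, a sketch with an ASCII pad string (examplePadding is a hypothetical helper):

// examplePadding is a hypothetical helper for illustration.
func examplePadding() {
	fmt.Println(PadLeft("d", "ab", 5))  // prints "ababd"
	fmt.Println(PadRight("d", "ab", 5)) // prints "dabab"
	fmt.Println(PadBoth("d", "ab", 10)) // prints "ababdababa": the left side gets 4 runes, the right 5
}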
-
-// TruncatingErrorf drops any args beyond the number of %s verbs in str before calling fmt.Errorf.
-func TruncatingErrorf(str string, args ...interface{}) error {
- n := strings.Count(str, "%s")
- return fmt.Errorf(str, args[:n]...)
-}
diff --git a/vendor/github.com/asaskevich/govalidator/validator.go b/vendor/github.com/asaskevich/govalidator/validator.go
deleted file mode 100644
index b18bbcb4c..000000000
--- a/vendor/github.com/asaskevich/govalidator/validator.go
+++ /dev/null
@@ -1,1278 +0,0 @@
-// Package govalidator is a package of validators and sanitizers for strings, structs and collections.
-package govalidator
-
-import (
- "bytes"
- "crypto/rsa"
- "crypto/x509"
- "encoding/base64"
- "encoding/json"
- "encoding/pem"
- "fmt"
- "io/ioutil"
- "net"
- "net/url"
- "reflect"
- "regexp"
- "sort"
- "strconv"
- "strings"
- "time"
- "unicode"
- "unicode/utf8"
-)
-
-var (
- fieldsRequiredByDefault bool
- nilPtrAllowedByRequired = false
- notNumberRegexp = regexp.MustCompile("[^0-9]+")
- whiteSpacesAndMinus = regexp.MustCompile(`[\s-]+`)
- paramsRegexp = regexp.MustCompile(`\(.*\)$`)
-)
-
-const maxURLRuneCount = 2083
-const minURLRuneCount = 3
-const RF3339WithoutZone = "2006-01-02T15:04:05"
-
-// SetFieldsRequiredByDefault causes validation to fail when struct fields
-// do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`).
-// This struct definition will fail govalidator.ValidateStruct() (and the field values do not matter):
-//     type exampleStruct struct {
-//         Name  string ``
-//         Email string `valid:"email"`
-//     }
-// This, however, will only fail when Email is empty or an invalid email address:
-//     type exampleStruct2 struct {
-//         Name  string `valid:"-"`
-//         Email string `valid:"email"`
-//     }
-// Lastly, this will only fail when Email is an invalid email address but not when it's empty:
-//     type exampleStruct2 struct {
-//         Name  string `valid:"-"`
-//         Email string `valid:"email,optional"`
-//     }
-func SetFieldsRequiredByDefault(value bool) {
- fieldsRequiredByDefault = value
-}
-
-// SetNilPtrAllowedByRequired causes validation to pass for nil ptrs when a field is set to required.
-// The validation will still reject ptr fields in their zero value state. Example with this enabled:
-//     type exampleStruct struct {
-//         Name *string `valid:"required"`
-//     }
-// With `Name` set to "", this will be considered invalid input and will cause a validation error.
-// With `Name` set to nil, this will be considered valid by validation.
-// By default this is disabled.
-func SetNilPtrAllowedByRequired(value bool) {
- nilPtrAllowedByRequired = value
-}
-
-// IsEmail checks if the string is an email address.
-func IsEmail(str string) bool {
- // TODO uppercase letters are not supported
- return rxEmail.MatchString(str)
-}
-
-// IsExistingEmail checks if the string is an email address on an existing domain.
-func IsExistingEmail(email string) bool {
-
- if len(email) < 6 || len(email) > 254 {
- return false
- }
- at := strings.LastIndex(email, "@")
- if at <= 0 || at > len(email)-3 {
- return false
- }
- user := email[:at]
- host := email[at+1:]
- if len(user) > 64 {
- return false
- }
- if userDotRegexp.MatchString(user) || !userRegexp.MatchString(user) || !hostRegexp.MatchString(host) {
- return false
- }
- switch host {
- case "localhost", "example.com":
- return true
- }
- if _, err := net.LookupMX(host); err != nil {
- if _, err := net.LookupIP(host); err != nil {
- return false
- }
- }
-
- return true
-}
-
-// IsURL checks if the string is a URL.
-func IsURL(str string) bool {
- if str == "" || utf8.RuneCountInString(str) >= maxURLRuneCount || len(str) <= minURLRuneCount || strings.HasPrefix(str, ".") {
- return false
- }
- strTemp := str
- if strings.Contains(str, ":") && !strings.Contains(str, "://") {
- // support URLs with no indicated scheme but with a colon for the port number
- // http:// is appended so url.Parse will succeed; strTemp is used so it does not impact rxURL.MatchString
- strTemp = "http://" + str
- }
- u, err := url.Parse(strTemp)
- if err != nil {
- return false
- }
- if strings.HasPrefix(u.Host, ".") {
- return false
- }
- if u.Host == "" && (u.Path != "" && !strings.Contains(u.Path, ".")) {
- return false
- }
- return rxURL.MatchString(str)
-}
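
A couple of illustrative calls (exampleIsURL is a hypothetical helper; the expected results assume the rxURL pattern compiled earlier in this package):

// exampleIsURL is a hypothetical helper for illustration.
func exampleIsURL() {
	fmt.Println(IsURL("https://example.com/path?q=1")) // true
	fmt.Println(IsURL(".example.com"))                 // false: a leading dot is rejected outright
}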
-
-// IsRequestURL checks if the string rawurl, assuming
-// it was received in an HTTP request, is a valid
-// URL conforming to RFC 3986.
-func IsRequestURL(rawurl string) bool {
- url, err := url.ParseRequestURI(rawurl)
- if err != nil {
- return false // couldn't even parse the rawurl
- }
- if len(url.Scheme) == 0 {
- return false // no scheme found
- }
- return true
-}
-
-// IsRequestURI checks if the string rawurl, assuming
-// it was received in an HTTP request, is an
-// absolute URI or an absolute path.
-func IsRequestURI(rawurl string) bool {
- _, err := url.ParseRequestURI(rawurl)
- return err == nil
-}
-
-// IsAlpha checks if the string contains only letters (a-zA-Z). Empty string is valid.
-func IsAlpha(str string) bool {
- if IsNull(str) {
- return true
- }
- return rxAlpha.MatchString(str)
-}
-
-// IsUTFLetter checks if the string contains only unicode letter characters.
-// Similar to IsAlpha but for all languages. Empty string is valid.
-func IsUTFLetter(str string) bool {
- if IsNull(str) {
- return true
- }
-
- for _, c := range str {
- if !unicode.IsLetter(c) {
- return false
- }
- }
- return true
-
-}
-
-// IsAlphanumeric checks if the string contains only letters and numbers. Empty string is valid.
-func IsAlphanumeric(str string) bool {
- if IsNull(str) {
- return true
- }
- return rxAlphanumeric.MatchString(str)
-}
-
-// IsUTFLetterNumeric checks if the string contains only unicode letters and numbers. Empty string is valid.
-func IsUTFLetterNumeric(str string) bool {
- if IsNull(str) {
- return true
- }
- for _, c := range str {
- if !unicode.IsLetter(c) && !unicode.IsNumber(c) { // letters and numbers are ok
- return false
- }
- }
- return true
-
-}
-
-// IsNumeric checks if the string contains only numbers. Empty string is valid.
-func IsNumeric(str string) bool {
- if IsNull(str) {
- return true
- }
- return rxNumeric.MatchString(str)
-}
-
-// IsUTFNumeric checks if the string contains only unicode numbers of any kind.
-// Numbers can be 0-9 but also fractions ¾, Roman Ⅸ and Hangzhou 〩. Empty string is valid.
-func IsUTFNumeric(str string) bool {
- if IsNull(str) {
- return true
- }
- if strings.IndexAny(str, "+-") > 0 {
- return false
- }
- if len(str) > 1 {
- str = strings.TrimPrefix(str, "-")
- str = strings.TrimPrefix(str, "+")
- }
- for _, c := range str {
- if !unicode.IsNumber(c) { // only numbers are ok; a leading sign was already trimmed
- return false
- }
- }
- return true
-
-}
-
-// IsUTFDigit checks if the string contains only unicode radix-10 decimal digits. Empty string is valid.
-func IsUTFDigit(str string) bool {
- if IsNull(str) {
- return true
- }
- if strings.IndexAny(str, "+-") > 0 {
- return false
- }
- if len(str) > 1 {
- str = strings.TrimPrefix(str, "-")
- str = strings.TrimPrefix(str, "+")
- }
- for _, c := range str {
- if !unicode.IsDigit(c) { // only digits are ok; a leading sign was already trimmed
- return false
- }
- }
- return true
-
-}
-
-// IsHexadecimal checks if the string is a hexadecimal number.
-func IsHexadecimal(str string) bool {
- return rxHexadecimal.MatchString(str)
-}
-
-// IsHexcolor checks if the string is a hexadecimal color.
-func IsHexcolor(str string) bool {
- return rxHexcolor.MatchString(str)
-}
-
-// IsRGBcolor checks if the string is a valid RGB color in the form rgb(RRR, GGG, BBB).
-func IsRGBcolor(str string) bool {
- return rxRGBcolor.MatchString(str)
-}
-
-// IsLowerCase checks if the string is lowercase. Empty string is valid.
-func IsLowerCase(str string) bool {
- if IsNull(str) {
- return true
- }
- return str == strings.ToLower(str)
-}
-
-// IsUpperCase checks if the string is uppercase. Empty string is valid.
-func IsUpperCase(str string) bool {
- if IsNull(str) {
- return true
- }
- return str == strings.ToUpper(str)
-}
-
-// HasLowerCase checks if the string contains at least one lowercase character. Empty string is valid.
-func HasLowerCase(str string) bool {
- if IsNull(str) {
- return true
- }
- return rxHasLowerCase.MatchString(str)
-}
-
-// HasUpperCase checks if the string contains at least one uppercase character. Empty string is valid.
-func HasUpperCase(str string) bool {
- if IsNull(str) {
- return true
- }
- return rxHasUpperCase.MatchString(str)
-}
-
-// IsInt checks if the string is an integer. Empty string is valid.
-func IsInt(str string) bool {
- if IsNull(str) {
- return true
- }
- return rxInt.MatchString(str)
-}
-
-// IsFloat checks if the string is a float.
-func IsFloat(str string) bool {
- return str != "" && rxFloat.MatchString(str)
-}
-
-// IsDivisibleBy checks if the string is a number that is divisible by another.
-// If the second argument is not a valid integer or is zero, it returns false.
-// Otherwise, if the first argument is not a valid integer or is zero, it returns true (an invalid string converts to zero).
-func IsDivisibleBy(str, num string) bool {
- f, _ := ToFloat(str)
- p := int64(f)
- q, _ := ToInt(num)
- if q == 0 {
- return false
- }
- return (p == 0) || (p%q == 0)
-}
-
-// IsNull checks if the string is empty (has zero length).
-func IsNull(str string) bool {
- return len(str) == 0
-}
-
-// HasWhitespaceOnly checks if the string contains only whitespace.
-func HasWhitespaceOnly(str string) bool {
- return len(str) > 0 && rxHasWhitespaceOnly.MatchString(str)
-}
-
-// HasWhitespace checks if the string contains any whitespace
-func HasWhitespace(str string) bool {
- return len(str) > 0 && rxHasWhitespace.MatchString(str)
-}
-
-// IsByteLength checks if the string's length (in bytes) falls in a range.
-func IsByteLength(str string, min, max int) bool {
- return len(str) >= min && len(str) <= max
-}
-
-// IsUUIDv3 checks if the string is a UUID version 3.
-func IsUUIDv3(str string) bool {
- return rxUUID3.MatchString(str)
-}
-
-// IsUUIDv4 checks if the string is a UUID version 4.
-func IsUUIDv4(str string) bool {
- return rxUUID4.MatchString(str)
-}
-
-// IsUUIDv5 checks if the string is a UUID version 5.
-func IsUUIDv5(str string) bool {
- return rxUUID5.MatchString(str)
-}
-
-// IsUUID checks if the string is a UUID (version 3, 4 or 5).
-func IsUUID(str string) bool {
- return rxUUID.MatchString(str)
-}
-
-// IsCreditCard checks if the string is a credit card number.
-func IsCreditCard(str string) bool {
- sanitized := notNumberRegexp.ReplaceAllString(str, "")
- if !rxCreditCard.MatchString(sanitized) {
- return false
- }
- var sum int64
- var digit string
- var tmpNum int64
- var shouldDouble bool
- for i := len(sanitized) - 1; i >= 0; i-- {
- digit = sanitized[i:(i + 1)]
- tmpNum, _ = ToInt(digit)
- if shouldDouble {
- tmpNum *= 2
- if tmpNum >= 10 {
- sum += ((tmpNum % 10) + 1)
- } else {
- sum += tmpNum
- }
- } else {
- sum += tmpNum
- }
- shouldDouble = !shouldDouble
- }
-
- return sum%10 == 0
-}
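
This is the standard Luhn checksum: walking right to left, every second digit is doubled, and a two-digit product contributes its digit sum (the `(tmpNum % 10) + 1` shortcut); the total must be divisible by 10. For the classic Visa test number the sum works out to 30 (exampleLuhn is a hypothetical helper):

// exampleLuhn is a hypothetical helper for illustration.
func exampleLuhn() {
	// 4111 1111 1111 1111: eight undoubled 1s (8) + seven doubled 1s (14) + a doubled 4 (8) = 30.
	fmt.Println(IsCreditCard("4111 1111 1111 1111")) // true: non-digits are stripped and 30 % 10 == 0
}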
-
-// IsISBN10 checks if the string is an ISBN version 10.
-func IsISBN10(str string) bool {
- return IsISBN(str, 10)
-}
-
-// IsISBN13 checks if the string is an ISBN version 13.
-func IsISBN13(str string) bool {
- return IsISBN(str, 13)
-}
-
-// IsISBN checks if the string is an ISBN (version 10 or 13).
-// If the version value is not equal to 10 or 13, both variants are checked.
-func IsISBN(str string, version int) bool {
- sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "")
- var checksum int32
- var i int32
- if version == 10 {
- if !rxISBN10.MatchString(sanitized) {
- return false
- }
- for i = 0; i < 9; i++ {
- checksum += (i + 1) * int32(sanitized[i]-'0')
- }
- if sanitized[9] == 'X' {
- checksum += 10 * 10
- } else {
- checksum += 10 * int32(sanitized[9]-'0')
- }
- if checksum%11 == 0 {
- return true
- }
- return false
- } else if version == 13 {
- if !rxISBN13.MatchString(sanitized) {
- return false
- }
- factor := []int32{1, 3}
- for i = 0; i < 12; i++ {
- checksum += factor[i%2] * int32(sanitized[i]-'0')
- }
- return (int32(sanitized[12]-'0'))-((10-(checksum%10))%10) == 0
- }
- return IsISBN(str, 10) || IsISBN(str, 13)
-}
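
As a worked example of both branches: for 0-306-40615-2 the ISBN-10 weighted sum is 1·0 + 2·3 + 3·0 + 4·6 + 5·4 + 6·0 + 7·6 + 8·1 + 9·5 + 10·2 = 165, which is divisible by 11; the ISBN-13 form of the same book passes the alternating 1/3-weight check (exampleISBN is a hypothetical helper):

// exampleISBN is a hypothetical helper for illustration.
func exampleISBN() {
	fmt.Println(IsISBN10("0-306-40615-2")) // true: hyphens are stripped before the checksum
	fmt.Println(IsISBN13("9780306406157")) // true: the weighted sum is 93, so the check digit is (10-3)%10 = 7
}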
-
-// IsJSON checks if the string is valid JSON (note: uses json.Unmarshal).
-func IsJSON(str string) bool {
- var js json.RawMessage
- return json.Unmarshal([]byte(str), &js) == nil
-}
-
-// IsMultibyte checks if the string contains one or more multibyte chars. Empty string is valid.
-func IsMultibyte(str string) bool {
- if IsNull(str) {
- return true
- }
- return rxMultibyte.MatchString(str)
-}
-
-// IsASCII checks if the string contains ASCII chars only. Empty string is valid.
-func IsASCII(str string) bool {
- if IsNull(str) {
- return true
- }
- return rxASCII.MatchString(str)
-}
-
-// IsPrintableASCII checks if the string contains printable ASCII chars only. Empty string is valid.
-func IsPrintableASCII(str string) bool {
- if IsNull(str) {
- return true
- }
- return rxPrintableASCII.MatchString(str)
-}
-
-// IsFullWidth checks if the string contains any full-width chars. Empty string is valid.
-func IsFullWidth(str string) bool {
- if IsNull(str) {
- return true
- }
- return rxFullWidth.MatchString(str)
-}
-
-// IsHalfWidth checks if the string contains any half-width chars. Empty string is valid.
-func IsHalfWidth(str string) bool {
- if IsNull(str) {
- return true
- }
- return rxHalfWidth.MatchString(str)
-}
-
-// IsVariableWidth checks if the string contains a mixture of full and half-width chars. Empty string is valid.
-func IsVariableWidth(str string) bool {
- if IsNull(str) {
- return true
- }
- return rxHalfWidth.MatchString(str) && rxFullWidth.MatchString(str)
-}
-
-// IsBase64 checks if a string is base64 encoded.
-func IsBase64(str string) bool {
- return rxBase64.MatchString(str)
-}
-
-// IsFilePath checks if a string is a Win or Unix file path and returns its type.
-func IsFilePath(str string) (bool, int) {
- if rxWinPath.MatchString(str) {
- // check the Windows path length limit, see:
- // http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath
- if len(str[3:]) > 32767 {
- return false, Win
- }
- return true, Win
- } else if rxUnixPath.MatchString(str) {
- return true, Unix
- }
- return false, Unknown
-}
-
-// IsDataURI checks if a string is a base64-encoded data URI, such as an image.
-func IsDataURI(str string) bool {
- dataURI := strings.Split(str, ",")
- if !rxDataURI.MatchString(dataURI[0]) {
- return false
- }
- return IsBase64(dataURI[1])
-}
-
-// IsISO3166Alpha2 checks if a string is a valid two-letter country code
-func IsISO3166Alpha2(str string) bool {
- for _, entry := range ISO3166List {
- if str == entry.Alpha2Code {
- return true
- }
- }
- return false
-}
-
-// IsISO3166Alpha3 checks if a string is a valid three-letter country code
-func IsISO3166Alpha3(str string) bool {
- for _, entry := range ISO3166List {
- if str == entry.Alpha3Code {
- return true
- }
- }
- return false
-}
-
-// IsISO693Alpha2 checks if a string is a valid two-letter language code
-func IsISO693Alpha2(str string) bool {
- for _, entry := range ISO693List {
- if str == entry.Alpha2Code {
- return true
- }
- }
- return false
-}
-
-// IsISO693Alpha3b checks if a string is a valid three-letter language code
-func IsISO693Alpha3b(str string) bool {
- for _, entry := range ISO693List {
- if str == entry.Alpha3bCode {
- return true
- }
- }
- return false
-}
-
-// IsDNSName will validate the given string as a DNS name
-func IsDNSName(str string) bool {
- if str == "" || len(strings.Replace(str, ".", "", -1)) > 255 {
- // constraints already violated
- return false
- }
- return !IsIP(str) && rxDNSName.MatchString(str)
-}
-
-// IsHash checks if a string is a hash of the given algorithm.
-// Algorithm is one of ['md4', 'md5', 'sha1', 'sha256', 'sha384', 'sha512', 'ripemd128', 'ripemd160', 'tiger128', 'tiger160', 'tiger192', 'crc32', 'crc32b']
-func IsHash(str string, algorithm string) bool {
- len := "0"
- algo := strings.ToLower(algorithm)
-
- if algo == "crc32" || algo == "crc32b" {
- len = "8"
- } else if algo == "md5" || algo == "md4" || algo == "ripemd128" || algo == "tiger128" {
- len = "32"
- } else if algo == "sha1" || algo == "ripemd160" || algo == "tiger160" {
- len = "40"
- } else if algo == "tiger192" {
- len = "48"
- } else if algo == "sha256" {
- len = "64"
- } else if algo == "sha384" {
- len = "96"
- } else if algo == "sha512" {
- len = "128"
- } else {
- return false
- }
-
- return Matches(str, "^[a-f0-9]{"+len+"}$")
-}
-
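
Note that IsHash only checks that the input is lowercase hex of the length implied by the algorithm name; it never computes a digest. A hypothetical usage sketch against the pre-removal API (the hex values are the well-known digests of the empty string):

```go
package main

import (
	"fmt"

	valid "github.com/asaskevich/govalidator"
)

func main() {
	// 32 hex chars for md5, 40 for sha1, 64 for sha256; content is not verified.
	fmt.Println(valid.IsHash("d41d8cd98f00b204e9800998ecf8427e", "md5"))            // true
	fmt.Println(valid.IsHash("da39a3ee5e6b4b0d3255bfef95601890afd80709", "sha1"))   // true
	fmt.Println(valid.IsHash("da39a3ee5e6b4b0d3255bfef95601890afd80709", "sha256")) // false: wrong length
	fmt.Println(valid.IsHash("zzzz", "md5"))                                        // false: not hex
}
```
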
-// IsDialString validates the given string for usage with the various Dial() functions
-func IsDialString(str string) bool {
-
- if h, p, err := net.SplitHostPort(str); err == nil && h != "" && p != "" && (IsDNSName(h) || IsIP(h)) && IsPort(p) {
- return true
- }
-
- return false
-}
-
-// IsIP checks if a string is either IP version 4 or 6.
-func IsIP(str string) bool {
- return net.ParseIP(str) != nil
-}
-
-// IsPort checks if a string represents a valid port
-func IsPort(str string) bool {
- if i, err := strconv.Atoi(str); err == nil && i > 0 && i < 65536 {
- return true
- }
- return false
-}
-
-// IsIPv4 checks if the string is an IP version 4.
-func IsIPv4(str string) bool {
- ip := net.ParseIP(str)
- return ip != nil && strings.Contains(str, ".")
-}
-
-// IsIPv6 checks if the string is an IP version 6.
-func IsIPv6(str string) bool {
- ip := net.ParseIP(str)
- return ip != nil && strings.Contains(str, ":")
-}
-
-// IsCIDR checks if the string is a valid CIDR notation (IPv4 & IPv6)
-func IsCIDR(str string) bool {
- _, _, err := net.ParseCIDR(str)
- return err == nil
-}
-
-// IsMAC checks if a string is a valid MAC address.
-// Possible MAC formats:
-// 01:23:45:67:89:ab
-// 01:23:45:67:89:ab:cd:ef
-// 01-23-45-67-89-ab
-// 01-23-45-67-89-ab-cd-ef
-// 0123.4567.89ab
-// 0123.4567.89ab.cdef
-func IsMAC(str string) bool {
- _, err := net.ParseMAC(str)
- return err == nil
-}
-
-// IsHost checks if the string is a valid IP (both v4 and v6) or a valid DNS name
-func IsHost(str string) bool {
- return IsIP(str) || IsDNSName(str)
-}
-
-// IsMongoID checks if the string is a valid hex-encoded representation of a MongoDB ObjectId.
-func IsMongoID(str string) bool {
- return rxHexadecimal.MatchString(str) && (len(str) == 24)
-}
-
-// IsLatitude checks if a string is a valid latitude.
-func IsLatitude(str string) bool {
- return rxLatitude.MatchString(str)
-}
-
-// IsLongitude checks if a string is a valid longitude.
-func IsLongitude(str string) bool {
- return rxLongitude.MatchString(str)
-}
-
-// IsRsaPublicKey checks if a string is a valid RSA public key with the provided bit length.
-func IsRsaPublicKey(str string, keylen int) bool {
- bb := bytes.NewBufferString(str)
- pemBytes, err := ioutil.ReadAll(bb)
- if err != nil {
- return false
- }
- block, _ := pem.Decode(pemBytes)
- if block != nil && block.Type != "PUBLIC KEY" {
- return false
- }
- var der []byte
-
- if block != nil {
- der = block.Bytes
- } else {
- der, err = base64.StdEncoding.DecodeString(str)
- if err != nil {
- return false
- }
- }
-
- key, err := x509.ParsePKIXPublicKey(der)
- if err != nil {
- return false
- }
- pubkey, ok := key.(*rsa.PublicKey)
- if !ok {
- return false
- }
- bitlen := len(pubkey.N.Bytes()) * 8
- return bitlen == int(keylen)
-}
-
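
Two quirks in IsRsaPublicKey above are worth flagging: the `ioutil.ReadAll(bytes.NewBufferString(str))` dance is just `[]byte(str)`, and `len(pubkey.N.Bytes()) * 8` rounds the key size up to a byte multiple. A simplified, runnable sketch using the precise `(*big.Int).BitLen` (it deliberately skips the bare-base64 fallback the original also accepts):

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"errors"
	"fmt"
)

// rsaKeyBits returns the modulus size of a PEM-encoded PKIX public key.
func rsaKeyBits(pemStr string) (int, error) {
	block, _ := pem.Decode([]byte(pemStr))
	if block == nil || block.Type != "PUBLIC KEY" {
		return 0, errors.New("no PUBLIC KEY PEM block")
	}
	key, err := x509.ParsePKIXPublicKey(block.Bytes)
	if err != nil {
		return 0, err
	}
	pub, ok := key.(*rsa.PublicKey)
	if !ok {
		return 0, errors.New("not an RSA key")
	}
	return pub.N.BitLen(), nil // exact bit length
}

func main() {
	priv, _ := rsa.GenerateKey(rand.Reader, 2048)
	der, _ := x509.MarshalPKIXPublicKey(&priv.PublicKey)
	pemStr := string(pem.EncodeToMemory(&pem.Block{Type: "PUBLIC KEY", Bytes: der}))
	bits, _ := rsaKeyBits(pemStr)
	fmt.Println(bits) // 2048
}
```
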
-func toJSONName(tag string) string {
- if tag == "" {
- return ""
- }
-
- // JSON name always comes first. If there's no options then split[0] is
- // JSON name, if JSON name is not set, then split[0] is an empty string.
- split := strings.SplitN(tag, ",", 2)
-
- name := split[0]
-
- // However it is possible that the field is skipped when
- // (de-)serializing from/to JSON, in which case assume that there is no
- // tag name to use
- if name == "-" {
- return ""
- }
- return name
-}
-
-func PrependPathToErrors(err error, path string) error {
- switch err2 := err.(type) {
- case Error:
- err2.Path = append([]string{path}, err2.Path...)
- return err2
- case Errors:
- errors := err2.Errors()
- for i, err3 := range errors {
- errors[i] = PrependPathToErrors(err3, path)
- }
- return err2
- }
- fmt.Println(err)
- return err
-}
-
-// ValidateStruct validates struct fields using their `valid` tags.
-// The result will be `false` if there are any errors.
-func ValidateStruct(s interface{}) (bool, error) {
- if s == nil {
- return true, nil
- }
- result := true
- var err error
- val := reflect.ValueOf(s)
- if val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr {
- val = val.Elem()
- }
- // we only accept structs
- if val.Kind() != reflect.Struct {
- return false, fmt.Errorf("function only accepts structs; got %s", val.Kind())
- }
- var errs Errors
- for i := 0; i < val.NumField(); i++ {
- valueField := val.Field(i)
- typeField := val.Type().Field(i)
- if typeField.PkgPath != "" {
- continue // Private field
- }
- structResult := true
- if valueField.Kind() == reflect.Interface {
- valueField = valueField.Elem()
- }
- if (valueField.Kind() == reflect.Struct ||
- (valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) &&
- typeField.Tag.Get(tagName) != "-" {
- var err error
- structResult, err = ValidateStruct(valueField.Interface())
- if err != nil {
- err = PrependPathToErrors(err, typeField.Name)
- errs = append(errs, err)
- }
- }
- resultField, err2 := typeCheck(valueField, typeField, val, nil)
- if err2 != nil {
-
- // Replace structure name with JSON name if there is a tag on the variable
- jsonTag := toJSONName(typeField.Tag.Get("json"))
- if jsonTag != "" {
- switch jsonError := err2.(type) {
- case Error:
- jsonError.Name = jsonTag
- err2 = jsonError
- case Errors:
- for i2, err3 := range jsonError {
- switch customErr := err3.(type) {
- case Error:
- customErr.Name = jsonTag
- jsonError[i2] = customErr
- }
- }
-
- err2 = jsonError
- }
- }
-
- errs = append(errs, err2)
- }
- result = result && resultField && structResult
- }
- if len(errs) > 0 {
- err = errs
- }
- return result, err
-}
-
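
A hedged usage sketch of the API above: fields are validated from their `valid` tags, and error names fall back to `json` tag names when present (the JSON-name substitution in ValidateStruct). The struct and messages are hypothetical:

```go
package main

import (
	"fmt"

	valid "github.com/asaskevich/govalidator"
)

type user struct {
	Email string `json:"email" valid:"email"`
	Name  string `json:"name"  valid:"required~name is mandatory"`
}

func main() {
	ok, err := valid.ValidateStruct(user{Email: "not-an-email"})
	fmt.Println(ok)                               // false: both fields fail
	fmt.Println(valid.ErrorsByField(err)["name"]) // "name is mandatory"
}
```
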
-// parseTagIntoMap parses a struct tag `valid:required~Some error message,length(2|3)` into map[string]string{"required": "Some error message", "length(2|3)": ""}
-func parseTagIntoMap(tag string) tagOptionsMap {
- optionsMap := make(tagOptionsMap)
- options := strings.Split(tag, ",")
-
- for i, option := range options {
- option = strings.TrimSpace(option)
-
- validationOptions := strings.Split(option, "~")
- if !isValidTag(validationOptions[0]) {
- continue
- }
- if len(validationOptions) == 2 {
- optionsMap[validationOptions[0]] = tagOption{validationOptions[0], validationOptions[1], i}
- } else {
- optionsMap[validationOptions[0]] = tagOption{validationOptions[0], "", i}
- }
- }
- return optionsMap
-}
-
-func isValidTag(s string) bool {
- if s == "" {
- return false
- }
- for _, c := range s {
- switch {
- case strings.ContainsRune("\\'\"!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
- // Backslash and quote chars are reserved, but
- // otherwise any punctuation chars are allowed
- // in a tag name.
- default:
- if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
- return false
- }
- }
- }
- return true
-}
-
-// IsSSN will validate the given string as a U.S. Social Security Number
-func IsSSN(str string) bool {
- if str == "" || len(str) != 11 {
- return false
- }
- return rxSSN.MatchString(str)
-}
-
-// IsSemver checks if the string is a valid semantic version.
-func IsSemver(str string) bool {
- return rxSemver.MatchString(str)
-}
-
-// IsTime checks if the string is valid according to the given format.
-func IsTime(str string, format string) bool {
- _, err := time.Parse(format, str)
- return err == nil
-}
-
-// IsRFC3339 checks if the string is a valid RFC3339 timestamp.
-func IsRFC3339(str string) bool {
- return IsTime(str, time.RFC3339)
-}
-
-// IsRFC3339WithoutZone checks if the string is a valid RFC3339 timestamp without the timezone.
-func IsRFC3339WithoutZone(str string) bool {
- return IsTime(str, RF3339WithoutZone)
-}
-
-// IsISO4217 checks if the string is a valid ISO 4217 currency code.
-func IsISO4217(str string) bool {
- for _, currency := range ISO4217List {
- if str == currency {
- return true
- }
- }
-
- return false
-}
-
-// ByteLength checks the string's length in bytes against the min and max params.
-func ByteLength(str string, params ...string) bool {
- if len(params) == 2 {
- min, _ := ToInt(params[0])
- max, _ := ToInt(params[1])
- return len(str) >= int(min) && len(str) <= int(max)
- }
-
- return false
-}
-
-// RuneLength checks the string's length in runes.
-// Alias for StringLength
-func RuneLength(str string, params ...string) bool {
- return StringLength(str, params...)
-}
-
-// IsRsaPub checks whether the string is a valid RSA public key.
-// Alias for IsRsaPublicKey
-func IsRsaPub(str string, params ...string) bool {
- if len(params) == 1 {
- len, _ := ToInt(params[0])
- return IsRsaPublicKey(str, int(len))
- }
-
- return false
-}
-
-// StringMatches checks if a string matches a given pattern.
-func StringMatches(s string, params ...string) bool {
- if len(params) == 1 {
- pattern := params[0]
- return Matches(s, pattern)
- }
- return false
-}
-
-// StringLength checks the string's length in runes (multi-byte aware).
-func StringLength(str string, params ...string) bool {
-
- if len(params) == 2 {
- strLength := utf8.RuneCountInString(str)
- min, _ := ToInt(params[0])
- max, _ := ToInt(params[1])
- return strLength >= int(min) && strLength <= int(max)
- }
-
- return false
-}
-
-// Range checks whether the string's numeric value falls within the given range.
-func Range(str string, params ...string) bool {
- if len(params) == 2 {
- value, _ := ToFloat(str)
- min, _ := ToFloat(params[0])
- max, _ := ToFloat(params[1])
- return InRange(value, min, max)
- }
-
- return false
-}
-
-func isInRaw(str string, params ...string) bool {
- if len(params) == 1 {
- rawParams := params[0]
-
- parsedParams := strings.Split(rawParams, "|")
-
- return IsIn(str, parsedParams...)
- }
-
- return false
-}
-
-// IsIn checks if string str is a member of the set of strings params.
-func IsIn(str string, params ...string) bool {
- for _, param := range params {
- if str == param {
- return true
- }
- }
-
- return false
-}
-
-func checkRequired(v reflect.Value, t reflect.StructField, options tagOptionsMap) (bool, error) {
- if nilPtrAllowedByRequired {
- k := v.Kind()
- if (k == reflect.Ptr || k == reflect.Interface) && v.IsNil() {
- return true, nil
- }
- }
-
- if requiredOption, isRequired := options["required"]; isRequired {
- if len(requiredOption.customErrorMessage) > 0 {
- return false, Error{t.Name, fmt.Errorf(requiredOption.customErrorMessage), true, "required", []string{}}
- }
- return false, Error{t.Name, fmt.Errorf("non zero value required"), false, "required", []string{}}
- } else if _, isOptional := options["optional"]; fieldsRequiredByDefault && !isOptional {
- return false, Error{t.Name, fmt.Errorf("Missing required field"), false, "required", []string{}}
- }
- // not required and empty is valid
- return true, nil
-}
-
-func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options tagOptionsMap) (isValid bool, resultErr error) {
- if !v.IsValid() {
- return false, nil
- }
-
- tag := t.Tag.Get(tagName)
-
- // Check if the field should be ignored
- switch tag {
- case "":
- if v.Kind() != reflect.Slice && v.Kind() != reflect.Map {
- if !fieldsRequiredByDefault {
- return true, nil
- }
- return false, Error{t.Name, fmt.Errorf("All fields are required to at least have one validation defined"), false, "required", []string{}}
- }
- case "-":
- return true, nil
- }
-
- isRootType := false
- if options == nil {
- isRootType = true
- options = parseTagIntoMap(tag)
- }
-
- if isEmptyValue(v) {
- // an empty value is not validated; only the required rule is checked
- isValid, resultErr = checkRequired(v, t, options)
- for key := range options {
- delete(options, key)
- }
- return isValid, resultErr
- }
-
- var customTypeErrors Errors
- optionsOrder := options.orderedKeys()
- for _, validatorName := range optionsOrder {
- validatorStruct := options[validatorName]
- if validatefunc, ok := CustomTypeTagMap.Get(validatorName); ok {
- delete(options, validatorName)
-
- if result := validatefunc(v.Interface(), o.Interface()); !result {
- if len(validatorStruct.customErrorMessage) > 0 {
- customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: TruncatingErrorf(validatorStruct.customErrorMessage, fmt.Sprint(v), validatorName), CustomErrorMessageExists: true, Validator: stripParams(validatorName)})
- continue
- }
- customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: fmt.Errorf("%s does not validate as %s", fmt.Sprint(v), validatorName), CustomErrorMessageExists: false, Validator: stripParams(validatorName)})
- }
- }
- }
-
- if len(customTypeErrors.Errors()) > 0 {
- return false, customTypeErrors
- }
-
- if isRootType {
- // Ensure that we've checked the value by all specified validators before reporting that the value is valid
- defer func() {
- delete(options, "optional")
- delete(options, "required")
-
- if isValid && resultErr == nil && len(options) != 0 {
- optionsOrder := options.orderedKeys()
- for _, validator := range optionsOrder {
- isValid = false
- resultErr = Error{t.Name, fmt.Errorf(
- "The following validator is invalid or can't be applied to the field: %q", validator), false, stripParams(validator), []string{}}
- return
- }
- }
- }()
- }
-
- switch v.Kind() {
- case reflect.Bool,
- reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
- reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
- reflect.Float32, reflect.Float64,
- reflect.String:
- // for each tag option check the map of validator functions
- for _, validatorSpec := range optionsOrder {
- validatorStruct := options[validatorSpec]
- var negate bool
- validator := validatorSpec
- customMsgExists := len(validatorStruct.customErrorMessage) > 0
-
- // Check whether the tag looks like '!something' or 'something'
- if validator[0] == '!' {
- validator = validator[1:]
- negate = true
- }
-
- // Check for param validators
- for key, value := range ParamTagRegexMap {
- ps := value.FindStringSubmatch(validator)
- if len(ps) == 0 {
- continue
- }
-
- validatefunc, ok := ParamTagMap[key]
- if !ok {
- continue
- }
-
- delete(options, validatorSpec)
-
- switch v.Kind() {
- case reflect.String,
- reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
- reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
- reflect.Float32, reflect.Float64:
-
- field := fmt.Sprint(v) // make value into string, then validate with regex
- if result := validatefunc(field, ps[1:]...); (!result && !negate) || (result && negate) {
- if customMsgExists {
- return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
- }
- if negate {
- return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
- }
- return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
- }
- default:
- // type not yet supported, fail
- return false, Error{t.Name, fmt.Errorf("Validator %s doesn't support kind %s", validator, v.Kind()), false, stripParams(validatorSpec), []string{}}
- }
- }
-
- if validatefunc, ok := TagMap[validator]; ok {
- delete(options, validatorSpec)
-
- switch v.Kind() {
- case reflect.String,
- reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
- reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
- reflect.Float32, reflect.Float64:
- field := fmt.Sprint(v) // make value into string, then validate with regex
- if result := validatefunc(field); !result && !negate || result && negate {
- if customMsgExists {
- return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
- }
- if negate {
- return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
- }
- return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
- }
- default:
- // not yet supported types (fail here)
- err := fmt.Errorf("Validator %s doesn't support kind %s for value %v", validator, v.Kind(), v)
- return false, Error{t.Name, err, false, stripParams(validatorSpec), []string{}}
- }
- }
- }
- return true, nil
- case reflect.Map:
- if v.Type().Key().Kind() != reflect.String {
- return false, &UnsupportedTypeError{v.Type()}
- }
- var sv stringValues
- sv = v.MapKeys()
- sort.Sort(sv)
- result := true
- for i, k := range sv {
- var resultItem bool
- var err error
- if v.MapIndex(k).Kind() != reflect.Struct {
- resultItem, err = typeCheck(v.MapIndex(k), t, o, options)
- if err != nil {
- return false, err
- }
- } else {
- resultItem, err = ValidateStruct(v.MapIndex(k).Interface())
- if err != nil {
- err = PrependPathToErrors(err, t.Name+"."+sv[i].Interface().(string))
- return false, err
- }
- }
- result = result && resultItem
- }
- return result, nil
- case reflect.Slice, reflect.Array:
- result := true
- for i := 0; i < v.Len(); i++ {
- var resultItem bool
- var err error
- if v.Index(i).Kind() != reflect.Struct {
- resultItem, err = typeCheck(v.Index(i), t, o, options)
- if err != nil {
- return false, err
- }
- } else {
- resultItem, err = ValidateStruct(v.Index(i).Interface())
- if err != nil {
- err = PrependPathToErrors(err, t.Name+"."+strconv.Itoa(i))
- return false, err
- }
- }
- result = result && resultItem
- }
- return result, nil
- case reflect.Interface:
- // If the value is an interface then validate its element
- if v.IsNil() {
- return true, nil
- }
- return ValidateStruct(v.Interface())
- case reflect.Ptr:
- // If the value is a pointer then check its element
- if v.IsNil() {
- return true, nil
- }
- return typeCheck(v.Elem(), t, o, options)
- case reflect.Struct:
- return ValidateStruct(v.Interface())
- default:
- return false, &UnsupportedTypeError{v.Type()}
- }
-}
-
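
The CustomTypeTagMap lookup at the top of typeCheck is what makes user-registered validators run ahead of the built-in tag handling. A hedged sketch of registering one; CustomTypeTagMap.Set is govalidator's documented registration API, though only Get appears in this hunk, and note that custom validators only fire for non-empty values because of the isEmptyValue early return:

```go
package main

import (
	"fmt"

	valid "github.com/asaskevich/govalidator"
)

type document struct {
	Body string `valid:"shortBody~body too long"`
}

func main() {
	// The first argument is the field value; the second is the enclosing
	// struct (the `o` value threaded through typeCheck).
	valid.CustomTypeTagMap.Set("shortBody", func(i interface{}, context interface{}) bool {
		s, ok := i.(string)
		return ok && len(s) <= 10
	})

	ok, err := valid.ValidateStruct(document{Body: "far more than ten characters"})
	fmt.Println(ok, err) // false body too long
}
```
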
-func stripParams(validatorString string) string {
- return paramsRegexp.ReplaceAllString(validatorString, "")
-}
-
-func isEmptyValue(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.String, reflect.Array:
- return v.Len() == 0
- case reflect.Map, reflect.Slice:
- return v.Len() == 0 || v.IsNil()
- case reflect.Bool:
- return !v.Bool()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return v.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return v.Float() == 0
- case reflect.Interface, reflect.Ptr:
- return v.IsNil()
- }
-
- return reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface())
-}
-
-// ErrorByField returns the error for the specified field of the struct
-// validated by ValidateStruct, or an empty string if there are no errors,
-// the field doesn't exist, or the field has no errors.
-func ErrorByField(e error, field string) string {
- if e == nil {
- return ""
- }
- return ErrorsByField(e)[field]
-}
-
-// ErrorsByField returns a map of errors for the struct validated
-// by ValidateStruct, or an empty map if there are no errors.
-func ErrorsByField(e error) map[string]string {
- m := make(map[string]string)
- if e == nil {
- return m
- }
- // prototype for ValidateStruct
-
- switch e.(type) {
- case Error:
- m[e.(Error).Name] = e.(Error).Err.Error()
- case Errors:
- for _, item := range e.(Errors).Errors() {
- n := ErrorsByField(item)
- for k, v := range n {
- m[k] = v
- }
- }
- }
-
- return m
-}
-
-// Error returns string equivalent for reflect.Type
-func (e *UnsupportedTypeError) Error() string {
- return "validator: unsupported type: " + e.Type.String()
-}
-
-func (sv stringValues) Len() int { return len(sv) }
-func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
-func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) }
-func (sv stringValues) get(i int) string { return sv[i].String() }
diff --git a/vendor/github.com/asaskevich/govalidator/wercker.yml b/vendor/github.com/asaskevich/govalidator/wercker.yml
deleted file mode 100644
index cac7a5fcf..000000000
--- a/vendor/github.com/asaskevich/govalidator/wercker.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-box: golang
-build:
- steps:
- - setup-go-workspace
-
- - script:
- name: go get
- code: |
- go version
- go get -t ./...
-
- - script:
- name: go test
- code: |
- go test -race ./...
diff --git a/vendor/github.com/cenkalti/backoff/v4/README.md b/vendor/github.com/cenkalti/backoff/v4/README.md
index 16abdfc08..9433004a2 100644
--- a/vendor/github.com/cenkalti/backoff/v4/README.md
+++ b/vendor/github.com/cenkalti/backoff/v4/README.md
@@ -1,4 +1,4 @@
-# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls]
+# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Coverage Status][coveralls image]][coveralls]
This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].
@@ -21,8 +21,6 @@ Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.
[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4
[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
-[travis]: https://travis-ci.org/cenkalti/backoff
-[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master
[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master
[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master
diff --git a/vendor/github.com/cenkalti/backoff/v4/exponential.go b/vendor/github.com/cenkalti/backoff/v4/exponential.go
index 2c56c1e71..aac99f196 100644
--- a/vendor/github.com/cenkalti/backoff/v4/exponential.go
+++ b/vendor/github.com/cenkalti/backoff/v4/exponential.go
@@ -71,6 +71,9 @@ type Clock interface {
Now() time.Time
}
+// ExponentialBackOffOpts is a function type used to configure ExponentialBackOff options.
+type ExponentialBackOffOpts func(*ExponentialBackOff)
+
// Default values for ExponentialBackOff.
const (
DefaultInitialInterval = 500 * time.Millisecond
@@ -81,7 +84,7 @@ const (
)
// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
-func NewExponentialBackOff() *ExponentialBackOff {
+func NewExponentialBackOff(opts ...ExponentialBackOffOpts) *ExponentialBackOff {
b := &ExponentialBackOff{
InitialInterval: DefaultInitialInterval,
RandomizationFactor: DefaultRandomizationFactor,
@@ -91,10 +94,62 @@ func NewExponentialBackOff() *ExponentialBackOff {
Stop: Stop,
Clock: SystemClock,
}
+ for _, fn := range opts {
+ fn(b)
+ }
b.Reset()
return b
}
+// WithInitialInterval sets the initial interval between retries.
+func WithInitialInterval(duration time.Duration) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.InitialInterval = duration
+ }
+}
+
+// WithRandomizationFactor sets the randomization factor to add jitter to intervals.
+func WithRandomizationFactor(randomizationFactor float64) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.RandomizationFactor = randomizationFactor
+ }
+}
+
+// WithMultiplier sets the multiplier for increasing the interval after each retry.
+func WithMultiplier(multiplier float64) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.Multiplier = multiplier
+ }
+}
+
+// WithMaxInterval sets the maximum interval between retries.
+func WithMaxInterval(duration time.Duration) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.MaxInterval = duration
+ }
+}
+
+// WithMaxElapsedTime sets the maximum total time for retries.
+func WithMaxElapsedTime(duration time.Duration) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.MaxElapsedTime = duration
+ }
+}
+
+// WithRetryStopDuration sets the duration after which retries should stop.
+func WithRetryStopDuration(duration time.Duration) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.Stop = duration
+ }
+}
+
+// WithClockProvider sets the clock used to measure time.
+func WithClockProvider(clock Clock) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.Clock = clock
+ }
+}
+
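
A usage sketch of the functional options introduced above, combined with the package's existing Retry helper:

```go
package main

import (
	"errors"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	b := backoff.NewExponentialBackOff(
		backoff.WithInitialInterval(200*time.Millisecond),
		backoff.WithMaxInterval(5*time.Second),
		backoff.WithMaxElapsedTime(30*time.Second),
	)
	op := func() error { return errors.New("transient failure") }
	_ = backoff.Retry(op, b) // gives up after ~30s since op never succeeds
}
```

Before v4.3.0 the same configuration required mutating the struct fields after NewExponentialBackOff returned; the options keep Reset() running after all fields are set.
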
type systemClock struct{}
func (t systemClock) Now() time.Time {
diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md
index 5edd5a7ca..6f24dfff5 100644
--- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md
+++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md
@@ -1,5 +1,26 @@
# Change history of go-restful
+## [v3.12.2] - 2025-02-21
+
+- allow empty payloads in post,put,patch, issue #580 ( thanks @liggitt, Jordan Liggitt)
+
+## [v3.12.1] - 2024-05-28
+
+- fix misroute when dealing multiple webservice with regex (#549) (thanks Haitao Chen)
+
+## [v3.12.0] - 2024-03-11
+
+- add Flush method #529 (#538)
+- fix: Improper handling of empty POST requests (#543)
+
+## [v3.11.3] - 2024-01-09
+
+- better not have 2 tags on one commit
+
+## [v3.11.1, v3.11.2] - 2024-01-09
+
+- fix by restoring custom JSON handler functions (Mike Beaumont #540)
+
## [v3.11.0] - 2023-08-19
- restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled.
diff --git a/vendor/github.com/emicklei/go-restful/v3/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md
index e3e30080e..3fb40d198 100644
--- a/vendor/github.com/emicklei/go-restful/v3/README.md
+++ b/vendor/github.com/emicklei/go-restful/v3/README.md
@@ -2,9 +2,8 @@ go-restful
==========
package for building REST-style Web Services using Google Go
-[](https://travis-ci.org/emicklei/go-restful)
[](https://goreportcard.com/report/github.com/emicklei/go-restful)
-[](https://pkg.go.dev/github.com/emicklei/go-restful)
+[](https://pkg.go.dev/github.com/emicklei/go-restful/v3)
[](https://codecov.io/gh/emicklei/go-restful)
- [Code examples use v3](https://github.com/emicklei/go-restful/tree/v3/examples)
@@ -95,8 +94,7 @@ There are several hooks to customize the behavior of the go-restful package.
- Trace logging
- Compression
- Encoders for other serializers
-- Use [jsoniter](https://github.com/json-iterator/go) by building this package using a build tag, e.g. `go build -tags=jsoniter .`
-- Use the package variable `TrimRightSlashEnabled` (default true) to control the behavior of matching routes that end with a slash `/`
+- Use the package variable `TrimRightSlashEnabled` (default true) to control the behavior of matching routes that end with a slash `/`
## Resources
diff --git a/vendor/github.com/emicklei/go-restful/v3/compress.go b/vendor/github.com/emicklei/go-restful/v3/compress.go
index 1ff239f99..80adf55fd 100644
--- a/vendor/github.com/emicklei/go-restful/v3/compress.go
+++ b/vendor/github.com/emicklei/go-restful/v3/compress.go
@@ -49,6 +49,16 @@ func (c *CompressingResponseWriter) CloseNotify() <-chan bool {
return c.writer.(http.CloseNotifier).CloseNotify()
}
+// Flush is part of http.Flusher interface. Noop if the underlying writer doesn't support it.
+func (c *CompressingResponseWriter) Flush() {
+ flusher, ok := c.writer.(http.Flusher)
+ if !ok {
+ // writer doesn't support http.Flusher interface
+ return
+ }
+ flusher.Flush()
+}
+
// Close the underlying compressor
func (c *CompressingResponseWriter) Close() error {
if c.isCompressorClosed() {
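
The new Flush passthrough matters for incremental output (e.g. server-sent events): before this change the comma-ok assertion below returned false whenever compression wrapped the response. A hedged handler sketch:

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	restful "github.com/emicklei/go-restful/v3"
)

func stream(req *restful.Request, resp *restful.Response) {
	for i := 0; i < 3; i++ {
		fmt.Fprintf(resp, "tick %d\n", i)
		if f, ok := resp.ResponseWriter.(http.Flusher); ok {
			f.Flush() // now reaches the underlying writer even when gzip is on
		}
		time.Sleep(time.Second)
	}
}

func main() {
	ws := new(restful.WebService)
	ws.Route(ws.GET("/ticks").To(stream))
	restful.Add(ws)
	http.ListenAndServe(":8080", nil)
}
```
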
diff --git a/vendor/github.com/emicklei/go-restful/v3/curly.go b/vendor/github.com/emicklei/go-restful/v3/curly.go
index ba1fc5d5f..6fd2bcd5a 100644
--- a/vendor/github.com/emicklei/go-restful/v3/curly.go
+++ b/vendor/github.com/emicklei/go-restful/v3/curly.go
@@ -46,10 +46,10 @@ func (c CurlyRouter) SelectRoute(
// selectRoutes return a collection of Route from a WebService that matches the path tokens from the request.
func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes {
candidates := make(sortableCurlyRoutes, 0, 8)
- for _, each := range ws.routes {
- matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens, each.hasCustomVerb)
+ for _, eachRoute := range ws.routes {
+ matches, paramCount, staticCount := c.matchesRouteByPathTokens(eachRoute.pathParts, requestTokens, eachRoute.hasCustomVerb)
if matches {
- candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers?
+ candidates.add(curlyRoute{eachRoute, paramCount, staticCount}) // TODO make sure Routes() return pointers?
}
}
sort.Sort(candidates)
@@ -72,7 +72,7 @@ func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []strin
return false, 0, 0
}
requestToken := requestTokens[i]
- if routeHasCustomVerb && hasCustomVerb(routeToken){
+ if routeHasCustomVerb && hasCustomVerb(routeToken) {
if !isMatchCustomVerb(routeToken, requestToken) {
return false, 0, 0
}
@@ -129,44 +129,52 @@ func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpReques
// detectWebService returns the best matching webService given the list of path tokens.
// see also computeWebserviceScore
func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService {
- var best *WebService
+ var bestWs *WebService
score := -1
- for _, each := range webServices {
- matches, eachScore := c.computeWebserviceScore(requestTokens, each.pathExpr.tokens)
+ for _, eachWS := range webServices {
+ matches, eachScore := c.computeWebserviceScore(requestTokens, eachWS.pathExpr.tokens)
if matches && (eachScore > score) {
- best = each
+ bestWs = eachWS
score = eachScore
}
}
- return best
+ return bestWs
}
// computeWebserviceScore returns whether tokens match and
// the weighted score of the longest matching consecutive tokens from the beginning.
-func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) {
- if len(tokens) > len(requestTokens) {
+func (c CurlyRouter) computeWebserviceScore(requestTokens []string, routeTokens []string) (bool, int) {
+ if len(routeTokens) > len(requestTokens) {
return false, 0
}
score := 0
- for i := 0; i < len(tokens); i++ {
- each := requestTokens[i]
- other := tokens[i]
- if len(each) == 0 && len(other) == 0 {
+ for i := 0; i < len(routeTokens); i++ {
+ eachRequestToken := requestTokens[i]
+ eachRouteToken := routeTokens[i]
+ if len(eachRequestToken) == 0 && len(eachRouteToken) == 0 {
score++
continue
}
- if len(other) > 0 && strings.HasPrefix(other, "{") {
+ if len(eachRouteToken) > 0 && strings.HasPrefix(eachRouteToken, "{") {
// no empty match
- if len(each) == 0 {
+ if len(eachRequestToken) == 0 {
return false, score
}
- score += 1
+ score++
+
+ if colon := strings.Index(eachRouteToken, ":"); colon != -1 {
+ // match by regex
+ matchesToken, _ := c.regularMatchesPathToken(eachRouteToken, colon, eachRequestToken)
+ if matchesToken {
+ score++ // extra score for regex match
+ }
+ }
} else {
// not a parameter
- if each != other {
+ if eachRequestToken != eachRouteToken {
return false, score
}
- score += (len(tokens) - i) * 10 //fuzzy
+ score += (len(routeTokens) - i) * 10 //fuzzy
}
}
return true, score
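
To make the new regex bonus concrete: for a request path /users/42 (tokens ["", "users", "42"]), a webservice rooted at /users/{id} scores 1 (leading empty tokens) + 20 (static "users": (3-1)*10) + 1 (parameter) = 22, while /users/{id:[0-9]+} earns one more point from the added score++ branch for a total of 23, so the more specific service now wins. This is the multi-webservice regex misroute fix referenced by the v3.12.1 changelog entry (#549).
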
diff --git a/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go b/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go
index 66dfc824f..9808752ac 100644
--- a/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go
+++ b/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go
@@ -5,11 +5,18 @@ package restful
// that can be found in the LICENSE file.
import (
+ "encoding/json"
"encoding/xml"
"strings"
"sync"
)
+var (
+ MarshalIndent = json.MarshalIndent
+ NewDecoder = json.NewDecoder
+ NewEncoder = json.NewEncoder
+)
+
// EntityReaderWriter can read and write values using an encoding such as JSON,XML.
type EntityReaderWriter interface {
// Read a serialized version of the value from the request.
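
These package-level variables were restored in v3.11.1 after the jsoniter build tag was dropped (see the deleted json.go and jsoniter.go just below), so the JSON codec is now customized by plain assignment. A sketch that keeps standard-library types but tweaks decoding; note the variable's type is fixed to func(io.Reader) *json.Decoder, so a replacement must return that type:

```go
package main

import (
	"encoding/json"
	"io"

	restful "github.com/emicklei/go-restful/v3"
)

func init() {
	// Decode request entities with number precision preserved; a sketch of
	// overriding the restored hook, not something this diff itself does.
	restful.NewDecoder = func(r io.Reader) *json.Decoder {
		d := json.NewDecoder(r)
		d.UseNumber() // numbers decode as json.Number instead of float64
		return d
	}
}

func main() {}
```
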
diff --git a/vendor/github.com/emicklei/go-restful/v3/json.go b/vendor/github.com/emicklei/go-restful/v3/json.go
deleted file mode 100644
index 871165166..000000000
--- a/vendor/github.com/emicklei/go-restful/v3/json.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build !jsoniter
-
-package restful
-
-import "encoding/json"
-
-var (
- MarshalIndent = json.MarshalIndent
- NewDecoder = json.NewDecoder
- NewEncoder = json.NewEncoder
-)
diff --git a/vendor/github.com/emicklei/go-restful/v3/jsoniter.go b/vendor/github.com/emicklei/go-restful/v3/jsoniter.go
deleted file mode 100644
index 11b8f8ae7..000000000
--- a/vendor/github.com/emicklei/go-restful/v3/jsoniter.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build jsoniter
-
-package restful
-
-import "github.com/json-iterator/go"
-
-var (
- json = jsoniter.ConfigCompatibleWithStandardLibrary
- MarshalIndent = json.MarshalIndent
- NewDecoder = json.NewDecoder
- NewEncoder = json.NewEncoder
-)
diff --git a/vendor/github.com/emicklei/go-restful/v3/jsr311.go b/vendor/github.com/emicklei/go-restful/v3/jsr311.go
index 07a0c91e9..7f04bd905 100644
--- a/vendor/github.com/emicklei/go-restful/v3/jsr311.go
+++ b/vendor/github.com/emicklei/go-restful/v3/jsr311.go
@@ -65,7 +65,7 @@ func (RouterJSR311) extractParams(pathExpr *pathExpression, matches []string) ma
return params
}
-// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
+// https://download.oracle.com/otndocs/jcp/jaxrs-1.1-mrel-eval-oth-JSpec/
func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) {
candidates := make([]*Route, 0, 8)
for i, each := range routes {
@@ -126,9 +126,7 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R
if trace {
traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(previous), contentType)
}
- if httpRequest.ContentLength > 0 {
- return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type")
- }
+ return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type")
}
// accept
@@ -151,20 +149,9 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R
for _, candidate := range previous {
available = append(available, candidate.Produces...)
}
- // if POST,PUT,PATCH without body
- method, length := httpRequest.Method, httpRequest.Header.Get("Content-Length")
- if (method == http.MethodPost ||
- method == http.MethodPut ||
- method == http.MethodPatch) && length == "" {
- return nil, NewError(
- http.StatusUnsupportedMediaType,
- fmt.Sprintf("415: Unsupported Media Type\n\nAvailable representations: %s", strings.Join(available, ", ")),
- )
- }
return nil, NewError(
http.StatusNotAcceptable,
- fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", ")),
- )
+ fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", ")))
}
// return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil
return candidates[0], nil
diff --git a/vendor/github.com/emicklei/go-restful/v3/route.go b/vendor/github.com/emicklei/go-restful/v3/route.go
index 306c44be7..a2056e2ac 100644
--- a/vendor/github.com/emicklei/go-restful/v3/route.go
+++ b/vendor/github.com/emicklei/go-restful/v3/route.go
@@ -111,6 +111,8 @@ func (r Route) matchesAccept(mimeTypesWithQuality string) bool {
}
// Return whether this Route can consume content with a type specified by mimeTypes (can be empty).
+// If the route does not specify Consumes then return true (*/*).
+// If no content type is set then return true for GET,HEAD,OPTIONS,DELETE and TRACE.
func (r Route) matchesContentType(mimeTypes string) bool {
if len(r.Consumes) == 0 {
diff --git a/vendor/github.com/felixge/httpsnoop/.travis.yml b/vendor/github.com/felixge/httpsnoop/.travis.yml
deleted file mode 100644
index bfc421200..000000000
--- a/vendor/github.com/felixge/httpsnoop/.travis.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-language: go
-
-go:
- - 1.6
- - 1.7
- - 1.8
diff --git a/vendor/github.com/felixge/httpsnoop/Makefile b/vendor/github.com/felixge/httpsnoop/Makefile
index 2d84889ae..4e12afdd9 100644
--- a/vendor/github.com/felixge/httpsnoop/Makefile
+++ b/vendor/github.com/felixge/httpsnoop/Makefile
@@ -1,7 +1,7 @@
.PHONY: ci generate clean
ci: clean generate
- go test -v ./...
+ go test -race -v ./...
generate:
go generate .
diff --git a/vendor/github.com/felixge/httpsnoop/README.md b/vendor/github.com/felixge/httpsnoop/README.md
index ddcecd13e..cf6b42f3d 100644
--- a/vendor/github.com/felixge/httpsnoop/README.md
+++ b/vendor/github.com/felixge/httpsnoop/README.md
@@ -7,8 +7,8 @@ http.Handlers.
Doing this requires non-trivial wrapping of the http.ResponseWriter interface,
which is also exposed for users interested in a more low-level API.
-[](https://godoc.org/github.com/felixge/httpsnoop)
-[](https://travis-ci.org/felixge/httpsnoop)
+[](https://pkg.go.dev/github.com/felixge/httpsnoop)
+[](https://github.com/felixge/httpsnoop/actions/workflows/main.yaml)
## Usage Example
diff --git a/vendor/github.com/felixge/httpsnoop/capture_metrics.go b/vendor/github.com/felixge/httpsnoop/capture_metrics.go
index b77cc7c00..bec7b71b3 100644
--- a/vendor/github.com/felixge/httpsnoop/capture_metrics.go
+++ b/vendor/github.com/felixge/httpsnoop/capture_metrics.go
@@ -52,7 +52,7 @@ func (m *Metrics) CaptureMetrics(w http.ResponseWriter, fn func(http.ResponseWri
return func(code int) {
next(code)
- if !headerWritten {
+ if !(code >= 100 && code <= 199) && !headerWritten {
m.Code = code
headerWritten = true
}
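
With the fix above, informational (1xx) writes no longer lock in the recorded status code. A sketch using 103 Early Hints and the package's existing CaptureMetrics helper:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/felixge/httpsnoop"
)

func main() {
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusEarlyHints) // 103: passed through, not recorded
		w.WriteHeader(http.StatusOK)
	})
	rec := httptest.NewRecorder()
	m := httpsnoop.CaptureMetrics(h, rec, httptest.NewRequest("GET", "/", nil))
	fmt.Println(m.Code) // 200 with the fix; 103 before it
}
```
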
diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go
index 31cbdfb8e..101cedde6 100644
--- a/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go
+++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go
@@ -1,5 +1,5 @@
// +build go1.8
-// Code generated by "httpsnoop/codegen"; DO NOT EDIT
+// Code generated by "httpsnoop/codegen"; DO NOT EDIT.
package httpsnoop
diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go
index ab99c07c7..e0951df15 100644
--- a/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go
+++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go
@@ -1,5 +1,5 @@
// +build !go1.8
-// Code generated by "httpsnoop/codegen"; DO NOT EDIT
+// Code generated by "httpsnoop/codegen"; DO NOT EDIT.
package httpsnoop
diff --git a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml
index ffc7b992b..7f257e99a 100644
--- a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml
+++ b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml
@@ -1,7 +1,7 @@
freebsd_task:
name: 'FreeBSD'
freebsd_instance:
- image_family: freebsd-13-2
+ image_family: freebsd-14-2
install_script:
- pkg update -f
- pkg install -y go
@@ -9,5 +9,6 @@ freebsd_task:
# run tests as user "cirrus" instead of root
- pw useradd cirrus -m
- chown -R cirrus:cirrus .
- - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
- - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
+ - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
+ - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
+ - FSNOTIFY_DEBUG=1 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race -v ./...
diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig
deleted file mode 100644
index fad895851..000000000
--- a/vendor/github.com/fsnotify/fsnotify/.editorconfig
+++ /dev/null
@@ -1,12 +0,0 @@
-root = true
-
-[*.go]
-indent_style = tab
-indent_size = 4
-insert_final_newline = true
-
-[*.{yml,yaml}]
-indent_style = space
-indent_size = 2
-insert_final_newline = true
-trim_trailing_whitespace = true
diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes
deleted file mode 100644
index 32f1001be..000000000
--- a/vendor/github.com/fsnotify/fsnotify/.gitattributes
+++ /dev/null
@@ -1 +0,0 @@
-go.sum linguist-generated
diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore
index 391cc076b..daea9dd6d 100644
--- a/vendor/github.com/fsnotify/fsnotify/.gitignore
+++ b/vendor/github.com/fsnotify/fsnotify/.gitignore
@@ -5,3 +5,6 @@
# Output of go build ./cmd/fsnotify
/fsnotify
/fsnotify.exe
+
+/test/kqueue
+/test/a.out
diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
index e0e575754..6468d2cf4 100644
--- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
+++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
@@ -1,8 +1,69 @@
# Changelog
-Unreleased
-----------
-Nothing yet.
+1.9.0 2025-04-04
+----------------
+
+### Changes and fixes
+
+- all: make BufferedWatcher buffered again ([#657])
+
+- inotify: fix race when adding/removing watches while a watched path is being
+ deleted ([#678], [#686])
+
+- inotify: don't send empty event if a watched path is unmounted ([#655])
+
+- inotify: don't register duplicate watches when watching both a symlink and its
+ target; previously that would get "half-added" and removing the second would
+ panic ([#679])
+
+- kqueue: fix watching relative symlinks ([#681])
+
+- kqueue: correctly mark pre-existing entries when watching a link to a dir on
+ kqueue ([#682])
+
+- illumos: don't send error if changed file is deleted while processing the
+ event ([#678])
+
+
+[#657]: https://github.com/fsnotify/fsnotify/pull/657
+[#678]: https://github.com/fsnotify/fsnotify/pull/678
+[#686]: https://github.com/fsnotify/fsnotify/pull/686
+[#655]: https://github.com/fsnotify/fsnotify/pull/655
+[#681]: https://github.com/fsnotify/fsnotify/pull/681
+[#679]: https://github.com/fsnotify/fsnotify/pull/679
+[#682]: https://github.com/fsnotify/fsnotify/pull/682
+
+1.8.0 2024-10-31
+----------------
+
+### Additions
+
+- all: add `FSNOTIFY_DEBUG` to print debug logs to stderr ([#619])
+
+### Changes and fixes
+
+- windows: fix behaviour of `WatchList()` to be consistent with other platforms ([#610])
+
+- kqueue: ignore events with Ident=0 ([#590])
+
+- kqueue: set O_CLOEXEC to prevent passing file descriptors to children ([#617])
+
+- kqueue: emit events as "/path/dir/file" instead of "path/link/file" when watching a symlink ([#625])
+
+- inotify: don't send event for IN_DELETE_SELF when also watching the parent ([#620])
+
+- inotify: fix panic when calling Remove() in a goroutine ([#650])
+
+- fen: allow watching subdirectories of watched directories ([#621])
+
+[#590]: https://github.com/fsnotify/fsnotify/pull/590
+[#610]: https://github.com/fsnotify/fsnotify/pull/610
+[#617]: https://github.com/fsnotify/fsnotify/pull/617
+[#619]: https://github.com/fsnotify/fsnotify/pull/619
+[#620]: https://github.com/fsnotify/fsnotify/pull/620
+[#621]: https://github.com/fsnotify/fsnotify/pull/621
+[#625]: https://github.com/fsnotify/fsnotify/pull/625
+[#650]: https://github.com/fsnotify/fsnotify/pull/650
1.7.0 - 2023-10-22
------------------
diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
index ea379759d..4cc40fa59 100644
--- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
+++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
@@ -1,7 +1,7 @@
Thank you for your interest in contributing to fsnotify! We try to review and
merge PRs in a reasonable timeframe, but please be aware that:
-- To avoid "wasted" work, please discus changes on the issue tracker first. You
+- To avoid "wasted" work, please discuss changes on the issue tracker first. You
can just send PRs, but they may end up being rejected for one reason or the
other.
@@ -20,6 +20,125 @@ platforms. Testing different platforms locally can be done with something like
Use the `-short` flag to make the "stress test" run faster.
+Writing new tests
+-----------------
+Scripts in the testdata directory allow creating test cases in a "shell-like"
+syntax. The basic format is:
+
+ script
+
+ Output:
+ desired output
+
+For example:
+
+ # Create a new empty file with some data.
+ watch /
+ echo data >/file
+
+ Output:
+ create /file
+ write /file
+
+Just create a new file to add a new test; select which tests to run with
+`-run TestScript/[path]`.
+
+script
+------
+The script is a "shell-like" script:
+
+ cmd arg arg
+
+Comments are supported with `#`:
+
+ # Comment
+ cmd arg arg # Comment
+
+All operations are done in a temp directory; a path like "/foo" is rewritten to
+"/tmp/TestFoo/foo".
+
+Arguments can be quoted with `"` or `'`; there are no escapes and they're
+functionally identical right now, but this may change in the future, so best to
+assume shell-like rules.
+
+ touch "/file with spaces"
+
+End-of-line escapes with `\` are not supported.
+
+### Supported commands
+
+ watch path [ops] # Watch the path, reporting events for it. Nothing is
+ # watched by default. Optionally a list of ops can be
+ # given, as with AddWith(path, WithOps(...)).
+ unwatch path # Stop watching the path.
+ watchlist n # Assert watchlist length.
+
+ stop # Stop running the script; for debugging.
+ debug [yes/no] # Enable/disable FSNOTIFY_DEBUG (tests are run in
+ parallel by default, so -parallel=1 is probably a good
+ idea).
+ print [any strings] # Print text to stdout; for debugging.
+
+ touch path
+ mkdir [-p] dir
+ ln -s target link # Only ln -s supported.
+ mkfifo path
+ mknod dev path
+ mv src dst
+ rm [-r] path
+ chmod mode path # Octal only
+ sleep time-in-ms
+
+ cat path # Read path (does nothing with the data; just reads it).
+ echo str >>path # Append "str" to "path".
+ echo str >path # Truncate "path" and write "str".
+
+ require reason # Skip the test if "reason" is true; "skip" and
+ skip reason # "require" behave identically; both are supported
+ # for readability. Possible reasons are:
+ #
+ # always Always skip this test.
+ # symlink Symlinks are supported (requires admin
+ # permissions on Windows).
+ # mkfifo Platform doesn't support FIFO named sockets.
+ # mknod Platform doesn't support device nodes.
+
+
+output
+------
+After `Output:` the desired output is given; this is indented by convention, but
+that's not required.
+
+The format of that is:
+
+ # Comment
+ event path # Comment
+
+ system:
+ event path
+ system2:
+ event path
+
+Every event is one line, and any whitespace between the event and path is
+ignored. The path can optionally be surrounded in ". Anything after a "#" is
+ignored.
+
+Platform-specific tests can be added after GOOS; for example:
+
+ watch /
+ touch /file
+
+ Output:
+ # Tested if nothing else matches
+ create /file
+
+ # Windows-specific test.
+ windows:
+ write /file
+
+You can specify multiple platforms with a comma (e.g. "windows, linux:").
+"kqueue" is a shortcut for all kqueue systems (BSD, macOS).
+
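+Putting the pieces together, a complete (hypothetical) script in this format;
+per fsnotify's event semantics, a rename inside a watched directory is
+reported as a Rename for the old path plus a Create for the new one:
+
+    # Rename a file inside a watched directory.
+    watch /
+    touch /file
+    mv /file /file2
+
+    Output:
+    create /file
+    rename /file
+    create /file2
+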
[goon]: https://github.com/arp242/goon
[Vagrant]: https://www.vagrantup.com/
diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md
index e480733d1..1f4eb583d 100644
--- a/vendor/github.com/fsnotify/fsnotify/README.md
+++ b/vendor/github.com/fsnotify/fsnotify/README.md
@@ -15,7 +15,6 @@ Platform support:
| ReadDirectoryChangesW | Windows | Supported |
| FEN | illumos | Supported |
| fanotify | Linux 5.9+ | [Not yet](https://github.com/fsnotify/fsnotify/issues/114) |
-| AHAFS | AIX | [aix branch]; experimental due to lack of maintainer and test environment |
| FSEvents | macOS | [Needs support in x/sys/unix][fsevents] |
| USN Journals | Windows | [Needs support in x/sys/windows][usn] |
| Polling | *All* | [Not yet](https://github.com/fsnotify/fsnotify/issues/9) |
@@ -25,7 +24,6 @@ untested.
[fsevents]: https://github.com/fsnotify/fsnotify/issues/11#issuecomment-1279133120
[usn]: https://github.com/fsnotify/fsnotify/issues/53#issuecomment-1279829847
-[aix branch]: https://github.com/fsnotify/fsnotify/issues/353#issuecomment-1284590129
Usage
-----
diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go
index 28497f1dd..57fc69284 100644
--- a/vendor/github.com/fsnotify/fsnotify/backend_fen.go
+++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go
@@ -1,162 +1,44 @@
//go:build solaris
-// +build solaris
-// Note: the documentation on the Watcher type and methods is generated from
-// mkdoc.zsh
+// FEN backend for illumos (supported) and Solaris (untested, but should work).
+//
+// See port_create(3c) etc. for docs. https://www.illumos.org/man/3C/port_create
package fsnotify
import (
"errors"
"fmt"
+ "io/fs"
"os"
"path/filepath"
"sync"
+ "time"
+ "github.com/fsnotify/fsnotify/internal"
"golang.org/x/sys/unix"
)
-// Watcher watches a set of paths, delivering events on a channel.
-//
-// A watcher should not be copied (e.g. pass it by pointer, rather than by
-// value).
-//
-// # Linux notes
-//
-// When a file is removed a Remove event won't be emitted until all file
-// descriptors are closed, and deletes will always emit a Chmod. For example:
-//
-// fp := os.Open("file")
-// os.Remove("file") // Triggers Chmod
-// fp.Close() // Triggers Remove
-//
-// This is the event that inotify sends, so not much can be changed about this.
-//
-// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
-// for the number of watches per user, and fs.inotify.max_user_instances
-// specifies the maximum number of inotify instances per user. Every Watcher you
-// create is an "instance", and every path you add is a "watch".
-//
-// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
-// /proc/sys/fs/inotify/max_user_instances
-//
-// To increase them you can use sysctl or write the value to the /proc file:
-//
-// # Default values on Linux 5.18
-// sysctl fs.inotify.max_user_watches=124983
-// sysctl fs.inotify.max_user_instances=128
-//
-// To make the changes persist on reboot edit /etc/sysctl.conf or
-// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
-// your distro's documentation):
-//
-// fs.inotify.max_user_watches=124983
-// fs.inotify.max_user_instances=128
-//
-// Reaching the limit will result in a "no space left on device" or "too many open
-// files" error.
-//
-// # kqueue notes (macOS, BSD)
-//
-// kqueue requires opening a file descriptor for every file that's being watched;
-// so if you're watching a directory with five files then that's six file
-// descriptors. You will run in to your system's "max open files" limit faster on
-// these platforms.
-//
-// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
-// control the maximum number of open files, as well as /etc/login.conf on BSD
-// systems.
-//
-// # Windows notes
-//
-// Paths can be added as "C:\path\to\dir", but forward slashes
-// ("C:/path/to/dir") will also work.
-//
-// When a watched directory is removed it will always send an event for the
-// directory itself, but may not send events for all files in that directory.
-// Sometimes it will send events for all files, sometimes it will send no
-// events, and often only for some files.
-//
-// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
-// value that is guaranteed to work with SMB filesystems. If you have many
-// events in quick succession this may not be enough, and you will have to use
-// [WithBufferSize] to increase the value.
-type Watcher struct {
- // Events sends the filesystem change events.
- //
- // fsnotify can send the following events; a "path" here can refer to a
- // file, directory, symbolic link, or special file like a FIFO.
- //
- // fsnotify.Create A new path was created; this may be followed by one
- // or more Write events if data also gets written to a
- // file.
- //
- // fsnotify.Remove A path was removed.
- //
- // fsnotify.Rename A path was renamed. A rename is always sent with the
- // old path as Event.Name, and a Create event will be
- // sent with the new name. Renames are only sent for
- // paths that are currently watched; e.g. moving an
- // unmonitored file into a monitored directory will
- // show up as just a Create. Similarly, renaming a file
- // to outside a monitored directory will show up as
- // only a Rename.
- //
- // fsnotify.Write A file or named pipe was written to. A Truncate will
- // also trigger a Write. A single "write action"
- // initiated by the user may show up as one or multiple
- // writes, depending on when the system syncs things to
- // disk. For example when compiling a large Go program
- // you may get hundreds of Write events, and you may
- // want to wait until you've stopped receiving them
- // (see the dedup example in cmd/fsnotify).
- //
- // Some systems may send Write event for directories
- // when the directory content changes.
- //
- // fsnotify.Chmod Attributes were changed. On Linux this is also sent
- // when a file is removed (or more accurately, when a
- // link to an inode is removed). On kqueue it's sent
- // when a file is truncated. On Windows it's never
- // sent.
+type fen struct {
+ *shared
Events chan Event
-
- // Errors sends any errors.
- //
- // ErrEventOverflow is used to indicate there are too many events:
- //
- // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
- // - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
- // - kqueue, fen: Not used.
Errors chan error
mu sync.Mutex
port *unix.EventPort
- done chan struct{} // Channel for sending a "quit message" to the reader goroutine
- dirs map[string]struct{} // Explicitly watched directories
- watches map[string]struct{} // Explicitly watched non-directories
+ dirs map[string]Op // Explicitly watched directories
+ watches map[string]Op // Explicitly watched non-directories
}
-// NewWatcher creates a new Watcher.
-func NewWatcher() (*Watcher, error) {
- return NewBufferedWatcher(0)
-}
+var defaultBufferSize = 0
-// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
-// channel.
-//
-// The main use case for this is situations with a very large number of events
-// where the kernel buffer size can't be increased (e.g. due to lack of
-// permissions). An unbuffered Watcher will perform better for almost all use
-// cases, and whenever possible you will be better off increasing the kernel
-// buffers instead of adding a large userspace buffer.
-func NewBufferedWatcher(sz uint) (*Watcher, error) {
- w := &Watcher{
- Events: make(chan Event, sz),
- Errors: make(chan error),
- dirs: make(map[string]struct{}),
- watches: make(map[string]struct{}),
- done: make(chan struct{}),
+func newBackend(ev chan Event, errs chan error) (backend, error) {
+ w := &fen{
+ shared: newShared(ev, errs),
+ Events: ev,
+ Errors: errs,
+ dirs: make(map[string]Op),
+ watches: make(map[string]Op),
}
var err error
@@ -169,104 +51,28 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) {
return w, nil
}
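
The backend no longer allocates its own channels: the frontend passes Events/Errors in, and the backend takes ownership of closing them when its read loop exits. A minimal sketch of the new wiring from a frontend-shaped caller (illustrative; the frontend side is not part of this diff):

    ev := make(chan Event)   // unbuffered; see defaultBufferSize above
    errs := make(chan error)
    b, err := newBackend(ev, errs)
    if err != nil {
        return nil, err
    }
    // b now owns ev/errs: Close() stops the reader, which closes both.
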
-// sendEvent attempts to send an event to the user, returning true if the event
-// was put in the channel successfully and false if the watcher has been closed.
-func (w *Watcher) sendEvent(name string, op Op) (sent bool) {
- select {
- case w.Events <- Event{Name: name, Op: op}:
- return true
- case <-w.done:
- return false
- }
-}
-
-// sendError attempts to send an error to the user, returning true if the error
-// was put in the channel successfully and false if the watcher has been closed.
-func (w *Watcher) sendError(err error) (sent bool) {
- select {
- case w.Errors <- err:
- return true
- case <-w.done:
- return false
- }
-}
-
-func (w *Watcher) isClosed() bool {
- select {
- case <-w.done:
- return true
- default:
- return false
- }
-}
-
-// Close removes all watches and closes the Events channel.
-func (w *Watcher) Close() error {
- // Take the lock used by associateFile to prevent lingering events from
- // being processed after the close
- w.mu.Lock()
- defer w.mu.Unlock()
- if w.isClosed() {
+func (w *fen) Close() error {
+ if w.shared.close() {
return nil
}
- close(w.done)
return w.port.Close()
}
-// Add starts monitoring the path for changes.
-//
-// A path can only be watched once; watching it more than once is a no-op and will
-// not return an error. Paths that do not yet exist on the filesystem cannot be
-// watched.
-//
-// A watch will be automatically removed if the watched path is deleted or
-// renamed. The exception is the Windows backend, which doesn't remove the
-// watcher on renames.
-//
-// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
-// filesystems (/proc, /sys, etc.) generally don't work.
-//
-// Returns [ErrClosed] if [Watcher.Close] was called.
-//
-// See [Watcher.AddWith] for a version that allows adding options.
-//
-// # Watching directories
-//
-// All files in a directory are monitored, including new files that are created
-// after the watcher is started. Subdirectories are not watched (i.e. it's
-// non-recursive).
-//
-// # Watching files
-//
-// Watching individual files (rather than directories) is generally not
-// recommended as many programs (especially editors) update files atomically: it
-// will write to a temporary file which is then moved to to destination,
-// overwriting the original (or some variant thereof). The watcher on the
-// original file is now lost, as that no longer exists.
-//
-// The upshot of this is that a power failure or crash won't leave a
-// half-written file.
-//
-// Watch the parent directory and use Event.Name to filter out files you're not
-// interested in. There is an example of this in cmd/fsnotify/file.go.
-func (w *Watcher) Add(name string) error { return w.AddWith(name) }
+func (w *fen) Add(name string) error { return w.AddWith(name) }
-// AddWith is like [Watcher.Add], but allows adding options. When using Add()
-// the defaults described below are used.
-//
-// Possible options are:
-//
-// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
-// other platforms. The default is 64K (65536 bytes).
-func (w *Watcher) AddWith(name string, opts ...addOpt) error {
+func (w *fen) AddWith(name string, opts ...addOpt) error {
if w.isClosed() {
return ErrClosed
}
- if w.port.PathIsWatched(name) {
- return nil
+ if debug {
+ fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
+ time.Now().Format("15:04:05.000000000"), name)
}
- _ = getOptions(opts...)
+ with := getOptions(opts...)
+ if !w.xSupports(with.op) {
+ return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
+ }
// Currently we resolve symlinks that were explicitly requested to be
// watched. Otherwise we would use LStat here.
@@ -283,7 +89,7 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error {
}
w.mu.Lock()
- w.dirs[name] = struct{}{}
+ w.dirs[name] = with.op
w.mu.Unlock()
return nil
}
@@ -294,26 +100,22 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error {
}
w.mu.Lock()
- w.watches[name] = struct{}{}
+ w.watches[name] = with.op
w.mu.Unlock()
return nil
}
-// Remove stops monitoring the path for changes.
-//
-// Directories are always removed non-recursively. For example, if you added
-// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
-//
-// Removing a path that has not yet been added returns [ErrNonExistentWatch].
-//
-// Returns nil if [Watcher.Close] was called.
-func (w *Watcher) Remove(name string) error {
+func (w *fen) Remove(name string) error {
if w.isClosed() {
return nil
}
if !w.port.PathIsWatched(name) {
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
}
+ if debug {
+ fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
+ time.Now().Format("15:04:05.000000000"), name)
+ }
// The user has expressed an intent. Immediately remove this name from
// whichever watch list it might be in. If it's not in there the delete
@@ -346,7 +148,7 @@ func (w *Watcher) Remove(name string) error {
}
// readEvents contains the main loop that runs in a goroutine watching for events.
-func (w *Watcher) readEvents() {
+func (w *fen) readEvents() {
// If this function returns, the watcher has been closed and we can close
// these channels
defer func() {
@@ -367,7 +169,7 @@ func (w *Watcher) readEvents() {
return
}
// There was an error not caused by calling w.Close()
- if !w.sendError(err) {
+ if !w.sendError(fmt.Errorf("port.Get: %w", err)) {
return
}
}
@@ -382,17 +184,19 @@ func (w *Watcher) readEvents() {
continue
}
+ if debug {
+ internal.Debug(pevent.Path, pevent.Events)
+ }
+
err = w.handleEvent(&pevent)
- if err != nil {
- if !w.sendError(err) {
- return
- }
+ if !w.sendError(err) {
+ return
}
}
}
}
-func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error {
+func (w *fen) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error {
files, err := os.ReadDir(path)
if err != nil {
return err
@@ -418,7 +222,7 @@ func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, ha
// bitmap matches more than one event type (e.g. the file was both modified and
// had the attributes changed between when the association was created and the
// when event was returned)
-func (w *Watcher) handleEvent(event *unix.PortEvent) error {
+func (w *fen) handleEvent(event *unix.PortEvent) error {
var (
events = event.Events
path = event.Path
@@ -433,13 +237,13 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error {
isWatched := watchedDir || watchedPath
if events&unix.FILE_DELETE != 0 {
- if !w.sendEvent(path, Remove) {
+ if !w.sendEvent(Event{Name: path, Op: Remove}) {
return nil
}
reRegister = false
}
if events&unix.FILE_RENAME_FROM != 0 {
- if !w.sendEvent(path, Rename) {
+ if !w.sendEvent(Event{Name: path, Op: Rename}) {
return nil
}
// Don't keep watching the new file name
@@ -453,7 +257,7 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error {
// inotify reports a Remove event in this case, so we simulate this
// here.
- if !w.sendEvent(path, Remove) {
+ if !w.sendEvent(Event{Name: path, Op: Remove}) {
return nil
}
// Don't keep watching the file that was removed
@@ -487,7 +291,7 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error {
// get here, the subdirectory is already gone. Clearly we were watching
// this path but now it is gone. Let's tell the user that it was
// removed.
- if !w.sendEvent(path, Remove) {
+ if !w.sendEvent(Event{Name: path, Op: Remove}) {
return nil
}
// Suppress extra write events on removed directories; they are not
@@ -502,7 +306,7 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error {
if err != nil {
// The symlink still exists, but the target is gone. Report the
// Remove similar to above.
- if !w.sendEvent(path, Remove) {
+ if !w.sendEvent(Event{Name: path, Op: Remove}) {
return nil
}
// Don't return the error
@@ -510,18 +314,12 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error {
}
if events&unix.FILE_MODIFIED != 0 {
- if fmode.IsDir() {
- if watchedDir {
- if err := w.updateDirectory(path); err != nil {
- return err
- }
- } else {
- if !w.sendEvent(path, Write) {
- return nil
- }
+ if fmode.IsDir() && watchedDir {
+ if err := w.updateDirectory(path); err != nil {
+ return err
}
} else {
- if !w.sendEvent(path, Write) {
+ if !w.sendEvent(Event{Name: path, Op: Write}) {
return nil
}
}
@@ -529,7 +327,7 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error {
if events&unix.FILE_ATTRIB != 0 && stat != nil {
// Only send Chmod if perms changed
if stat.Mode().Perm() != fmode.Perm() {
- if !w.sendEvent(path, Chmod) {
+ if !w.sendEvent(Event{Name: path, Op: Chmod}) {
return nil
}
}
@@ -538,17 +336,27 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error {
if stat != nil {
// If we get here, it means we've hit an event above that requires us to
// continue watching the file or directory
- return w.associateFile(path, stat, isWatched)
+ err := w.associateFile(path, stat, isWatched)
+ if errors.Is(err, fs.ErrNotExist) {
+ // Path may have been removed since the stat.
+ err = nil
+ }
+ return err
}
return nil
}
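
One point worth keeping in mind when reading handleEvent: FEN port associations are one-shot, so delivering an event disarms the path, and the function must end by re-associating anything still watched. The sequencing, hedged to the calls used in this file:

    // w.port.Get(...)                 // delivers an event and disarms the path
    // w.handleEvent(&pevent)          // ...which re-arms via:
    // w.port.AssociatePath(path, stat, events, stat.Mode())
    // Without the re-arm, each path would report at most one event.
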
-func (w *Watcher) updateDirectory(path string) error {
- // The directory was modified, so we must find unwatched entities and watch
- // them. If something was removed from the directory, nothing will happen,
- // as everything else should still be watched.
+// The directory was modified, so we must find unwatched entities and watch
+// them. If something was removed from the directory, nothing will happen, as
+// everything else should still be watched.
+func (w *fen) updateDirectory(path string) error {
files, err := os.ReadDir(path)
if err != nil {
+ // Directory no longer exists: probably just deleted since we got the
+ // event.
+ if errors.Is(err, fs.ErrNotExist) {
+ return nil
+ }
return err
}
@@ -563,19 +371,22 @@ func (w *Watcher) updateDirectory(path string) error {
return err
}
err = w.associateFile(path, finfo, false)
- if err != nil {
- if !w.sendError(err) {
- return nil
- }
+ if errors.Is(err, fs.ErrNotExist) {
+ // File may have disappeared between getting the dir listing and
+ // adding the port: that's okay to ignore.
+ continue
}
- if !w.sendEvent(path, Create) {
+ if !w.sendError(err) {
+ return nil
+ }
+ if !w.sendEvent(Event{Name: path, Op: Create}) {
return nil
}
}
return nil
}
-func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error {
+func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error {
if w.isClosed() {
return ErrClosed
}
@@ -593,34 +404,42 @@ func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) erro
// cleared up that discrepancy. The most likely cause is that the event
// has fired but we haven't processed it yet.
err := w.port.DissociatePath(path)
- if err != nil && err != unix.ENOENT {
- return err
+ if err != nil && !errors.Is(err, unix.ENOENT) {
+ return fmt.Errorf("port.DissociatePath(%q): %w", path, err)
}
}
- // FILE_NOFOLLOW means we watch symlinks themselves rather than their
- // targets.
- events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW
- if follow {
- // We *DO* follow symlinks for explicitly watched entries.
- events = unix.FILE_MODIFIED | unix.FILE_ATTRIB
+
+ var events int
+ if !follow {
+ // Watch symlinks themselves rather than their targets unless this entry
+ // is explicitly watched.
+ events |= unix.FILE_NOFOLLOW
+ }
+ if true { // TODO: implement withOps()
+ events |= unix.FILE_MODIFIED
+ }
+ if true {
+ events |= unix.FILE_ATTRIB
}
- return w.port.AssociatePath(path, stat,
- events,
- stat.Mode())
+ err := w.port.AssociatePath(path, stat, events, stat.Mode())
+ if err != nil {
+ return fmt.Errorf("port.AssociatePath(%q): %w", path, err)
+ }
+ return nil
}
-func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error {
+func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error {
if !w.port.PathIsWatched(path) {
return nil
}
- return w.port.DissociatePath(path)
+ err := w.port.DissociatePath(path)
+ if err != nil {
+ return fmt.Errorf("port.DissociatePath(%q): %w", path, err)
+ }
+ return nil
}
-// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
-// yet removed).
-//
-// Returns nil if [Watcher.Close] was called.
-func (w *Watcher) WatchList() []string {
+func (w *fen) WatchList() []string {
if w.isClosed() {
return nil
}
@@ -638,3 +457,11 @@ func (w *Watcher) WatchList() []string {
return entries
}
+
+func (w *fen) xSupports(op Op) bool {
+ if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
+ op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
+ return false
+ }
+ return true
+}
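
xSupports is the FEN-side gate for the unportable ops, which event ports cannot deliver; AddWith turns a rejected op into an error before touching the port. A small sketch using only names from this file:

    // For any of the four unportable ops, AddWith fails up front:
    if !w.xSupports(xUnportableOpen) {
        // AddWith returns fmt.Errorf("%w: %s", xErrUnsupported, with.op)
    }
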
diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go
index 921c1c1e4..a36cb89d7 100644
--- a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go
+++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go
@@ -1,8 +1,4 @@
//go:build linux && !appengine
-// +build linux,!appengine
-
-// Note: the documentation on the Watcher type and methods is generated from
-// mkdoc.zsh
package fsnotify
@@ -10,127 +6,21 @@ import (
"errors"
"fmt"
"io"
+ "io/fs"
"os"
"path/filepath"
"strings"
"sync"
+ "time"
"unsafe"
+ "github.com/fsnotify/fsnotify/internal"
"golang.org/x/sys/unix"
)
-// Watcher watches a set of paths, delivering events on a channel.
-//
-// A watcher should not be copied (e.g. pass it by pointer, rather than by
-// value).
-//
-// # Linux notes
-//
-// When a file is removed a Remove event won't be emitted until all file
-// descriptors are closed, and deletes will always emit a Chmod. For example:
-//
-// fp := os.Open("file")
-// os.Remove("file") // Triggers Chmod
-// fp.Close() // Triggers Remove
-//
-// This is the event that inotify sends, so not much can be changed about this.
-//
-// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
-// for the number of watches per user, and fs.inotify.max_user_instances
-// specifies the maximum number of inotify instances per user. Every Watcher you
-// create is an "instance", and every path you add is a "watch".
-//
-// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
-// /proc/sys/fs/inotify/max_user_instances
-//
-// To increase them you can use sysctl or write the value to the /proc file:
-//
-// # Default values on Linux 5.18
-// sysctl fs.inotify.max_user_watches=124983
-// sysctl fs.inotify.max_user_instances=128
-//
-// To make the changes persist on reboot edit /etc/sysctl.conf or
-// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
-// your distro's documentation):
-//
-// fs.inotify.max_user_watches=124983
-// fs.inotify.max_user_instances=128
-//
-// Reaching the limit will result in a "no space left on device" or "too many open
-// files" error.
-//
-// # kqueue notes (macOS, BSD)
-//
-// kqueue requires opening a file descriptor for every file that's being watched;
-// so if you're watching a directory with five files then that's six file
-// descriptors. You will run in to your system's "max open files" limit faster on
-// these platforms.
-//
-// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
-// control the maximum number of open files, as well as /etc/login.conf on BSD
-// systems.
-//
-// # Windows notes
-//
-// Paths can be added as "C:\path\to\dir", but forward slashes
-// ("C:/path/to/dir") will also work.
-//
-// When a watched directory is removed it will always send an event for the
-// directory itself, but may not send events for all files in that directory.
-// Sometimes it will send events for all times, sometimes it will send no
-// events, and often only for some files.
-//
-// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
-// value that is guaranteed to work with SMB filesystems. If you have many
-// events in quick succession this may not be enough, and you will have to use
-// [WithBufferSize] to increase the value.
-type Watcher struct {
- // Events sends the filesystem change events.
- //
- // fsnotify can send the following events; a "path" here can refer to a
- // file, directory, symbolic link, or special file like a FIFO.
- //
- // fsnotify.Create A new path was created; this may be followed by one
- // or more Write events if data also gets written to a
- // file.
- //
- // fsnotify.Remove A path was removed.
- //
- // fsnotify.Rename A path was renamed. A rename is always sent with the
- // old path as Event.Name, and a Create event will be
- // sent with the new name. Renames are only sent for
- // paths that are currently watched; e.g. moving an
- // unmonitored file into a monitored directory will
- // show up as just a Create. Similarly, renaming a file
- // to outside a monitored directory will show up as
- // only a Rename.
- //
- // fsnotify.Write A file or named pipe was written to. A Truncate will
- // also trigger a Write. A single "write action"
- // initiated by the user may show up as one or multiple
- // writes, depending on when the system syncs things to
- // disk. For example when compiling a large Go program
- // you may get hundreds of Write events, and you may
- // want to wait until you've stopped receiving them
- // (see the dedup example in cmd/fsnotify).
- //
- // Some systems may send Write event for directories
- // when the directory content changes.
- //
- // fsnotify.Chmod Attributes were changed. On Linux this is also sent
- // when a file is removed (or more accurately, when a
- // link to an inode is removed). On kqueue it's sent
- // when a file is truncated. On Windows it's never
- // sent.
+type inotify struct {
+ *shared
Events chan Event
-
- // Errors sends any errors.
- //
- // ErrEventOverflow is used to indicate there are too many events:
- //
- // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
- // - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
- // - kqueue, fen: Not used.
Errors chan error
// Store fd here as os.File.Read() will no longer return on close after
@@ -138,21 +28,41 @@ type Watcher struct {
fd int
inotifyFile *os.File
watches *watches
- done chan struct{} // Channel for sending a "quit message" to the reader goroutine
- closeMu sync.Mutex
doneResp chan struct{} // Channel to respond to Close
+
+ // Store rename cookies in an array, with the index wrapping to 0. Almost
+ // all of the time what we get is a MOVED_FROM to set the cookie and the
+ // next event inotify sends will be MOVED_TO to read it. However, this is
+ // not guaranteed – as described in inotify(7) – and we may get other events
+ // between the two MOVED_* events (including other MOVED_* ones).
+ //
+ // A second issue is that moving a file outside the watched directory will
+ // trigger a MOVED_FROM to set the cookie, but we never see the MOVED_TO to
+ // read and delete it. So just storing it in a map would slowly leak memory.
+ //
+ // Doing it like this gives us a simple fast LRU-cache that won't allocate.
+ // Ten items should be more than enough for our purpose, and a loop over
+ // such a short array is faster than a map access anyway (not that it hugely
+ // matters since we're talking about hundreds of ns at the most, but still).
+ cookies [10]koekje
+ cookieIndex uint8
+ cookiesMu sync.Mutex
}
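
A standalone sketch of that wrapping cookie cache, pulled out for clarity (not part of the patch; the real fields live on the inotify struct above, and koekje is defined just below):

    type cookieCache struct {
        entries [10]koekje // fixed size: bounded memory, no allocations
        index   uint8      // wraps to 0, overwriting the oldest entry
    }

    func (c *cookieCache) put(cookie uint32, path string) {
        c.entries[c.index] = koekje{cookie: cookie, path: path}
        c.index = (c.index + 1) % uint8(len(c.entries))
    }

    func (c *cookieCache) get(cookie uint32) string {
        for _, e := range c.entries { // a linear scan beats a map at n=10
            if e.cookie == cookie {
                return e.path
            }
        }
        return "" // MOVED_FROM fell outside the window, or was never seen
    }
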
type (
watches struct {
- mu sync.RWMutex
wd map[uint32]*watch // wd → watch
path map[string]uint32 // pathname → wd
}
watch struct {
- wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
- flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
- path string // Watch path.
+ wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
+ flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
+ path string // Watch path.
+ recurse bool // Recursion with ./...?
+ }
+ koekje struct {
+ cookie uint32
+ path string
}
)
@@ -163,57 +73,43 @@ func newWatches() *watches {
}
}
-func (w *watches) len() int {
- w.mu.RLock()
- defer w.mu.RUnlock()
- return len(w.wd)
-}
-
-func (w *watches) add(ww *watch) {
- w.mu.Lock()
- defer w.mu.Unlock()
- w.wd[ww.wd] = ww
- w.path[ww.path] = ww.wd
-}
-
-func (w *watches) remove(wd uint32) {
- w.mu.Lock()
- defer w.mu.Unlock()
- delete(w.path, w.wd[wd].path)
- delete(w.wd, wd)
-}
-
-func (w *watches) removePath(path string) (uint32, bool) {
- w.mu.Lock()
- defer w.mu.Unlock()
+func (w *watches) byPath(path string) *watch { return w.wd[w.path[path]] }
+func (w *watches) byWd(wd uint32) *watch { return w.wd[wd] }
+func (w *watches) len() int { return len(w.wd) }
+func (w *watches) add(ww *watch) { w.wd[ww.wd] = ww; w.path[ww.path] = ww.wd }
+func (w *watches) remove(watch *watch) { delete(w.path, watch.path); delete(w.wd, watch.wd) }
+func (w *watches) removePath(path string) ([]uint32, error) {
+ path, recurse := recursivePath(path)
wd, ok := w.path[path]
if !ok {
- return 0, false
+ return nil, fmt.Errorf("%w: %s", ErrNonExistentWatch, path)
+ }
+
+ watch := w.wd[wd]
+ if recurse && !watch.recurse {
+ return nil, fmt.Errorf("can't use /... with non-recursive watch %q", path)
}
delete(w.path, path)
delete(w.wd, wd)
+ if !watch.recurse {
+ return []uint32{wd}, nil
+ }
- return wd, true
-}
-
-func (w *watches) byPath(path string) *watch {
- w.mu.RLock()
- defer w.mu.RUnlock()
- return w.wd[w.path[path]]
-}
-
-func (w *watches) byWd(wd uint32) *watch {
- w.mu.RLock()
- defer w.mu.RUnlock()
- return w.wd[wd]
+ wds := make([]uint32, 0, 8)
+ wds = append(wds, wd)
+ for p, rwd := range w.path {
+ if strings.HasPrefix(p, path) {
+ delete(w.path, p)
+ delete(w.wd, rwd)
+ wds = append(wds, rwd)
+ }
+ }
+ return wds, nil
}
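
The prefix sweep above makes removing a recursive watch a single pass over all registered paths. Illustrative semantics, assuming recursivePath strips a trailing "/..." (it is defined elsewhere in this package):

    // State: "/w" added as "/w/..." (recurse=true); the WalkDir in
    // AddWith also registered "/w/a" and "/w/a/b".
    //
    //   removePath("/w/...") → sweeps the wds for all three paths.
    //   removePath("/w")     → same sweep, since watch.recurse is true.
    //   removePath("/x/...") → ErrNonExistentWatch.
    //   removePath("/w/...") after a plain Add("/w") → "can't use /...
    //   with non-recursive watch".
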
func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error {
- w.mu.Lock()
- defer w.mu.Unlock()
-
var existing *watch
wd, ok := w.path[path]
if ok {
@@ -236,20 +132,9 @@ func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error
return nil
}
-// NewWatcher creates a new Watcher.
-func NewWatcher() (*Watcher, error) {
- return NewBufferedWatcher(0)
-}
+var defaultBufferSize = 0
-// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
-// channel.
-//
-// The main use case for this is situations with a very large number of events
-// where the kernel buffer size can't be increased (e.g. due to lack of
-// permissions). An unbuffered Watcher will perform better for almost all use
-// cases, and whenever possible you will be better off increasing the kernel
-// buffers instead of adding a large userspace buffer.
-func NewBufferedWatcher(sz uint) (*Watcher, error) {
+func newBackend(ev chan Event, errs chan error) (backend, error) {
// Need to set nonblocking mode for SetDeadline to work, otherwise blocking
// I/O operations won't terminate on close.
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK)
@@ -257,13 +142,13 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) {
return nil, errno
}
- w := &Watcher{
+ w := &inotify{
+ shared: newShared(ev, errs),
+ Events: ev,
+ Errors: errs,
fd: fd,
inotifyFile: os.NewFile(uintptr(fd), ""),
watches: newWatches(),
- Events: make(chan Event, sz),
- Errors: make(chan error),
- done: make(chan struct{}),
doneResp: make(chan struct{}),
}
@@ -271,44 +156,10 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) {
return w, nil
}
-// Returns true if the event was sent, or false if watcher is closed.
-func (w *Watcher) sendEvent(e Event) bool {
- select {
- case w.Events <- e:
- return true
- case <-w.done:
- return false
- }
-}
-
-// Returns true if the error was sent, or false if watcher is closed.
-func (w *Watcher) sendError(err error) bool {
- select {
- case w.Errors <- err:
- return true
- case <-w.done:
- return false
- }
-}
-
-func (w *Watcher) isClosed() bool {
- select {
- case <-w.done:
- return true
- default:
- return false
- }
-}
-
-// Close removes all watches and closes the Events channel.
-func (w *Watcher) Close() error {
- w.closeMu.Lock()
- if w.isClosed() {
- w.closeMu.Unlock()
+func (w *inotify) Close() error {
+ if w.shared.close() {
return nil
}
- close(w.done)
- w.closeMu.Unlock()
// Causes any blocking reads to return with an error, provided the file
// still supports deadline operations.
@@ -317,84 +168,114 @@ func (w *Watcher) Close() error {
return err
}
- // Wait for goroutine to close
- <-w.doneResp
-
+ <-w.doneResp // Wait for readEvents() to finish.
return nil
}
-// Add starts monitoring the path for changes.
-//
-// A path can only be watched once; watching it more than once is a no-op and will
-// not return an error. Paths that do not yet exist on the filesystem cannot be
-// watched.
-//
-// A watch will be automatically removed if the watched path is deleted or
-// renamed. The exception is the Windows backend, which doesn't remove the
-// watcher on renames.
-//
-// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
-// filesystems (/proc, /sys, etc.) generally don't work.
-//
-// Returns [ErrClosed] if [Watcher.Close] was called.
-//
-// See [Watcher.AddWith] for a version that allows adding options.
-//
-// # Watching directories
-//
-// All files in a directory are monitored, including new files that are created
-// after the watcher is started. Subdirectories are not watched (i.e. it's
-// non-recursive).
-//
-// # Watching files
-//
-// Watching individual files (rather than directories) is generally not
-// recommended as many programs (especially editors) update files atomically: it
-// will write to a temporary file which is then moved to to destination,
-// overwriting the original (or some variant thereof). The watcher on the
-// original file is now lost, as that no longer exists.
-//
-// The upshot of this is that a power failure or crash won't leave a
-// half-written file.
-//
-// Watch the parent directory and use Event.Name to filter out files you're not
-// interested in. There is an example of this in cmd/fsnotify/file.go.
-func (w *Watcher) Add(name string) error { return w.AddWith(name) }
-
-// AddWith is like [Watcher.Add], but allows adding options. When using Add()
-// the defaults described below are used.
-//
-// Possible options are:
-//
-// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
-// other platforms. The default is 64K (65536 bytes).
-func (w *Watcher) AddWith(name string, opts ...addOpt) error {
+func (w *inotify) Add(name string) error { return w.AddWith(name) }
+
+func (w *inotify) AddWith(path string, opts ...addOpt) error {
if w.isClosed() {
return ErrClosed
}
+ if debug {
+ fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
+ time.Now().Format("15:04:05.000000000"), path)
+ }
+
+ with := getOptions(opts...)
+ if !w.xSupports(with.op) {
+ return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
+ }
+
+ add := func(path string, with withOpts, recurse bool) error {
+ var flags uint32
+ if with.noFollow {
+ flags |= unix.IN_DONT_FOLLOW
+ }
+ if with.op.Has(Create) {
+ flags |= unix.IN_CREATE
+ }
+ if with.op.Has(Write) {
+ flags |= unix.IN_MODIFY
+ }
+ if with.op.Has(Remove) {
+ flags |= unix.IN_DELETE | unix.IN_DELETE_SELF
+ }
+ if with.op.Has(Rename) {
+ flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF
+ }
+ if with.op.Has(Chmod) {
+ flags |= unix.IN_ATTRIB
+ }
+ if with.op.Has(xUnportableOpen) {
+ flags |= unix.IN_OPEN
+ }
+ if with.op.Has(xUnportableRead) {
+ flags |= unix.IN_ACCESS
+ }
+ if with.op.Has(xUnportableCloseWrite) {
+ flags |= unix.IN_CLOSE_WRITE
+ }
+ if with.op.Has(xUnportableCloseRead) {
+ flags |= unix.IN_CLOSE_NOWRITE
+ }
+ return w.register(path, flags, recurse)
+ }
+
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ path, recurse := recursivePath(path)
+ if recurse {
+ return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error {
+ if err != nil {
+ return err
+ }
+ if !d.IsDir() {
+ if root == path {
+ return fmt.Errorf("fsnotify: not a directory: %q", path)
+ }
+ return nil
+ }
+
+ // Send a Create event when adding new directory from a recursive
+ // watch; this is for "mkdir -p one/two/three". Usually all those
+ // directories will be created before we can set up watchers on the
+ // subdirectories, so only "one" would be sent as a Create event and
+ // not "one/two" and "one/two/three" (inotifywait -r has the same
+ // problem).
+ if with.sendCreate && root != path {
+ w.sendEvent(Event{Name: root, Op: Create})
+ }
- name = filepath.Clean(name)
- _ = getOptions(opts...)
+ return add(root, with, true)
+ })
+ }
- var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
- unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
- unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
+ return add(path, with, false)
+}
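
AddWith now derives the inotify mask from the requested ops instead of always using the fixed default set, and a trailing "/..." turns the add into a WalkDir over the whole tree. Usage sketch (hedged: how the public frontend exposes op filtering is not visible in this diff):

    _ = w.Add("/srv/data")     // one directory, default op set

    // Recursive: registers /srv/data plus every subdirectory, and with
    // with.sendCreate set emits synthetic Create events for directories
    // discovered mid-walk (the "mkdir -p one/two/three" case above).
    _ = w.Add("/srv/data/...")
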
- return w.watches.updatePath(name, func(existing *watch) (*watch, error) {
+func (w *inotify) register(path string, flags uint32, recurse bool) error {
+ return w.watches.updatePath(path, func(existing *watch) (*watch, error) {
if existing != nil {
flags |= existing.flags | unix.IN_MASK_ADD
}
- wd, err := unix.InotifyAddWatch(w.fd, name, flags)
+ wd, err := unix.InotifyAddWatch(w.fd, path, flags)
if wd == -1 {
return nil, err
}
+ if e, ok := w.watches.wd[uint32(wd)]; ok {
+ return e, nil
+ }
+
if existing == nil {
return &watch{
- wd: uint32(wd),
- path: name,
- flags: flags,
+ wd: uint32(wd),
+ path: path,
+ flags: flags,
+ recurse: recurse,
}, nil
}
@@ -404,87 +285,80 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error {
})
}
-// Remove stops monitoring the path for changes.
-//
-// Directories are always removed non-recursively. For example, if you added
-// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
-//
-// Removing a path that has not yet been added returns [ErrNonExistentWatch].
-//
-// Returns nil if [Watcher.Close] was called.
-func (w *Watcher) Remove(name string) error {
+func (w *inotify) Remove(name string) error {
if w.isClosed() {
return nil
}
+ if debug {
+ fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
+ time.Now().Format("15:04:05.000000000"), name)
+ }
+
+ w.mu.Lock()
+ defer w.mu.Unlock()
return w.remove(filepath.Clean(name))
}
-func (w *Watcher) remove(name string) error {
- wd, ok := w.watches.removePath(name)
- if !ok {
- return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
- }
-
- success, errno := unix.InotifyRmWatch(w.fd, wd)
- if success == -1 {
- // TODO: Perhaps it's not helpful to return an error here in every case;
- // The only two possible errors are:
- //
- // - EBADF, which happens when w.fd is not a valid file descriptor
- // of any kind.
- // - EINVAL, which is when fd is not an inotify descriptor or wd
- // is not a valid watch descriptor. Watch descriptors are
- // invalidated when they are removed explicitly or implicitly;
- // explicitly by inotify_rm_watch, implicitly when the file they
- // are watching is deleted.
- return errno
+func (w *inotify) remove(name string) error {
+ wds, err := w.watches.removePath(name)
+ if err != nil {
+ return err
+ }
+
+ for _, wd := range wds {
+ _, err := unix.InotifyRmWatch(w.fd, wd)
+ if err != nil {
+ // TODO: Perhaps it's not helpful to return an error here in every
+ // case; the only two possible errors are:
+ //
+ // EBADF, which happens when w.fd is not a valid file descriptor of
+ // any kind.
+ //
+ // EINVAL, which is when fd is not an inotify descriptor or wd is
+ // not a valid watch descriptor. Watch descriptors are invalidated
+ // when they are removed explicitly or implicitly; explicitly by
+ // inotify_rm_watch, implicitly when the file they are watching is
+ // deleted.
+ return err
+ }
}
return nil
}
-// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
-// yet removed).
-//
-// Returns nil if [Watcher.Close] was called.
-func (w *Watcher) WatchList() []string {
+func (w *inotify) WatchList() []string {
if w.isClosed() {
return nil
}
+ w.mu.Lock()
+ defer w.mu.Unlock()
entries := make([]string, 0, w.watches.len())
- w.watches.mu.RLock()
for pathname := range w.watches.path {
entries = append(entries, pathname)
}
- w.watches.mu.RUnlock()
-
return entries
}
// readEvents reads from the inotify file descriptor, converts the
// received events into Event objects and sends them via the Events channel
-func (w *Watcher) readEvents() {
+func (w *inotify) readEvents() {
defer func() {
close(w.doneResp)
close(w.Errors)
close(w.Events)
}()
- var (
- buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
- errno error // Syscall errno
- )
+ var buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
for {
- // See if we have been closed.
if w.isClosed() {
return
}
n, err := w.inotifyFile.Read(buf[:])
- switch {
- case errors.Unwrap(err) == os.ErrClosed:
- return
- case err != nil:
+ if err != nil {
+ if errors.Is(err, os.ErrClosed) {
+ return
+ }
if !w.sendError(err) {
return
}
@@ -492,13 +366,9 @@ func (w *Watcher) readEvents() {
}
if n < unix.SizeofInotifyEvent {
- var err error
+ err := errors.New("notify: short read in readEvents()") // Read was too short.
if n == 0 {
err = io.EOF // If EOF is received. This should really never happen.
- } else if n < 0 {
- err = errno // If an error occurred while reading.
- } else {
- err = errors.New("notify: short read in readEvents()") // Read was too short.
}
if !w.sendError(err) {
return
@@ -506,74 +376,146 @@ func (w *Watcher) readEvents() {
continue
}
+ // We don't know how many events we just read into the buffer; loop
+ // while the offset points to at least one whole event.
var offset uint32
- // We don't know how many events we just read into the buffer
- // While the offset points to at least one whole event...
for offset <= uint32(n-unix.SizeofInotifyEvent) {
- var (
- // Point "raw" to the event in the buffer
- raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
- mask = uint32(raw.Mask)
- nameLen = uint32(raw.Len)
- )
-
- if mask&unix.IN_Q_OVERFLOW != 0 {
+ // Point to the event in the buffer.
+ inEvent := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
+
+ if inEvent.Mask&unix.IN_Q_OVERFLOW != 0 {
if !w.sendError(ErrEventOverflow) {
return
}
}
- // If the event happened to the watched directory or the watched file, the kernel
- // doesn't append the filename to the event, but we would like to always fill the
- // the "Name" field with a valid filename. We retrieve the path of the watch from
- // the "paths" map.
- watch := w.watches.byWd(uint32(raw.Wd))
-
- // inotify will automatically remove the watch on deletes; just need
- // to clean our state here.
- if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
- w.watches.remove(watch.wd)
+ ev, ok := w.handleEvent(inEvent, &buf, offset)
+ if !ok {
+ return
}
- // We can't really update the state when a watched path is moved;
- // only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove
- // the watch.
- if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF {
- err := w.remove(watch.path)
- if err != nil && !errors.Is(err, ErrNonExistentWatch) {
- if !w.sendError(err) {
- return
- }
- }
+ if !w.sendEvent(ev) {
+ return
}
- var name string
- if watch != nil {
- name = watch.path
- }
- if nameLen > 0 {
- // Point "bytes" at the first byte of the filename
- bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
- // The filename is padded with NULL bytes. TrimRight() gets rid of those.
- name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
+ // Move to the next event in the buffer
+ offset += unix.SizeofInotifyEvent + inEvent.Len
+ }
+ }
+}
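
readEvents is the only writer to Events/Errors and closes both on exit, so consumers can treat channel closure as shutdown. The standard drain loop (generic fsnotify usage, not specific to this patch):

    for {
        select {
        case ev, ok := <-w.Events:
            if !ok {
                return // watcher closed
            }
            fmt.Println("event:", ev)
        case err, ok := <-w.Errors:
            if !ok {
                return
            }
            fmt.Println("error:", err)
        }
    }
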
+
+func (w *inotify) handleEvent(inEvent *unix.InotifyEvent, buf *[65536]byte, offset uint32) (Event, bool) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ /// If the event happened to the watched directory or the watched file, the
+ /// kernel doesn't append the filename to the event, but we would like to
+ /// always fill the "Name" field with a valid filename. We retrieve the
+ /// path of the watch from the "paths" map.
+ ///
+ /// Can be nil if Remove() was called in another goroutine for this path
+ /// in between reading the events from the kernel and reading the internal
+ /// state. Not much we can do about it, so just skip. See #616.
+ watch := w.watches.byWd(uint32(inEvent.Wd))
+ if watch == nil {
+ return Event{}, true
+ }
+
+ var (
+ name = watch.path
+ nameLen = uint32(inEvent.Len)
+ )
+ if nameLen > 0 {
+ /// Point "bytes" at the first byte of the filename
+ bb := *buf
+ bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&bb[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
+ /// The filename is padded with NULL bytes. TrimRight() gets rid of those.
+ name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\x00")
+ }
+
+ if debug {
+ internal.Debug(name, inEvent.Mask, inEvent.Cookie)
+ }
+
+ if inEvent.Mask&unix.IN_IGNORED != 0 || inEvent.Mask&unix.IN_UNMOUNT != 0 {
+ w.watches.remove(watch)
+ return Event{}, true
+ }
+
+ // inotify will automatically remove the watch on deletes; just need
+ // to clean our state here.
+ if inEvent.Mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
+ w.watches.remove(watch)
+ }
+
+ // We can't really update the state when a watched path is moved; only
+ // IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove the watch.
+ if inEvent.Mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF {
+ if watch.recurse { // Do nothing
+ return Event{}, true
+ }
+
+ err := w.remove(watch.path)
+ if err != nil && !errors.Is(err, ErrNonExistentWatch) {
+ if !w.sendError(err) {
+ return Event{}, false
}
+ }
+ }
+
+ /// Skip if we're watching both this path and the parent; the parent will
+ /// already send a delete so no need to do it twice.
+ if inEvent.Mask&unix.IN_DELETE_SELF != 0 {
+ _, ok := w.watches.path[filepath.Dir(watch.path)]
+ if ok {
+ return Event{}, true
+ }
+ }
- event := w.newEvent(name, mask)
+ ev := w.newEvent(name, inEvent.Mask, inEvent.Cookie)
+ // Need to update watch path for recurse.
+ if watch.recurse {
+ isDir := inEvent.Mask&unix.IN_ISDIR == unix.IN_ISDIR
+ /// New directory created: set up watch on it.
+ if isDir && ev.Has(Create) {
+ err := w.register(ev.Name, watch.flags, true)
+ if !w.sendError(err) {
+ return Event{}, false
+ }
- // Send the events that are not ignored on the events channel
- if mask&unix.IN_IGNORED == 0 {
- if !w.sendEvent(event) {
- return
+ // This was a directory rename, so we need to update all the
+ // children.
+ //
+ // TODO: this is of course pretty slow; we should use a better data
+ // structure for storing all of this, e.g. store children in the
+ // watch. I have some code for this in my kqueue refactor we can use
+ // in the future. For now I'm okay with this as it's not publicly
+ // available. Correctness first, performance second.
+ if ev.renamedFrom != "" {
+ for k, ww := range w.watches.wd {
+ if k == watch.wd || ww.path == ev.Name {
+ continue
+ }
+ if strings.HasPrefix(ww.path, ev.renamedFrom) {
+ ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1)
+ w.watches.wd[k] = ww
+ }
}
}
-
- // Move to the next event in the buffer
- offset += unix.SizeofInotifyEvent + nameLen
}
}
+
+ return ev, true
+}
+
+func (w *inotify) isRecursive(path string) bool {
+ ww := w.watches.byPath(path)
+ if ww == nil { // path could be a file, so also check the Dir.
+ ww = w.watches.byPath(filepath.Dir(path))
+ }
+ return ww != nil && ww.recurse
}
-// newEvent returns an platform-independent Event based on an inotify mask.
-func (w *Watcher) newEvent(name string, mask uint32) Event {
+func (w *inotify) newEvent(name string, mask, cookie uint32) Event {
e := Event{Name: name}
if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
e.Op |= Create
@@ -584,11 +526,58 @@ func (w *Watcher) newEvent(name string, mask uint32) Event {
if mask&unix.IN_MODIFY == unix.IN_MODIFY {
e.Op |= Write
}
+ if mask&unix.IN_OPEN == unix.IN_OPEN {
+ e.Op |= xUnportableOpen
+ }
+ if mask&unix.IN_ACCESS == unix.IN_ACCESS {
+ e.Op |= xUnportableRead
+ }
+ if mask&unix.IN_CLOSE_WRITE == unix.IN_CLOSE_WRITE {
+ e.Op |= xUnportableCloseWrite
+ }
+ if mask&unix.IN_CLOSE_NOWRITE == unix.IN_CLOSE_NOWRITE {
+ e.Op |= xUnportableCloseRead
+ }
if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
e.Op |= Rename
}
if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
e.Op |= Chmod
}
+
+ if cookie != 0 {
+ if mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
+ w.cookiesMu.Lock()
+ w.cookies[w.cookieIndex] = koekje{cookie: cookie, path: e.Name}
+ w.cookieIndex++
+ if w.cookieIndex > 9 {
+ w.cookieIndex = 0
+ }
+ w.cookiesMu.Unlock()
+ } else if mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
+ w.cookiesMu.Lock()
+ var prev string
+ for _, c := range w.cookies {
+ if c.cookie == cookie {
+ prev = c.path
+ break
+ }
+ }
+ w.cookiesMu.Unlock()
+ e.renamedFrom = prev
+ }
+ }
return e
}
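
Putting the cookie pieces together, a rename inside a watched directory arrives as two kernel events that newEvent stitches back up. Expected sequence for `mv /w/old /w/new` (illustrative; renamedFrom is unexported internal state):

    // IN_MOVED_FROM "old", cookie=C → Event{Name: "/w/old", Op: Rename};
    //   (C, "/w/old") is stored in the cookie array.
    // IN_MOVED_TO   "new", cookie=C → Event{Name: "/w/new", Op: Create,
    //   renamedFrom: "/w/old"}; the path is recovered by scanning for C.
    // Recursive watches then use renamedFrom to rewrite child watch paths.
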
+
+func (w *inotify) xSupports(op Op) bool {
+ return true // Supports everything.
+}
+
+func (w *inotify) state() {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ for wd, ww := range w.watches.wd {
+ fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path)
+ }
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
index 063a0915a..340aeec06 100644
--- a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
+++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
@@ -1,8 +1,4 @@
//go:build freebsd || openbsd || netbsd || dragonfly || darwin
-// +build freebsd openbsd netbsd dragonfly darwin
-
-// Note: the documentation on the Watcher type and methods is generated from
-// mkdoc.zsh
package fsnotify
@@ -11,174 +7,196 @@ import (
"fmt"
"os"
"path/filepath"
+ "runtime"
"sync"
+ "time"
+ "github.com/fsnotify/fsnotify/internal"
"golang.org/x/sys/unix"
)
-// Watcher watches a set of paths, delivering events on a channel.
-//
-// A watcher should not be copied (e.g. pass it by pointer, rather than by
-// value).
-//
-// # Linux notes
-//
-// When a file is removed a Remove event won't be emitted until all file
-// descriptors are closed, and deletes will always emit a Chmod. For example:
-//
-// fp := os.Open("file")
-// os.Remove("file") // Triggers Chmod
-// fp.Close() // Triggers Remove
-//
-// This is the event that inotify sends, so not much can be changed about this.
-//
-// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
-// for the number of watches per user, and fs.inotify.max_user_instances
-// specifies the maximum number of inotify instances per user. Every Watcher you
-// create is an "instance", and every path you add is a "watch".
-//
-// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
-// /proc/sys/fs/inotify/max_user_instances
-//
-// To increase them you can use sysctl or write the value to the /proc file:
-//
-// # Default values on Linux 5.18
-// sysctl fs.inotify.max_user_watches=124983
-// sysctl fs.inotify.max_user_instances=128
-//
-// To make the changes persist on reboot edit /etc/sysctl.conf or
-// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
-// your distro's documentation):
-//
-// fs.inotify.max_user_watches=124983
-// fs.inotify.max_user_instances=128
-//
-// Reaching the limit will result in a "no space left on device" or "too many open
-// files" error.
-//
-// # kqueue notes (macOS, BSD)
-//
-// kqueue requires opening a file descriptor for every file that's being watched;
-// so if you're watching a directory with five files then that's six file
-// descriptors. You will run in to your system's "max open files" limit faster on
-// these platforms.
-//
-// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
-// control the maximum number of open files, as well as /etc/login.conf on BSD
-// systems.
-//
-// # Windows notes
-//
-// Paths can be added as "C:\path\to\dir", but forward slashes
-// ("C:/path/to/dir") will also work.
-//
-// When a watched directory is removed it will always send an event for the
-// directory itself, but may not send events for all files in that directory.
-// Sometimes it will send events for all times, sometimes it will send no
-// events, and often only for some files.
-//
-// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
-// value that is guaranteed to work with SMB filesystems. If you have many
-// events in quick succession this may not be enough, and you will have to use
-// [WithBufferSize] to increase the value.
-type Watcher struct {
- // Events sends the filesystem change events.
- //
- // fsnotify can send the following events; a "path" here can refer to a
- // file, directory, symbolic link, or special file like a FIFO.
- //
- // fsnotify.Create A new path was created; this may be followed by one
- // or more Write events if data also gets written to a
- // file.
- //
- // fsnotify.Remove A path was removed.
- //
- // fsnotify.Rename A path was renamed. A rename is always sent with the
- // old path as Event.Name, and a Create event will be
- // sent with the new name. Renames are only sent for
- // paths that are currently watched; e.g. moving an
- // unmonitored file into a monitored directory will
- // show up as just a Create. Similarly, renaming a file
- // to outside a monitored directory will show up as
- // only a Rename.
- //
- // fsnotify.Write A file or named pipe was written to. A Truncate will
- // also trigger a Write. A single "write action"
- // initiated by the user may show up as one or multiple
- // writes, depending on when the system syncs things to
- // disk. For example when compiling a large Go program
- // you may get hundreds of Write events, and you may
- // want to wait until you've stopped receiving them
- // (see the dedup example in cmd/fsnotify).
- //
- // Some systems may send Write event for directories
- // when the directory content changes.
- //
- // fsnotify.Chmod Attributes were changed. On Linux this is also sent
- // when a file is removed (or more accurately, when a
- // link to an inode is removed). On kqueue it's sent
- // when a file is truncated. On Windows it's never
- // sent.
+type kqueue struct {
+ *shared
Events chan Event
-
- // Errors sends any errors.
- //
- // ErrEventOverflow is used to indicate there are too many events:
- //
- // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
- // - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
- // - kqueue, fen: Not used.
Errors chan error
- done chan struct{}
- kq int // File descriptor (as returned by the kqueue() syscall).
- closepipe [2]int // Pipe used for closing.
- mu sync.Mutex // Protects access to watcher data
- watches map[string]int // Watched file descriptors (key: path).
- watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)).
- userWatches map[string]struct{} // Watches added with Watcher.Add()
- dirFlags map[string]uint32 // Watched directories to fflags used in kqueue.
- paths map[int]pathInfo // File descriptors to path names for processing kqueue events.
- fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events).
- isClosed bool // Set to true when Close() is first called
+ kq int // File descriptor (as returned by the kqueue() syscall).
+ closepipe [2]int // Pipe used for closing kq.
+ watches *watches
}
-type pathInfo struct {
- name string
- isDir bool
+type (
+ watches struct {
+ mu sync.RWMutex
+ wd map[int]watch // wd → watch
+ path map[string]int // pathname → wd
+ byDir map[string]map[int]struct{} // dirname(path) → wd
+ seen map[string]struct{} // Whether we know this file exists (to stop duplicate create events).
+ byUser map[string]struct{} // Watches added with Watcher.Add()
+ }
+ watch struct {
+ wd int
+ name string
+ linkName string // In case of links; name is the target, and this is the link.
+ isDir bool
+ dirFlags uint32
+ }
+)
+
+func newWatches() *watches {
+ return &watches{
+ wd: make(map[int]watch),
+ path: make(map[string]int),
+ byDir: make(map[string]map[int]struct{}),
+ seen: make(map[string]struct{}),
+ byUser: make(map[string]struct{}),
+ }
}
-// NewWatcher creates a new Watcher.
-func NewWatcher() (*Watcher, error) {
- return NewBufferedWatcher(0)
+func (w *watches) listPaths(userOnly bool) []string {
+ w.mu.RLock()
+ defer w.mu.RUnlock()
+
+ if userOnly {
+ l := make([]string, 0, len(w.byUser))
+ for p := range w.byUser {
+ l = append(l, p)
+ }
+ return l
+ }
+
+ l := make([]string, 0, len(w.path))
+ for p := range w.path {
+ l = append(l, p)
+ }
+ return l
}
-// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
-// channel.
-//
-// The main use case for this is situations with a very large number of events
-// where the kernel buffer size can't be increased (e.g. due to lack of
-// permissions). An unbuffered Watcher will perform better for almost all use
-// cases, and whenever possible you will be better off increasing the kernel
-// buffers instead of adding a large userspace buffer.
-func NewBufferedWatcher(sz uint) (*Watcher, error) {
+func (w *watches) watchesInDir(path string) []string {
+ w.mu.RLock()
+ defer w.mu.RUnlock()
+
+ l := make([]string, 0, 4)
+ for fd := range w.byDir[path] {
+ info := w.wd[fd]
+ if _, ok := w.byUser[info.name]; !ok {
+ l = append(l, info.name)
+ }
+ }
+ return l
+}
+
+// Mark path as added by the user.
+func (w *watches) addUserWatch(path string) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ w.byUser[path] = struct{}{}
+}
+
+func (w *watches) addLink(path string, fd int) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ w.path[path] = fd
+ w.seen[path] = struct{}{}
+}
+
+func (w *watches) add(path, linkPath string, fd int, isDir bool) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ w.path[path] = fd
+ w.wd[fd] = watch{wd: fd, name: path, linkName: linkPath, isDir: isDir}
+
+ parent := filepath.Dir(path)
+ byDir, ok := w.byDir[parent]
+ if !ok {
+ byDir = make(map[int]struct{}, 1)
+ w.byDir[parent] = byDir
+ }
+ byDir[fd] = struct{}{}
+}
+
+func (w *watches) byWd(fd int) (watch, bool) {
+ w.mu.RLock()
+ defer w.mu.RUnlock()
+ info, ok := w.wd[fd]
+ return info, ok
+}
+
+func (w *watches) byPath(path string) (watch, bool) {
+ w.mu.RLock()
+ defer w.mu.RUnlock()
+ info, ok := w.wd[w.path[path]]
+ return info, ok
+}
+
+func (w *watches) updateDirFlags(path string, flags uint32) bool {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ fd, ok := w.path[path]
+ if !ok { // Already deleted: don't re-set it here.
+ return false
+ }
+ info := w.wd[fd]
+ info.dirFlags = flags
+ w.wd[fd] = info
+ return true
+}
+
+func (w *watches) remove(fd int, path string) bool {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ isDir := w.wd[fd].isDir
+ delete(w.path, path)
+ delete(w.byUser, path)
+
+ parent := filepath.Dir(path)
+ delete(w.byDir[parent], fd)
+
+ if len(w.byDir[parent]) == 0 {
+ delete(w.byDir, parent)
+ }
+
+ delete(w.wd, fd)
+ delete(w.seen, path)
+ return isDir
+}
+
+func (w *watches) markSeen(path string, exists bool) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ if exists {
+ w.seen[path] = struct{}{}
+ } else {
+ delete(w.seen, path)
+ }
+}
+
+func (w *watches) seenBefore(path string) bool {
+ w.mu.RLock()
+ defer w.mu.RUnlock()
+ _, ok := w.seen[path]
+ return ok
+}
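
markSeen/seenBefore exist to suppress duplicate Create events: kqueue only reports a write on the parent directory, so the backend rescans it and must remember which entries it already knew. Intended use, sketched (the actual emit site is further down in this file):

    if !w.watches.seenBefore(path) {
        if !w.sendEvent(Event{Name: path, Op: Create}) {
            return
        }
        w.watches.markSeen(path, true) // known now; don't re-report
    }
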
+
+var defaultBufferSize = 0
+
+func newBackend(ev chan Event, errs chan error) (backend, error) {
kq, closepipe, err := newKqueue()
if err != nil {
return nil, err
}
- w := &Watcher{
- kq: kq,
- closepipe: closepipe,
- watches: make(map[string]int),
- watchesByDir: make(map[string]map[int]struct{}),
- dirFlags: make(map[string]uint32),
- paths: make(map[int]pathInfo),
- fileExists: make(map[string]struct{}),
- userWatches: make(map[string]struct{}),
- Events: make(chan Event, sz),
- Errors: make(chan error),
- done: make(chan struct{}),
+ w := &kqueue{
+ shared: newShared(ev, errs),
+ Events: ev,
+ Errors: errs,
+ kq: kq,
+ closepipe: closepipe,
+ watches: newWatches(),
}
go w.readEvents()
@@ -193,7 +211,7 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) {
// all.
func newKqueue() (kq int, closepipe [2]int, err error) {
kq, err = unix.Kqueue()
- if kq == -1 {
+ if err != nil {
return kq, closepipe, err
}
@@ -203,6 +221,8 @@ func newKqueue() (kq int, closepipe [2]int, err error) {
unix.Close(kq)
return kq, closepipe, err
}
+ unix.CloseOnExec(closepipe[0])
+ unix.CloseOnExec(closepipe[1])
// Register changes to listen on the closepipe.
changes := make([]unix.Kevent_t, 1)
@@ -220,167 +240,72 @@ func newKqueue() (kq int, closepipe [2]int, err error) {
return kq, closepipe, nil
}
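
The closepipe is the classic self-pipe trick: its read end is registered on the kqueue, so closing the write end makes it readable and wakes any blocked kevent wait, letting readEvents exit cleanly. The mechanism in isolation (sketch mirroring the registration above):

    var p [2]int
    if err := unix.Pipe(p[:]); err != nil {
        return err
    }
    // register p[0] with EVFILT_READ on the kqueue, as newKqueue does...
    unix.Close(p[1]) // later, from Close(): wakes the kevent() wait
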
-// Returns true if the event was sent, or false if watcher is closed.
-func (w *Watcher) sendEvent(e Event) bool {
- select {
- case w.Events <- e:
- return true
- case <-w.done:
- return false
- }
-}
-
-// Returns true if the error was sent, or false if watcher is closed.
-func (w *Watcher) sendError(err error) bool {
- select {
- case w.Errors <- err:
- return true
- case <-w.done:
- return false
- }
-}
-
-// Close removes all watches and closes the Events channel.
-func (w *Watcher) Close() error {
- w.mu.Lock()
- if w.isClosed {
- w.mu.Unlock()
+func (w *kqueue) Close() error {
+ if w.shared.close() {
return nil
}
- w.isClosed = true
- // copy paths to remove while locked
- pathsToRemove := make([]string, 0, len(w.watches))
- for name := range w.watches {
- pathsToRemove = append(pathsToRemove, name)
- }
- w.mu.Unlock() // Unlock before calling Remove, which also locks
+ pathsToRemove := w.watches.listPaths(false)
for _, name := range pathsToRemove {
w.Remove(name)
}
- // Send "quit" message to the reader goroutine.
- unix.Close(w.closepipe[1])
- close(w.done)
-
+ unix.Close(w.closepipe[1]) // Send "quit" message to readEvents
return nil
}
-// Add starts monitoring the path for changes.
-//
-// A path can only be watched once; watching it more than once is a no-op and will
-// not return an error. Paths that do not yet exist on the filesystem cannot be
-// watched.
-//
-// A watch will be automatically removed if the watched path is deleted or
-// renamed. The exception is the Windows backend, which doesn't remove the
-// watcher on renames.
-//
-// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
-// filesystems (/proc, /sys, etc.) generally don't work.
-//
-// Returns [ErrClosed] if [Watcher.Close] was called.
-//
-// See [Watcher.AddWith] for a version that allows adding options.
-//
-// # Watching directories
-//
-// All files in a directory are monitored, including new files that are created
-// after the watcher is started. Subdirectories are not watched (i.e. it's
-// non-recursive).
-//
-// # Watching files
-//
-// Watching individual files (rather than directories) is generally not
-// recommended as many programs (especially editors) update files atomically: it
-// will write to a temporary file which is then moved to to destination,
-// overwriting the original (or some variant thereof). The watcher on the
-// original file is now lost, as that no longer exists.
-//
-// The upshot of this is that a power failure or crash won't leave a
-// half-written file.
-//
-// Watch the parent directory and use Event.Name to filter out files you're not
-// interested in. There is an example of this in cmd/fsnotify/file.go.
-func (w *Watcher) Add(name string) error { return w.AddWith(name) }
+func (w *kqueue) Add(name string) error { return w.AddWith(name) }
-// AddWith is like [Watcher.Add], but allows adding options. When using Add()
-// the defaults described below are used.
-//
-// Possible options are:
-//
-// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
-// other platforms. The default is 64K (65536 bytes).
-func (w *Watcher) AddWith(name string, opts ...addOpt) error {
- _ = getOptions(opts...)
+func (w *kqueue) AddWith(name string, opts ...addOpt) error {
+ if debug {
+ fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
+ time.Now().Format("15:04:05.000000000"), name)
+ }
- w.mu.Lock()
- w.userWatches[name] = struct{}{}
- w.mu.Unlock()
- _, err := w.addWatch(name, noteAllEvents)
- return err
+ with := getOptions(opts...)
+ if !w.xSupports(with.op) {
+ return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
+ }
+
+ _, err := w.addWatch(name, noteAllEvents, false)
+ if err != nil {
+ return err
+ }
+ w.watches.addUserWatch(name)
+ return nil
}
-// Remove stops monitoring the path for changes.
-//
-// Directories are always removed non-recursively. For example, if you added
-// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
-//
-// Removing a path that has not yet been added returns [ErrNonExistentWatch].
-//
-// Returns nil if [Watcher.Close] was called.
-func (w *Watcher) Remove(name string) error {
+func (w *kqueue) Remove(name string) error {
+ if debug {
+ fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
+ time.Now().Format("15:04:05.000000000"), name)
+ }
return w.remove(name, true)
}
-func (w *Watcher) remove(name string, unwatchFiles bool) error {
- name = filepath.Clean(name)
- w.mu.Lock()
- if w.isClosed {
- w.mu.Unlock()
+func (w *kqueue) remove(name string, unwatchFiles bool) error {
+ if w.isClosed() {
return nil
}
- watchfd, ok := w.watches[name]
- w.mu.Unlock()
+
+ name = filepath.Clean(name)
+ info, ok := w.watches.byPath(name)
if !ok {
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
}
- err := w.register([]int{watchfd}, unix.EV_DELETE, 0)
+ err := w.register([]int{info.wd}, unix.EV_DELETE, 0)
if err != nil {
return err
}
- unix.Close(watchfd)
-
- w.mu.Lock()
- isDir := w.paths[watchfd].isDir
- delete(w.watches, name)
- delete(w.userWatches, name)
-
- parentName := filepath.Dir(name)
- delete(w.watchesByDir[parentName], watchfd)
-
- if len(w.watchesByDir[parentName]) == 0 {
- delete(w.watchesByDir, parentName)
- }
+ unix.Close(info.wd)
- delete(w.paths, watchfd)
- delete(w.dirFlags, name)
- delete(w.fileExists, name)
- w.mu.Unlock()
+ isDir := w.watches.remove(info.wd, name)
// Find all watched paths that are in this directory that are not external.
if unwatchFiles && isDir {
- var pathsToRemove []string
- w.mu.Lock()
- for fd := range w.watchesByDir[name] {
- path := w.paths[fd]
- if _, ok := w.userWatches[path.name]; !ok {
- pathsToRemove = append(pathsToRemove, path.name)
- }
- }
- w.mu.Unlock()
+ pathsToRemove := w.watches.watchesInDir(name)
for _, name := range pathsToRemove {
// Since these are internal, not much sense in propagating error to
// the user, as that will just confuse them with an error about a
@@ -391,23 +316,11 @@ func (w *Watcher) remove(name string, unwatchFiles bool) error {
return nil
}
-// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
-// yet removed).
-//
-// Returns nil if [Watcher.Close] was called.
-func (w *Watcher) WatchList() []string {
- w.mu.Lock()
- defer w.mu.Unlock()
- if w.isClosed {
+func (w *kqueue) WatchList() []string {
+ if w.isClosed() {
return nil
}
-
- entries := make([]string, 0, len(w.userWatches))
- for pathname := range w.userWatches {
- entries = append(entries, pathname)
- }
-
- return entries
+ return w.watches.listPaths(true)
}
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
@@ -417,114 +330,93 @@ const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | un
// described in kevent(2).
//
// Returns the real path to the file which was added, with symlinks resolved.
-func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
- var isDir bool
- name = filepath.Clean(name)
-
- w.mu.Lock()
- if w.isClosed {
- w.mu.Unlock()
+func (w *kqueue) addWatch(name string, flags uint32, listDir bool) (string, error) {
+ if w.isClosed() {
return "", ErrClosed
}
- watchfd, alreadyWatching := w.watches[name]
- // We already have a watch, but we can still override flags.
- if alreadyWatching {
- isDir = w.paths[watchfd].isDir
- }
- w.mu.Unlock()
+ name = filepath.Clean(name)
+
+ info, alreadyWatching := w.watches.byPath(name)
if !alreadyWatching {
fi, err := os.Lstat(name)
if err != nil {
return "", err
}
- // Don't watch sockets or named pipes
+ // Don't watch sockets or named pipes.
if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) {
return "", nil
}
- // Follow Symlinks.
- if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
+ // Follow symlinks, but only for paths added with Add(), and not paths
+ // we're adding from internalWatch from a listdir.
+ if !listDir && fi.Mode()&os.ModeSymlink == os.ModeSymlink {
link, err := os.Readlink(name)
if err != nil {
- // Return nil because Linux can add unresolvable symlinks to the
- // watch list without problems, so maintain consistency with
- // that. There will be no file events for broken symlinks.
- // TODO: more specific check; returns os.PathError; ENOENT?
- return "", nil
+ return "", err
+ }
+ if !filepath.IsAbs(link) {
+ link = filepath.Join(filepath.Dir(name), link)
}
- w.mu.Lock()
- _, alreadyWatching = w.watches[link]
- w.mu.Unlock()
-
+ _, alreadyWatching = w.watches.byPath(link)
if alreadyWatching {
// Add to watches so we don't get spurious Create events later
// on when we diff the directories.
- w.watches[name] = 0
- w.fileExists[name] = struct{}{}
+ w.watches.addLink(name, 0)
return link, nil
}
+ info.linkName = name
name = link
fi, err = os.Lstat(name)
if err != nil {
- return "", nil
+ return "", err
}
}
// Retry on EINTR; open() can return EINTR in practice on macOS.
// See #354, and Go issues 11180 and 39237.
for {
- watchfd, err = unix.Open(name, openMode, 0)
+ info.wd, err = unix.Open(name, openMode, 0)
if err == nil {
break
}
if errors.Is(err, unix.EINTR) {
continue
}
-
return "", err
}
- isDir = fi.IsDir()
+ info.isDir = fi.IsDir()
}
- err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags)
+ err := w.register([]int{info.wd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags)
if err != nil {
- unix.Close(watchfd)
+ unix.Close(info.wd)
return "", err
}
if !alreadyWatching {
- w.mu.Lock()
- parentName := filepath.Dir(name)
- w.watches[name] = watchfd
-
- watchesByDir, ok := w.watchesByDir[parentName]
- if !ok {
- watchesByDir = make(map[int]struct{}, 1)
- w.watchesByDir[parentName] = watchesByDir
- }
- watchesByDir[watchfd] = struct{}{}
- w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
- w.mu.Unlock()
+ w.watches.add(name, info.linkName, info.wd, info.isDir)
}
- if isDir {
- // Watch the directory if it has not been watched before, or if it was
- // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
- w.mu.Lock()
-
+ // Watch the directory if it has not been watched before, or if it was
+ // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
+ if info.isDir {
watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
- (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
- // Store flags so this watch can be updated later
- w.dirFlags[name] = flags
- w.mu.Unlock()
+ (!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE)
+ if !w.watches.updateDirFlags(name, flags) {
+ return "", nil
+ }
if watchDir {
- if err := w.watchDirectoryFiles(name); err != nil {
+ d := name
+ if info.linkName != "" {
+ d = info.linkName
+ }
+ if err := w.watchDirectoryFiles(d); err != nil {
return "", err
}
}
@@ -534,7 +426,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
// readEvents reads from kqueue and converts the received kevents into
// Event values that it sends down the Events channel.
-func (w *Watcher) readEvents() {
+func (w *kqueue) readEvents() {
defer func() {
close(w.Events)
close(w.Errors)
@@ -543,50 +435,65 @@ func (w *Watcher) readEvents() {
}()
eventBuffer := make([]unix.Kevent_t, 10)
- for closed := false; !closed; {
+ for {
kevents, err := w.read(eventBuffer)
// EINTR is okay, the syscall was interrupted before timeout expired.
if err != nil && err != unix.EINTR {
if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) {
- closed = true
+ return
}
- continue
}
- // Flush the events we received to the Events channel
for _, kevent := range kevents {
var (
- watchfd = int(kevent.Ident)
- mask = uint32(kevent.Fflags)
+ wd = int(kevent.Ident)
+ mask = uint32(kevent.Fflags)
)
// Shut down the loop when the pipe is closed, but only after all
// other events have been processed.
- if watchfd == w.closepipe[0] {
- closed = true
- continue
+ if wd == w.closepipe[0] {
+ return
+ }
+
+ path, ok := w.watches.byWd(wd)
+ if debug {
+ internal.Debug(path.name, &kevent)
}
- w.mu.Lock()
- path := w.paths[watchfd]
- w.mu.Unlock()
+ // On macOS it seems that sometimes an event with Ident=0 is
+ // delivered, and no other flags/information beyond that, even
+ // though we never saw such a file descriptor. For example in
+ // TestWatchSymlink/277 (usually at the end, but sometimes sooner):
+ //
+ // fmt.Printf("READ: %2d %#v\n", kevent.Ident, kevent)
+ // unix.Kevent_t{Ident:0x2a, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)}
+ // unix.Kevent_t{Ident:0x0, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)}
+ //
+ // The first is a normal event, the second with Ident 0. No error
+ // flag, no data, no ... nothing.
+ //
+ // I read a bit through bsd/kern_event.c from the xnu source, but I
+ // don't really see an obvious location where this is triggered –
+ // this doesn't seem intentional, but the exact cause is unclear.
+ //
+ // Technically fd 0 is a valid descriptor, so only skip it if
+ // there's no path, and if we're on macOS.
+ if !ok && kevent.Ident == 0 && runtime.GOOS == "darwin" {
+ continue
+ }
- event := w.newEvent(path.name, mask)
+ event := w.newEvent(path.name, path.linkName, mask)
if event.Has(Rename) || event.Has(Remove) {
w.remove(event.Name, false)
- w.mu.Lock()
- delete(w.fileExists, event.Name)
- w.mu.Unlock()
+ w.watches.markSeen(event.Name, false)
}
if path.isDir && event.Has(Write) && !event.Has(Remove) {
- w.sendDirectoryChangeEvents(event.Name)
- } else {
- if !w.sendEvent(event) {
- closed = true
- continue
- }
+ w.dirChange(event.Name)
+ } else if !w.sendEvent(event) {
+ return
}
if event.Has(Remove) {
@@ -594,25 +501,34 @@ func (w *Watcher) readEvents() {
// mv f1 f2 will delete f2, then create f2.
if path.isDir {
fileDir := filepath.Clean(event.Name)
- w.mu.Lock()
- _, found := w.watches[fileDir]
- w.mu.Unlock()
+ _, found := w.watches.byPath(fileDir)
if found {
- err := w.sendDirectoryChangeEvents(fileDir)
- if err != nil {
- if !w.sendError(err) {
- closed = true
- }
+ // TODO: this branch is never triggered in any test.
+ // Added in d6220df (2012).
+ // isDir check added in 8611c35 (2016): https://github.com/fsnotify/fsnotify/pull/111
+ //
+ // I don't really get how this can be triggered either.
+ // And it wasn't triggered in the patch that added it,
+ // either.
+ //
+ // Original also had a comment:
+ // make sure the directory exists before we watch for
+ // changes. When we do a recursive watch and perform
+ // rm -rf, the parent directory might have gone
+ // missing, ignore the missing directory and let the
+ // upcoming delete event remove the watch from the
+ // parent directory.
+ err := w.dirChange(fileDir)
+ if !w.sendError(err) {
+ return
}
}
} else {
- filePath := filepath.Clean(event.Name)
- if fi, err := os.Lstat(filePath); err == nil {
- err := w.sendFileCreatedEventIfNew(filePath, fi)
- if err != nil {
- if !w.sendError(err) {
- closed = true
- }
+ path := filepath.Clean(event.Name)
+ if fi, err := os.Lstat(path); err == nil {
+ err := w.sendCreateIfNew(path, fi)
+ if !w.sendError(err) {
+ return
}
}
}
@@ -622,8 +538,14 @@ func (w *Watcher) readEvents() {
}
// newEvent returns a platform-independent Event based on kqueue Fflags.
-func (w *Watcher) newEvent(name string, mask uint32) Event {
+func (w *kqueue) newEvent(name, linkName string, mask uint32) Event {
e := Event{Name: name}
+ if linkName != "" {
+ // If the user watched "/path/link" then emit events as "/path/link"
+ // rather than "/path/target".
+ e.Name = linkName
+ }
+
if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
e.Op |= Remove
}
@@ -645,8 +567,7 @@ func (w *Watcher) newEvent(name string, mask uint32) Event {
}
// watchDirectoryFiles watches all files in a directory, to mimic inotify when adding a watch on a directory
-func (w *Watcher) watchDirectoryFiles(dirPath string) error {
- // Get all files
+func (w *kqueue) watchDirectoryFiles(dirPath string) error {
files, err := os.ReadDir(dirPath)
if err != nil {
return err
@@ -674,9 +595,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error {
}
}
- w.mu.Lock()
- w.fileExists[cleanPath] = struct{}{}
- w.mu.Unlock()
+ w.watches.markSeen(cleanPath, true)
}
return nil
@@ -686,7 +605,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error {
//
// This functionality is to have the BSD watcher match inotify, which sends
// a create event for files created in a watched directory.
-func (w *Watcher) sendDirectoryChangeEvents(dir string) error {
+func (w *kqueue) dirChange(dir string) error {
files, err := os.ReadDir(dir)
if err != nil {
// Directory no longer exists: we can ignore this safely. kqueue will
@@ -694,69 +613,62 @@ func (w *Watcher) sendDirectoryChangeEvents(dir string) error {
if errors.Is(err, os.ErrNotExist) {
return nil
}
- return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)
+ return fmt.Errorf("fsnotify.dirChange %q: %w", dir, err)
}
for _, f := range files {
fi, err := f.Info()
if err != nil {
- return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)
+ if errors.Is(err, os.ErrNotExist) {
+ return nil
+ }
+ return fmt.Errorf("fsnotify.dirChange: %w", err)
}
- err = w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi)
+ err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi)
if err != nil {
// Don't need to send an error if this file isn't readable.
- if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) {
+ if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) || errors.Is(err, os.ErrNotExist) {
return nil
}
- return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)
+ return fmt.Errorf("fsnotify.dirChange: %w", err)
}
}
return nil
}
-// sendFileCreatedEvent sends a create event if the file isn't already being tracked.
-func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fi os.FileInfo) (err error) {
- w.mu.Lock()
- _, doesExist := w.fileExists[filePath]
- w.mu.Unlock()
- if !doesExist {
- if !w.sendEvent(Event{Name: filePath, Op: Create}) {
- return
+// Send a create event if the file isn't already being tracked, and start
+// watching this file.
+func (w *kqueue) sendCreateIfNew(path string, fi os.FileInfo) error {
+ if !w.watches.seenBefore(path) {
+ if !w.sendEvent(Event{Name: path, Op: Create}) {
+ return nil
}
}
- // like watchDirectoryFiles (but without doing another ReadDir)
- filePath, err = w.internalWatch(filePath, fi)
+ // Like watchDirectoryFiles, but without doing another ReadDir.
+ path, err := w.internalWatch(path, fi)
if err != nil {
return err
}
-
- w.mu.Lock()
- w.fileExists[filePath] = struct{}{}
- w.mu.Unlock()
-
+ w.watches.markSeen(path, true)
return nil
}
-func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) {
+func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) {
if fi.IsDir() {
// mimic Linux providing delete events for subdirectories, but preserve
// the flags used if currently watching subdirectory
- w.mu.Lock()
- flags := w.dirFlags[name]
- w.mu.Unlock()
-
- flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
- return w.addWatch(name, flags)
+ info, _ := w.watches.byPath(name)
+ return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME, true)
}
- // watch file to mimic Linux inotify
- return w.addWatch(name, noteAllEvents)
+ // Watch file to mimic Linux inotify.
+ return w.addWatch(name, noteAllEvents, true)
}
// Register events with the queue.
-func (w *Watcher) register(fds []int, flags int, fflags uint32) error {
+func (w *kqueue) register(fds []int, flags int, fflags uint32) error {
changes := make([]unix.Kevent_t, len(fds))
for i, fd := range fds {
// SetKevent converts int to the platform-specific types.
@@ -773,10 +685,21 @@ func (w *Watcher) register(fds []int, flags int, fflags uint32) error {
}
// read retrieves pending events, or waits until an event occurs.
-func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) {
+func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) {
n, err := unix.Kevent(w.kq, nil, events, nil)
if err != nil {
return nil, err
}
return events[0:n], nil
}
+
+func (w *kqueue) xSupports(op Op) bool {
+ //if runtime.GOOS == "freebsd" {
+ // return true // Supports everything.
+ //}
+ if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
+ op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
+ return false
+ }
+ return true
+}
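
From the caller's perspective the kqueue backend above is invisible: events and errors surface on the same channels on every platform. A minimal consumer sketch against the public API shown in this diff (the watched path /tmp is only an example):

package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := w.Add("/tmp"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev, ok := <-w.Events:
			if !ok { // Events is closed once Close() is called.
				return
			}
			log.Printf("event: %s", ev)
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Printf("error: %v", err)
		}
	}
}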
diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go
index d34a23c01..b8c0ad722 100644
--- a/vendor/github.com/fsnotify/fsnotify/backend_other.go
+++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go
@@ -1,205 +1,22 @@
//go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows)
-// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows
-
-// Note: the documentation on the Watcher type and methods is generated from
-// mkdoc.zsh
package fsnotify
import "errors"
-// Watcher watches a set of paths, delivering events on a channel.
-//
-// A watcher should not be copied (e.g. pass it by pointer, rather than by
-// value).
-//
-// # Linux notes
-//
-// When a file is removed a Remove event won't be emitted until all file
-// descriptors are closed, and deletes will always emit a Chmod. For example:
-//
-// fp := os.Open("file")
-// os.Remove("file") // Triggers Chmod
-// fp.Close() // Triggers Remove
-//
-// This is the event that inotify sends, so not much can be changed about this.
-//
-// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
-// for the number of watches per user, and fs.inotify.max_user_instances
-// specifies the maximum number of inotify instances per user. Every Watcher you
-// create is an "instance", and every path you add is a "watch".
-//
-// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
-// /proc/sys/fs/inotify/max_user_instances
-//
-// To increase them you can use sysctl or write the value to the /proc file:
-//
-// # Default values on Linux 5.18
-// sysctl fs.inotify.max_user_watches=124983
-// sysctl fs.inotify.max_user_instances=128
-//
-// To make the changes persist on reboot edit /etc/sysctl.conf or
-// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
-// your distro's documentation):
-//
-// fs.inotify.max_user_watches=124983
-// fs.inotify.max_user_instances=128
-//
-// Reaching the limit will result in a "no space left on device" or "too many open
-// files" error.
-//
-// # kqueue notes (macOS, BSD)
-//
-// kqueue requires opening a file descriptor for every file that's being watched;
-// so if you're watching a directory with five files then that's six file
-// descriptors. You will run in to your system's "max open files" limit faster on
-// these platforms.
-//
-// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
-// control the maximum number of open files, as well as /etc/login.conf on BSD
-// systems.
-//
-// # Windows notes
-//
-// Paths can be added as "C:\path\to\dir", but forward slashes
-// ("C:/path/to/dir") will also work.
-//
-// When a watched directory is removed it will always send an event for the
-// directory itself, but may not send events for all files in that directory.
-// Sometimes it will send events for all times, sometimes it will send no
-// events, and often only for some files.
-//
-// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
-// value that is guaranteed to work with SMB filesystems. If you have many
-// events in quick succession this may not be enough, and you will have to use
-// [WithBufferSize] to increase the value.
-type Watcher struct {
- // Events sends the filesystem change events.
- //
- // fsnotify can send the following events; a "path" here can refer to a
- // file, directory, symbolic link, or special file like a FIFO.
- //
- // fsnotify.Create A new path was created; this may be followed by one
- // or more Write events if data also gets written to a
- // file.
- //
- // fsnotify.Remove A path was removed.
- //
- // fsnotify.Rename A path was renamed. A rename is always sent with the
- // old path as Event.Name, and a Create event will be
- // sent with the new name. Renames are only sent for
- // paths that are currently watched; e.g. moving an
- // unmonitored file into a monitored directory will
- // show up as just a Create. Similarly, renaming a file
- // to outside a monitored directory will show up as
- // only a Rename.
- //
- // fsnotify.Write A file or named pipe was written to. A Truncate will
- // also trigger a Write. A single "write action"
- // initiated by the user may show up as one or multiple
- // writes, depending on when the system syncs things to
- // disk. For example when compiling a large Go program
- // you may get hundreds of Write events, and you may
- // want to wait until you've stopped receiving them
- // (see the dedup example in cmd/fsnotify).
- //
- // Some systems may send Write event for directories
- // when the directory content changes.
- //
- // fsnotify.Chmod Attributes were changed. On Linux this is also sent
- // when a file is removed (or more accurately, when a
- // link to an inode is removed). On kqueue it's sent
- // when a file is truncated. On Windows it's never
- // sent.
+type other struct {
Events chan Event
-
- // Errors sends any errors.
- //
- // ErrEventOverflow is used to indicate there are too many events:
- //
- // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
- // - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
- // - kqueue, fen: Not used.
Errors chan error
}
-// NewWatcher creates a new Watcher.
-func NewWatcher() (*Watcher, error) {
+var defaultBufferSize = 0
+
+func newBackend(ev chan Event, errs chan error) (backend, error) {
return nil, errors.New("fsnotify not supported on the current platform")
}
-
-// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
-// channel.
-//
-// The main use case for this is situations with a very large number of events
-// where the kernel buffer size can't be increased (e.g. due to lack of
-// permissions). An unbuffered Watcher will perform better for almost all use
-// cases, and whenever possible you will be better off increasing the kernel
-// buffers instead of adding a large userspace buffer.
-func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() }
-
-// Close removes all watches and closes the Events channel.
-func (w *Watcher) Close() error { return nil }
-
-// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
-// yet removed).
-//
-// Returns nil if [Watcher.Close] was called.
-func (w *Watcher) WatchList() []string { return nil }
-
-// Add starts monitoring the path for changes.
-//
-// A path can only be watched once; watching it more than once is a no-op and will
-// not return an error. Paths that do not yet exist on the filesystem cannot be
-// watched.
-//
-// A watch will be automatically removed if the watched path is deleted or
-// renamed. The exception is the Windows backend, which doesn't remove the
-// watcher on renames.
-//
-// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
-// filesystems (/proc, /sys, etc.) generally don't work.
-//
-// Returns [ErrClosed] if [Watcher.Close] was called.
-//
-// See [Watcher.AddWith] for a version that allows adding options.
-//
-// # Watching directories
-//
-// All files in a directory are monitored, including new files that are created
-// after the watcher is started. Subdirectories are not watched (i.e. it's
-// non-recursive).
-//
-// # Watching files
-//
-// Watching individual files (rather than directories) is generally not
-// recommended as many programs (especially editors) update files atomically: it
-// will write to a temporary file which is then moved to to destination,
-// overwriting the original (or some variant thereof). The watcher on the
-// original file is now lost, as that no longer exists.
-//
-// The upshot of this is that a power failure or crash won't leave a
-// half-written file.
-//
-// Watch the parent directory and use Event.Name to filter out files you're not
-// interested in. There is an example of this in cmd/fsnotify/file.go.
-func (w *Watcher) Add(name string) error { return nil }
-
-// AddWith is like [Watcher.Add], but allows adding options. When using Add()
-// the defaults described below are used.
-//
-// Possible options are:
-//
-// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
-// other platforms. The default is 64K (65536 bytes).
-func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil }
-
-// Remove stops monitoring the path for changes.
-//
-// Directories are always removed non-recursively. For example, if you added
-// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
-//
-// Removing a path that has not yet been added returns [ErrNonExistentWatch].
-//
-// Returns nil if [Watcher.Close] was called.
-func (w *Watcher) Remove(name string) error { return nil }
+func (w *other) Close() error { return nil }
+func (w *other) WatchList() []string { return nil }
+func (w *other) Add(name string) error { return nil }
+func (w *other) AddWith(name string, opts ...addOpt) error { return nil }
+func (w *other) Remove(name string) error { return nil }
+func (w *other) xSupports(op Op) bool { return false }
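
The stub above mirrors how fsnotify selects a backend: each platform file defines the same newBackend constructor behind a //go:build constraint, so exactly one implementation compiles in. A toy sketch of the pattern (the package and names here are illustrative, not fsnotify's):

// feature_linux.go
//go:build linux

package feature

func platformBackend() string { return "inotify" }

// feature_other.go
//go:build !linux

package feature

func platformBackend() string { return "unsupported" }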
diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go
index 9bc91e5d6..3433642d6 100644
--- a/vendor/github.com/fsnotify/fsnotify/backend_windows.go
+++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go
@@ -1,12 +1,8 @@
//go:build windows
-// +build windows
// Windows backend based on ReadDirectoryChangesW()
//
// https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw
-//
-// Note: the documentation on the Watcher type and methods is generated from
-// mkdoc.zsh
package fsnotify
@@ -19,196 +15,80 @@ import (
"runtime"
"strings"
"sync"
+ "time"
"unsafe"
+ "github.com/fsnotify/fsnotify/internal"
"golang.org/x/sys/windows"
)
-// Watcher watches a set of paths, delivering events on a channel.
-//
-// A watcher should not be copied (e.g. pass it by pointer, rather than by
-// value).
-//
-// # Linux notes
-//
-// When a file is removed a Remove event won't be emitted until all file
-// descriptors are closed, and deletes will always emit a Chmod. For example:
-//
-// fp := os.Open("file")
-// os.Remove("file") // Triggers Chmod
-// fp.Close() // Triggers Remove
-//
-// This is the event that inotify sends, so not much can be changed about this.
-//
-// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
-// for the number of watches per user, and fs.inotify.max_user_instances
-// specifies the maximum number of inotify instances per user. Every Watcher you
-// create is an "instance", and every path you add is a "watch".
-//
-// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
-// /proc/sys/fs/inotify/max_user_instances
-//
-// To increase them you can use sysctl or write the value to the /proc file:
-//
-// # Default values on Linux 5.18
-// sysctl fs.inotify.max_user_watches=124983
-// sysctl fs.inotify.max_user_instances=128
-//
-// To make the changes persist on reboot edit /etc/sysctl.conf or
-// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
-// your distro's documentation):
-//
-// fs.inotify.max_user_watches=124983
-// fs.inotify.max_user_instances=128
-//
-// Reaching the limit will result in a "no space left on device" or "too many open
-// files" error.
-//
-// # kqueue notes (macOS, BSD)
-//
-// kqueue requires opening a file descriptor for every file that's being watched;
-// so if you're watching a directory with five files then that's six file
-// descriptors. You will run in to your system's "max open files" limit faster on
-// these platforms.
-//
-// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
-// control the maximum number of open files, as well as /etc/login.conf on BSD
-// systems.
-//
-// # Windows notes
-//
-// Paths can be added as "C:\path\to\dir", but forward slashes
-// ("C:/path/to/dir") will also work.
-//
-// When a watched directory is removed it will always send an event for the
-// directory itself, but may not send events for all files in that directory.
-// Sometimes it will send events for all times, sometimes it will send no
-// events, and often only for some files.
-//
-// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
-// value that is guaranteed to work with SMB filesystems. If you have many
-// events in quick succession this may not be enough, and you will have to use
-// [WithBufferSize] to increase the value.
-type Watcher struct {
- // Events sends the filesystem change events.
- //
- // fsnotify can send the following events; a "path" here can refer to a
- // file, directory, symbolic link, or special file like a FIFO.
- //
- // fsnotify.Create A new path was created; this may be followed by one
- // or more Write events if data also gets written to a
- // file.
- //
- // fsnotify.Remove A path was removed.
- //
- // fsnotify.Rename A path was renamed. A rename is always sent with the
- // old path as Event.Name, and a Create event will be
- // sent with the new name. Renames are only sent for
- // paths that are currently watched; e.g. moving an
- // unmonitored file into a monitored directory will
- // show up as just a Create. Similarly, renaming a file
- // to outside a monitored directory will show up as
- // only a Rename.
- //
- // fsnotify.Write A file or named pipe was written to. A Truncate will
- // also trigger a Write. A single "write action"
- // initiated by the user may show up as one or multiple
- // writes, depending on when the system syncs things to
- // disk. For example when compiling a large Go program
- // you may get hundreds of Write events, and you may
- // want to wait until you've stopped receiving them
- // (see the dedup example in cmd/fsnotify).
- //
- // Some systems may send Write event for directories
- // when the directory content changes.
- //
- // fsnotify.Chmod Attributes were changed. On Linux this is also sent
- // when a file is removed (or more accurately, when a
- // link to an inode is removed). On kqueue it's sent
- // when a file is truncated. On Windows it's never
- // sent.
+type readDirChangesW struct {
Events chan Event
-
- // Errors sends any errors.
- //
- // ErrEventOverflow is used to indicate there are too many events:
- //
- // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
- // - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
- // - kqueue, fen: Not used.
Errors chan error
port windows.Handle // Handle to completion port
input chan *input // Inputs to the reader are sent on this channel
- quit chan chan<- error
+ done chan chan<- error
mu sync.Mutex // Protects access to watches, closed
watches watchMap // Map of watches (key: i-number)
closed bool // Set to true when Close() is first called
}
-// NewWatcher creates a new Watcher.
-func NewWatcher() (*Watcher, error) {
- return NewBufferedWatcher(50)
-}
+var defaultBufferSize = 50
-// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
-// channel.
-//
-// The main use case for this is situations with a very large number of events
-// where the kernel buffer size can't be increased (e.g. due to lack of
-// permissions). An unbuffered Watcher will perform better for almost all use
-// cases, and whenever possible you will be better off increasing the kernel
-// buffers instead of adding a large userspace buffer.
-func NewBufferedWatcher(sz uint) (*Watcher, error) {
+func newBackend(ev chan Event, errs chan error) (backend, error) {
port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0)
if err != nil {
return nil, os.NewSyscallError("CreateIoCompletionPort", err)
}
- w := &Watcher{
+ w := &readDirChangesW{
+ Events: ev,
+ Errors: errs,
port: port,
watches: make(watchMap),
input: make(chan *input, 1),
- Events: make(chan Event, sz),
- Errors: make(chan error),
- quit: make(chan chan<- error, 1),
+ done: make(chan chan<- error, 1),
}
go w.readEvents()
return w, nil
}
-func (w *Watcher) isClosed() bool {
+func (w *readDirChangesW) isClosed() bool {
w.mu.Lock()
defer w.mu.Unlock()
return w.closed
}
-func (w *Watcher) sendEvent(name string, mask uint64) bool {
+func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool {
if mask == 0 {
return false
}
event := w.newEvent(name, uint32(mask))
+ event.renamedFrom = renamedFrom
select {
- case ch := <-w.quit:
- w.quit <- ch
+ case ch := <-w.done:
+ w.done <- ch
case w.Events <- event:
}
return true
}
// Returns true if the error was sent, or false if watcher is closed.
-func (w *Watcher) sendError(err error) bool {
+func (w *readDirChangesW) sendError(err error) bool {
+ if err == nil {
+ return true
+ }
select {
+ case <-w.done:
+ return false
case w.Errors <- err:
return true
- case <-w.quit:
}
- return false
}
-// Close removes all watches and closes the Events channel.
-func (w *Watcher) Close() error {
+func (w *readDirChangesW) Close() error {
if w.isClosed() {
return nil
}
@@ -217,66 +97,30 @@ func (w *Watcher) Close() error {
w.closed = true
w.mu.Unlock()
- // Send "quit" message to the reader goroutine
+ // Send "done" message to the reader goroutine
ch := make(chan error)
- w.quit <- ch
+ w.done <- ch
if err := w.wakeupReader(); err != nil {
return err
}
return <-ch
}
-// Add starts monitoring the path for changes.
-//
-// A path can only be watched once; watching it more than once is a no-op and will
-// not return an error. Paths that do not yet exist on the filesystem cannot be
-// watched.
-//
-// A watch will be automatically removed if the watched path is deleted or
-// renamed. The exception is the Windows backend, which doesn't remove the
-// watcher on renames.
-//
-// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
-// filesystems (/proc, /sys, etc.) generally don't work.
-//
-// Returns [ErrClosed] if [Watcher.Close] was called.
-//
-// See [Watcher.AddWith] for a version that allows adding options.
-//
-// # Watching directories
-//
-// All files in a directory are monitored, including new files that are created
-// after the watcher is started. Subdirectories are not watched (i.e. it's
-// non-recursive).
-//
-// # Watching files
-//
-// Watching individual files (rather than directories) is generally not
-// recommended as many programs (especially editors) update files atomically: it
-// will write to a temporary file which is then moved to to destination,
-// overwriting the original (or some variant thereof). The watcher on the
-// original file is now lost, as that no longer exists.
-//
-// The upshot of this is that a power failure or crash won't leave a
-// half-written file.
-//
-// Watch the parent directory and use Event.Name to filter out files you're not
-// interested in. There is an example of this in cmd/fsnotify/file.go.
-func (w *Watcher) Add(name string) error { return w.AddWith(name) }
+func (w *readDirChangesW) Add(name string) error { return w.AddWith(name) }
-// AddWith is like [Watcher.Add], but allows adding options. When using Add()
-// the defaults described below are used.
-//
-// Possible options are:
-//
-// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
-// other platforms. The default is 64K (65536 bytes).
-func (w *Watcher) AddWith(name string, opts ...addOpt) error {
+func (w *readDirChangesW) AddWith(name string, opts ...addOpt) error {
if w.isClosed() {
return ErrClosed
}
+ if debug {
+ fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
+ time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name))
+ }
with := getOptions(opts...)
+ if !w.xSupports(with.op) {
+ return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
+ }
if with.bufsize < 4096 {
return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes")
}
@@ -295,18 +139,14 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error {
return <-in.reply
}
-// Remove stops monitoring the path for changes.
-//
-// Directories are always removed non-recursively. For example, if you added
-// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
-//
-// Removing a path that has not yet been added returns [ErrNonExistentWatch].
-//
-// Returns nil if [Watcher.Close] was called.
-func (w *Watcher) Remove(name string) error {
+func (w *readDirChangesW) Remove(name string) error {
if w.isClosed() {
return nil
}
+ if debug {
+ fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
+ time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name))
+ }
in := &input{
op: opRemoveWatch,
@@ -320,11 +160,7 @@ func (w *Watcher) Remove(name string) error {
return <-in.reply
}
-// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
-// yet removed).
-//
-// Returns nil if [Watcher.Close] was called.
-func (w *Watcher) WatchList() []string {
+func (w *readDirChangesW) WatchList() []string {
if w.isClosed() {
return nil
}
@@ -335,7 +171,13 @@ func (w *Watcher) WatchList() []string {
entries := make([]string, 0, len(w.watches))
for _, entry := range w.watches {
for _, watchEntry := range entry {
- entries = append(entries, watchEntry.path)
+ for name := range watchEntry.names {
+ entries = append(entries, filepath.Join(watchEntry.path, name))
+ }
+ // the directory itself is being watched
+ if watchEntry.mask != 0 {
+ entries = append(entries, watchEntry.path)
+ }
}
}
@@ -361,7 +203,7 @@ const (
sysFSIGNORED = 0x8000
)
-func (w *Watcher) newEvent(name string, mask uint32) Event {
+func (w *readDirChangesW) newEvent(name string, mask uint32) Event {
e := Event{Name: name}
if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
e.Op |= Create
@@ -417,7 +259,7 @@ type (
watchMap map[uint32]indexMap
)
-func (w *Watcher) wakeupReader() error {
+func (w *readDirChangesW) wakeupReader() error {
err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil)
if err != nil {
return os.NewSyscallError("PostQueuedCompletionStatus", err)
@@ -425,7 +267,7 @@ func (w *Watcher) wakeupReader() error {
return nil
}
-func (w *Watcher) getDir(pathname string) (dir string, err error) {
+func (w *readDirChangesW) getDir(pathname string) (dir string, err error) {
attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname))
if err != nil {
return "", os.NewSyscallError("GetFileAttributes", err)
@@ -439,7 +281,7 @@ func (w *Watcher) getDir(pathname string) (dir string, err error) {
return
}
-func (w *Watcher) getIno(path string) (ino *inode, err error) {
+func (w *readDirChangesW) getIno(path string) (ino *inode, err error) {
h, err := windows.CreateFile(windows.StringToUTF16Ptr(path),
windows.FILE_LIST_DIRECTORY,
windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE,
@@ -482,9 +324,8 @@ func (m watchMap) set(ino *inode, watch *watch) {
}
// Must run within the I/O thread.
-func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error {
- //pathname, recurse := recursivePath(pathname)
- recurse := false
+func (w *readDirChangesW) addWatch(pathname string, flags uint64, bufsize int) error {
+ pathname, recurse := recursivePath(pathname)
dir, err := w.getDir(pathname)
if err != nil {
@@ -538,7 +379,7 @@ func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error {
}
// Must run within the I/O thread.
-func (w *Watcher) remWatch(pathname string) error {
+func (w *readDirChangesW) remWatch(pathname string) error {
pathname, recurse := recursivePath(pathname)
dir, err := w.getDir(pathname)
@@ -566,11 +407,11 @@ func (w *Watcher) remWatch(pathname string) error {
return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname)
}
if pathname == dir {
- w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
+ w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED)
watch.mask = 0
} else {
name := filepath.Base(pathname)
- w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
+ w.sendEvent(filepath.Join(watch.path, name), "", watch.names[name]&sysFSIGNORED)
delete(watch.names, name)
}
@@ -578,23 +419,23 @@ func (w *Watcher) remWatch(pathname string) error {
}
// Must run within the I/O thread.
-func (w *Watcher) deleteWatch(watch *watch) {
+func (w *readDirChangesW) deleteWatch(watch *watch) {
for name, mask := range watch.names {
if mask&provisional == 0 {
- w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
+ w.sendEvent(filepath.Join(watch.path, name), "", mask&sysFSIGNORED)
}
delete(watch.names, name)
}
if watch.mask != 0 {
if watch.mask&provisional == 0 {
- w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
+ w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED)
}
watch.mask = 0
}
}
// Must run within the I/O thread.
-func (w *Watcher) startRead(watch *watch) error {
+func (w *readDirChangesW) startRead(watch *watch) error {
err := windows.CancelIo(watch.ino.handle)
if err != nil {
w.sendError(os.NewSyscallError("CancelIo", err))
@@ -624,7 +465,7 @@ func (w *Watcher) startRead(watch *watch) error {
err := os.NewSyscallError("ReadDirectoryChanges", rdErr)
if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
// Watched directory was probably removed
- w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
+ w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF)
err = nil
}
w.deleteWatch(watch)
@@ -637,7 +478,7 @@ func (w *Watcher) startRead(watch *watch) error {
// readEvents reads from the I/O completion port, converts the
// received events into Event objects and sends them via the Events channel.
// Entry point to the I/O thread.
-func (w *Watcher) readEvents() {
+func (w *readDirChangesW) readEvents() {
var (
n uint32
key uintptr
@@ -652,7 +493,7 @@ func (w *Watcher) readEvents() {
watch := (*watch)(unsafe.Pointer(ov))
if watch == nil {
select {
- case ch := <-w.quit:
+ case ch := <-w.done:
w.mu.Lock()
var indexes []indexMap
for _, index := range w.watches {
@@ -700,7 +541,7 @@ func (w *Watcher) readEvents() {
}
case windows.ERROR_ACCESS_DENIED:
// Watched directory was probably removed
- w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
+ w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF)
w.deleteWatch(watch)
w.startRead(watch)
continue
@@ -733,6 +574,10 @@ func (w *Watcher) readEvents() {
name := windows.UTF16ToString(buf)
fullname := filepath.Join(watch.path, name)
+ if debug {
+ internal.Debug(fullname, raw.Action)
+ }
+
var mask uint64
switch raw.Action {
case windows.FILE_ACTION_REMOVED:
@@ -761,21 +606,22 @@ func (w *Watcher) readEvents() {
}
}
- sendNameEvent := func() {
- w.sendEvent(fullname, watch.names[name]&mask)
- }
if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME {
- sendNameEvent()
+ w.sendEvent(fullname, "", watch.names[name]&mask)
}
if raw.Action == windows.FILE_ACTION_REMOVED {
- w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
+ w.sendEvent(fullname, "", watch.names[name]&sysFSIGNORED)
delete(watch.names, name)
}
- w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action))
+ if watch.rename != "" && raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME {
+ w.sendEvent(fullname, filepath.Join(watch.path, watch.rename), watch.mask&w.toFSnotifyFlags(raw.Action))
+ } else {
+ w.sendEvent(fullname, "", watch.mask&w.toFSnotifyFlags(raw.Action))
+ }
+
if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME {
- fullname = filepath.Join(watch.path, watch.rename)
- sendNameEvent()
+ w.sendEvent(filepath.Join(watch.path, watch.rename), "", watch.names[name]&mask)
}
// Move to the next event in the buffer
@@ -787,8 +633,7 @@ func (w *Watcher) readEvents() {
// Error!
if offset >= n {
//lint:ignore ST1005 Windows should be capitalized
- w.sendError(errors.New(
- "Windows system assumed buffer larger than it is, events have likely been missed"))
+ w.sendError(errors.New("Windows system assumed buffer larger than it is, events have likely been missed"))
break
}
}
@@ -799,7 +644,7 @@ func (w *Watcher) readEvents() {
}
}
-func (w *Watcher) toWindowsFlags(mask uint64) uint32 {
+func (w *readDirChangesW) toWindowsFlags(mask uint64) uint32 {
var m uint32
if mask&sysFSMODIFY != 0 {
m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE
@@ -810,7 +655,7 @@ func (w *Watcher) toWindowsFlags(mask uint64) uint32 {
return m
}
-func (w *Watcher) toFSnotifyFlags(action uint32) uint64 {
+func (w *readDirChangesW) toFSnotifyFlags(action uint32) uint64 {
switch action {
case windows.FILE_ACTION_ADDED:
return sysFSCREATE
@@ -825,3 +670,11 @@ func (w *Watcher) toFSnotifyFlags(action uint32) uint64 {
}
return 0
}
+
+func (w *readDirChangesW) xSupports(op Op) bool {
+ if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
+ op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
+ return false
+ }
+ return true
+}
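
The 4096-byte lower bound enforced in AddWith above, together with the 64K default, frames how the Windows buffer option is meant to be used. A hedged sketch, assuming the log and fsnotify imports (the path and 256K size are example values; WithBufferSize is a no-op on other platforms):

w, err := fsnotify.NewWatcher()
if err != nil {
	log.Fatal(err)
}
// Larger buffer for a directory with bursty change rates; values below
// 4096 bytes are rejected by AddWith.
if err := w.AddWith(`C:\projects\build`, fsnotify.WithBufferSize(256<<10)); err != nil {
	log.Fatal(err)
}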
diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
index 24c99cc49..f64be4bf9 100644
--- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go
+++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
@@ -3,19 +3,146 @@
//
// Currently supported systems:
//
-// Linux 2.6.32+ via inotify
-// BSD, macOS via kqueue
-// Windows via ReadDirectoryChangesW
-// illumos via FEN
+// - Linux via inotify
+// - BSD, macOS via kqueue
+// - Windows via ReadDirectoryChangesW
+// - illumos via FEN
+//
+// # FSNOTIFY_DEBUG
+//
+// Set the FSNOTIFY_DEBUG environment variable to "1" to print debug messages to
+// stderr. This can be useful to track down some problems, especially in cases
+// where fsnotify is used as an indirect dependency.
+//
+// Every event will be printed as soon as there's something useful to print,
+// with as little processing from fsnotify as possible.
+//
+// Example output:
+//
+// FSNOTIFY_DEBUG: 11:34:23.633087586 256:IN_CREATE → "/tmp/file-1"
+// FSNOTIFY_DEBUG: 11:34:23.633202319 4:IN_ATTRIB → "/tmp/file-1"
+// FSNOTIFY_DEBUG: 11:34:28.989728764 512:IN_DELETE → "/tmp/file-1"
package fsnotify
import (
"errors"
"fmt"
+ "os"
"path/filepath"
"strings"
)
+// Watcher watches a set of paths, delivering events on a channel.
+//
+// A watcher should not be copied (e.g. pass it by pointer, rather than by
+// value).
+//
+// # Linux notes
+//
+// When a file is removed a Remove event won't be emitted until all file
+// descriptors are closed, and deletes will always emit a Chmod. For example:
+//
+// fp := os.Open("file")
+// os.Remove("file") // Triggers Chmod
+// fp.Close() // Triggers Remove
+//
+// This is the event that inotify sends, so not much can be changed about this.
+//
+// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
+// for the number of watches per user, and fs.inotify.max_user_instances
+// specifies the maximum number of inotify instances per user. Every Watcher you
+// create is an "instance", and every path you add is a "watch".
+//
+// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
+// /proc/sys/fs/inotify/max_user_instances
+//
+// To increase them you can use sysctl or write the value to the /proc file:
+//
+// # Default values on Linux 5.18
+// sysctl fs.inotify.max_user_watches=124983
+// sysctl fs.inotify.max_user_instances=128
+//
+// To make the changes persist on reboot edit /etc/sysctl.conf or
+// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
+// your distro's documentation):
+//
+// fs.inotify.max_user_watches=124983
+// fs.inotify.max_user_instances=128
+//
+// Reaching the limit will result in a "no space left on device" or "too many open
+// files" error.
+//
+// # kqueue notes (macOS, BSD)
+//
+// kqueue requires opening a file descriptor for every file that's being watched;
+// so if you're watching a directory with five files then that's six file
+// descriptors. You will run into your system's "max open files" limit faster on
+// these platforms.
+//
+// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
+// control the maximum number of open files, as well as /etc/login.conf on BSD
+// systems.
+//
+// # Windows notes
+//
+// Paths can be added as "C:\\path\\to\\dir", but forward slashes
+// ("C:/path/to/dir") will also work.
+//
+// When a watched directory is removed it will always send an event for the
+// directory itself, but may not send events for all files in that directory.
+// Sometimes it will send events for all files, sometimes it will send no
+// events, and often only for some files.
+//
+// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
+// value that is guaranteed to work with SMB filesystems. If you have many
+// events in quick succession this may not be enough, and you will have to use
+// [WithBufferSize] to increase the value.
+type Watcher struct {
+ b backend
+
+ // Events sends the filesystem change events.
+ //
+ // fsnotify can send the following events; a "path" here can refer to a
+ // file, directory, symbolic link, or special file like a FIFO.
+ //
+ // fsnotify.Create A new path was created; this may be followed by one
+ // or more Write events if data also gets written to a
+ // file.
+ //
+ // fsnotify.Remove A path was removed.
+ //
+ // fsnotify.Rename A path was renamed. A rename is always sent with the
+ // old path as Event.Name, and a Create event will be
+ // sent with the new name. Renames are only sent for
+ // paths that are currently watched; e.g. moving an
+ // unmonitored file into a monitored directory will
+ // show up as just a Create. Similarly, renaming a file
+ // to outside a monitored directory will show up as
+ // only a Rename.
+ //
+ // fsnotify.Write A file or named pipe was written to. A Truncate will
+ // also trigger a Write. A single "write action"
+ // initiated by the user may show up as one or multiple
+ // writes, depending on when the system syncs things to
+ // disk. For example when compiling a large Go program
+ // you may get hundreds of Write events, and you may
+ // want to wait until you've stopped receiving them
+ // (see the dedup example in cmd/fsnotify).
+ //
+ // Some systems may send a Write event for directories
+ // when the directory content changes.
+ //
+ // fsnotify.Chmod Attributes were changed. On Linux this is also sent
+ // when a file is removed (or more accurately, when a
+ // link to an inode is removed). On kqueue it's sent
+ // when a file is truncated. On Windows it's never
+ // sent.
+ Events chan Event
+
+ // Errors sends any errors.
+ Errors chan error
+}
+
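
The Write description above warns that one logical save can arrive as many Write events. The usual remedy is to debounce per path, as the dedup example in cmd/fsnotify does; a compact sketch of that idea (the 100ms quiet window is an arbitrary choice):

package main

import (
	"log"
	"sync"
	"time"

	"github.com/fsnotify/fsnotify"
)

// debounceWrites reports a path only after 100ms pass without another
// Write event for it.
func debounceWrites(w *fsnotify.Watcher) {
	var (
		mu     sync.Mutex
		timers = map[string]*time.Timer{}
	)
	for ev := range w.Events {
		if !ev.Has(fsnotify.Write) {
			continue
		}
		name := ev.Name
		mu.Lock()
		if t, ok := timers[name]; ok {
			t.Reset(100 * time.Millisecond)
		} else {
			timers[name] = time.AfterFunc(100*time.Millisecond, func() {
				log.Printf("write settled: %s", name)
				mu.Lock()
				delete(timers, name)
				mu.Unlock()
			})
		}
		mu.Unlock()
	}
}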
// Event represents a file system notification.
type Event struct {
// Path to the file or directory.
@@ -30,6 +157,16 @@ type Event struct {
// This is a bitmask and some systems may send multiple operations at once.
// Use the Event.Has() method instead of comparing with ==.
Op Op
+
+ // Create events will have this set to the old path if it's a rename. This
+ // only works when both the source and destination are watched. It's not
+ // reliable when watching individual files, only directories.
+ //
+ // For example "mv /tmp/file /tmp/rename" will emit:
+ //
+ // Event{Op: Rename, Name: "/tmp/file"}
+ // Event{Op: Create, Name: "/tmp/rename", RenamedFrom: "/tmp/file"}
+ renamedFrom string
}
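
Since renamedFrom stays unexported, callers reconstruct renames from the event order described above: a Rename for the old path, then a Create for the new one. A heuristic sketch, assuming w is an active *fsnotify.Watcher with the log import (only sound when both paths are in watched directories, and other events may interleave):

func pairRenames(w *fsnotify.Watcher) {
	var lastRenamed string
	for ev := range w.Events {
		switch {
		case ev.Has(fsnotify.Rename):
			lastRenamed = ev.Name
		case ev.Has(fsnotify.Create) && lastRenamed != "":
			log.Printf("renamed: %s -> %s", lastRenamed, ev.Name)
			lastRenamed = ""
		}
	}
}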
// Op describes a set of file operations.
@@ -50,7 +187,7 @@ const (
// example "remove to trash" is often a rename).
Remove
- // The path was renamed to something else; any watched on it will be
+ // The path was renamed to something else; any watches on it will be
// removed.
Rename
@@ -60,15 +197,157 @@ const (
// get triggered very frequently by some software. For example, Spotlight
// indexing on macOS, anti-virus software, backup software, etc.
Chmod
+
+ // File descriptor was opened.
+ //
+ // Only works on Linux and FreeBSD.
+ xUnportableOpen
+
+ // File was read from.
+ //
+ // Only works on Linux and FreeBSD.
+ xUnportableRead
+
+ // File opened for writing was closed.
+ //
+ // Only works on Linux and FreeBSD.
+ //
+ // The advantage of using this over Write is that it's more reliable than
+ // waiting for Write events to stop. It's also faster (if you're not
+ // listening to Write events): copying a file of a few GB can easily
+ // generate tens of thousands of Write events in a short span of time.
+ xUnportableCloseWrite
+
+ // File opened for reading was closed.
+ //
+ // Only works on Linux and FreeBSD.
+ xUnportableCloseRead
)
-// Common errors that can be reported.
var (
+ // ErrNonExistentWatch is used when Remove() is called on a path that's not
+ // added.
ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch")
- ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow")
- ErrClosed = errors.New("fsnotify: watcher already closed")
+
+ // ErrClosed is used when trying to operate on a closed Watcher.
+ ErrClosed = errors.New("fsnotify: watcher already closed")
+
+ // ErrEventOverflow is reported from the Errors channel when there are too
+ // many events:
+ //
+ // - inotify: inotify returns IN_Q_OVERFLOW – because there are too
+ // many queued events (the fs.inotify.max_queued_events
+ // sysctl can be used to increase this).
+ // - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
+ // - kqueue, fen: Not used.
+ ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow")
+
+ // ErrUnsupported is returned by AddWith() when WithOps() specified an
+ // Unportable event that's not supported on this platform.
+ //lint:ignore ST1012 not relevant
+ xErrUnsupported = errors.New("fsnotify: not supported with this backend")
)
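
A consumer can distinguish these sentinels with errors.Is; an overflow means events were dropped, so the typical recovery is a rescan of the watched tree. A hedged sketch, assuming w is an active *fsnotify.Watcher with the errors and log imports (rescan is a hypothetical application hook, not part of fsnotify):

for err := range w.Errors {
	if errors.Is(err, fsnotify.ErrEventOverflow) {
		log.Print("event queue overflowed; rescanning")
		// rescan() // hypothetical recovery hook
		continue
	}
	log.Printf("watch error: %v", err)
}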
+// NewWatcher creates a new Watcher.
+func NewWatcher() (*Watcher, error) {
+ ev, errs := make(chan Event, defaultBufferSize), make(chan error)
+ b, err := newBackend(ev, errs)
+ if err != nil {
+ return nil, err
+ }
+ return &Watcher{b: b, Events: ev, Errors: errs}, nil
+}
+
+// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
+// channel.
+//
+// The main use case for this is situations with a very large number of events
+// where the kernel buffer size can't be increased (e.g. due to lack of
+// permissions). An unbuffered Watcher will perform better for almost all use
+// cases, and whenever possible you will be better off increasing the kernel
+// buffers instead of adding a large userspace buffer.
+func NewBufferedWatcher(sz uint) (*Watcher, error) {
+ ev, errs := make(chan Event, sz), make(chan error)
+ b, err := newBackend(ev, errs)
+ if err != nil {
+ return nil, err
+ }
+ return &Watcher{b: b, Events: ev, Errors: errs}, nil
+}
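
Choosing between the two constructors follows directly from the comment above; a compact sketch (the capacity 1000 is an arbitrary example value):

func newWatcher(bursty bool) (*fsnotify.Watcher, error) {
	if bursty {
		// A userspace buffer absorbs event spikes when kernel buffers
		// can't be raised.
		return fsnotify.NewBufferedWatcher(1000)
	}
	// The unbuffered default performs better for almost all use cases.
	return fsnotify.NewWatcher()
}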
+
+// Add starts monitoring the path for changes.
+//
+// A path can only be watched once; watching it more than once is a no-op and will
+// not return an error. Paths that do not yet exist on the filesystem cannot be
+// watched.
+//
+// A watch will be automatically removed if the watched path is deleted or
+// renamed. The exception is the Windows backend, which doesn't remove the
+// watcher on renames.
+//
+// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
+// filesystems (/proc, /sys, etc.) generally don't work.
+//
+// Returns [ErrClosed] if [Watcher.Close] was called.
+//
+// See [Watcher.AddWith] for a version that allows adding options.
+//
+// # Watching directories
+//
+// All files in a directory are monitored, including new files that are created
+// after the watcher is started. Subdirectories are not watched (i.e. it's
+// non-recursive).
+//
+// # Watching files
+//
+// Watching individual files (rather than directories) is generally not
+// recommended as many programs (especially editors) update files atomically: it
+// will write to a temporary file which is then moved to the destination,
+// overwriting the original (or some variant thereof). The watcher on the
+// original file is now lost, as that no longer exists.
+//
+// The upshot of this is that a power failure or crash won't leave a
+// half-written file.
+//
+// Watch the parent directory and use Event.Name to filter out files you're not
+// interested in. There is an example of this in cmd/fsnotify/file.go.
+func (w *Watcher) Add(path string) error { return w.b.Add(path) }
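+
+// A minimal usage sketch (illustrative, not part of upstream fsnotify docs):
+// watch a directory and drain both channels; the path "/tmp" is an example.
+//
+//	w, _ := fsnotify.NewWatcher()
+//	defer w.Close()
+//	_ = w.Add("/tmp")
+//	for {
+//		select {
+//		case e, ok := <-w.Events:
+//			if !ok {
+//				return
+//			}
+//			if e.Has(fsnotify.Write) {
+//				log.Println("modified:", e.Name)
+//			}
+//		case err, ok := <-w.Errors:
+//			if !ok {
+//				return
+//			}
+//			if errors.Is(err, fsnotify.ErrEventOverflow) {
+//				log.Println("overflow: some events were dropped")
+//			}
+//		}
+//	}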
+
+// AddWith is like [Watcher.Add], but allows adding options. When using Add()
+// the defaults described below are used.
+//
+// Possible options are:
+//
+// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
+// other platforms. The default is 64K (65536 bytes).
+func (w *Watcher) AddWith(path string, opts ...addOpt) error { return w.b.AddWith(path, opts...) }
+
+// Remove stops monitoring the path for changes.
+//
+// Directories are always removed non-recursively. For example, if you added
+// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
+//
+// Removing a path that has not yet been added returns [ErrNonExistentWatch].
+//
+// Returns nil if [Watcher.Close] was called.
+func (w *Watcher) Remove(path string) error { return w.b.Remove(path) }
+
+// Close removes all watches and closes the Events channel.
+func (w *Watcher) Close() error { return w.b.Close() }
+
+// WatchList returns all paths explicitly added with [Watcher.Add] that have
+// not yet been removed.
+//
+// The order is undefined, and may differ per call. Returns nil if
+// [Watcher.Close] was called.
+func (w *Watcher) WatchList() []string { return w.b.WatchList() }
+
+// xSupports reports whether all the listed operations are supported by this
+// platform.
+//
+// Create, Write, Remove, Rename, and Chmod are always supported. It can only
+// return false for an Op starting with Unportable.
+func (w *Watcher) xSupports(op Op) bool { return w.b.xSupports(op) }
+
func (o Op) String() string {
var b strings.Builder
if o.Has(Create) {
@@ -80,6 +359,18 @@ func (o Op) String() string {
if o.Has(Write) {
b.WriteString("|WRITE")
}
+ if o.Has(xUnportableOpen) {
+ b.WriteString("|OPEN")
+ }
+ if o.Has(xUnportableRead) {
+ b.WriteString("|READ")
+ }
+ if o.Has(xUnportableCloseWrite) {
+ b.WriteString("|CLOSE_WRITE")
+ }
+ if o.Has(xUnportableCloseRead) {
+ b.WriteString("|CLOSE_READ")
+ }
if o.Has(Rename) {
b.WriteString("|RENAME")
}
@@ -100,24 +391,48 @@ func (e Event) Has(op Op) bool { return e.Op.Has(op) }
// String returns a string representation of the event with their path.
func (e Event) String() string {
+ if e.renamedFrom != "" {
+ return fmt.Sprintf("%-13s %q ← %q", e.Op.String(), e.Name, e.renamedFrom)
+ }
return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name)
}
type (
+ backend interface {
+ Add(string) error
+ AddWith(string, ...addOpt) error
+ Remove(string) error
+ WatchList() []string
+ Close() error
+ xSupports(Op) bool
+ }
addOpt func(opt *withOpts)
withOpts struct {
- bufsize int
+ bufsize int
+ op Op
+ noFollow bool
+ sendCreate bool
}
)
+var debug = func() bool {
+ // Check for exactly "1" (rather than mere existence) so we can add
+ // options/flags in the future. I don't know if we ever want that, but it's
+ // nice to leave the option open.
+ return os.Getenv("FSNOTIFY_DEBUG") == "1"
+}()
+
var defaultOpts = withOpts{
bufsize: 65536, // 64K
+ op: Create | Write | Remove | Rename | Chmod,
}
func getOptions(opts ...addOpt) withOpts {
with := defaultOpts
for _, o := range opts {
- o(&with)
+ if o != nil {
+ o(&with)
+ }
}
return with
}
@@ -136,9 +451,44 @@ func WithBufferSize(bytes int) addOpt {
return func(opt *withOpts) { opt.bufsize = bytes }
}
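+
+// Illustrative sketch (not upstream docs): given a Watcher w on Windows, a
+// larger buffer makes ErrEventOverflow less likely for busy directories:
+//
+//	err := w.AddWith(`C:\logs`, fsnotify.WithBufferSize(256*1024))
+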
+// withOps sets which operations to listen for. The default is [Create],
+// [Write], [Remove], [Rename], and [Chmod].
+//
+// Excluding operations you're not interested in can save quite a bit of CPU
+// time; in some use cases there may be hundreds of thousands of useless Write
+// or Chmod operations per second.
+//
+// This can also be used to add unportable operations not supported by all
+// platforms; unportable operations all start with "Unportable":
+// [UnportableOpen], [UnportableRead], [UnportableCloseWrite], and
+// [UnportableCloseRead].
+//
+// AddWith returns an error when using an unportable operation that's not
+// supported. Use [Watcher.xSupports] to check for support.
+func withOps(op Op) addOpt {
+ return func(opt *withOpts) { opt.op = op }
+}
+
+// withNoFollow disables following symlinks, so the symlinks themselves are
+// watched.
+func withNoFollow() addOpt {
+ return func(opt *withOpts) { opt.noFollow = true }
+}
+
+// "Internal" option for recursive watches on inotify.
+func withCreate() addOpt {
+ return func(opt *withOpts) { opt.sendCreate = true }
+}
+
+var enableRecurse = false
+
// Check if this path is recursive (ends with "/..." or "\..."), and return the
// path with the /... stripped.
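+//
+// Example (illustrative): with enableRecurse set, recursivePath("/tmp/dir/...")
+// returns ("/tmp/dir", true); with it unset, the cleaned path and false are
+// returned even for "/..." suffixes.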
func recursivePath(path string) (string, bool) {
+ path = filepath.Clean(path)
+ if !enableRecurse { // Only enabled in tests for now.
+ return path, false
+ }
if filepath.Base(path) == "..." {
return filepath.Dir(path), true
}
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go
new file mode 100644
index 000000000..0b01bc182
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go
@@ -0,0 +1,39 @@
+//go:build darwin
+
+package internal
+
+import (
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+var (
+ ErrSyscallEACCES = syscall.EACCES
+ ErrUnixEACCES = unix.EACCES
+)
+
+var maxfiles uint64
+
+func SetRlimit() {
+ // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
+ var l syscall.Rlimit
+ err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l)
+ if err == nil && l.Cur != l.Max {
+ l.Cur = l.Max
+ syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
+ }
+ maxfiles = l.Cur
+
+ if n, err := syscall.SysctlUint32("kern.maxfiles"); err == nil && uint64(n) < maxfiles {
+ maxfiles = uint64(n)
+ }
+
+ if n, err := syscall.SysctlUint32("kern.maxfilesperproc"); err == nil && uint64(n) < maxfiles {
+ maxfiles = uint64(n)
+ }
+}
+
+func Maxfiles() uint64 { return maxfiles }
+func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) }
+func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) }
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go
new file mode 100644
index 000000000..928319fb0
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go
@@ -0,0 +1,57 @@
+package internal
+
+import "golang.org/x/sys/unix"
+
+var names = []struct {
+ n string
+ m uint32
+}{
+ {"NOTE_ABSOLUTE", unix.NOTE_ABSOLUTE},
+ {"NOTE_ATTRIB", unix.NOTE_ATTRIB},
+ {"NOTE_BACKGROUND", unix.NOTE_BACKGROUND},
+ {"NOTE_CHILD", unix.NOTE_CHILD},
+ {"NOTE_CRITICAL", unix.NOTE_CRITICAL},
+ {"NOTE_DELETE", unix.NOTE_DELETE},
+ {"NOTE_EXEC", unix.NOTE_EXEC},
+ {"NOTE_EXIT", unix.NOTE_EXIT},
+ {"NOTE_EXITSTATUS", unix.NOTE_EXITSTATUS},
+ {"NOTE_EXIT_CSERROR", unix.NOTE_EXIT_CSERROR},
+ {"NOTE_EXIT_DECRYPTFAIL", unix.NOTE_EXIT_DECRYPTFAIL},
+ {"NOTE_EXIT_DETAIL", unix.NOTE_EXIT_DETAIL},
+ {"NOTE_EXIT_DETAIL_MASK", unix.NOTE_EXIT_DETAIL_MASK},
+ {"NOTE_EXIT_MEMORY", unix.NOTE_EXIT_MEMORY},
+ {"NOTE_EXIT_REPARENTED", unix.NOTE_EXIT_REPARENTED},
+ {"NOTE_EXTEND", unix.NOTE_EXTEND},
+ {"NOTE_FFAND", unix.NOTE_FFAND},
+ {"NOTE_FFCOPY", unix.NOTE_FFCOPY},
+ {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK},
+ {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK},
+ {"NOTE_FFNOP", unix.NOTE_FFNOP},
+ {"NOTE_FFOR", unix.NOTE_FFOR},
+ {"NOTE_FORK", unix.NOTE_FORK},
+ {"NOTE_FUNLOCK", unix.NOTE_FUNLOCK},
+ {"NOTE_LEEWAY", unix.NOTE_LEEWAY},
+ {"NOTE_LINK", unix.NOTE_LINK},
+ {"NOTE_LOWAT", unix.NOTE_LOWAT},
+ {"NOTE_MACHTIME", unix.NOTE_MACHTIME},
+ {"NOTE_MACH_CONTINUOUS_TIME", unix.NOTE_MACH_CONTINUOUS_TIME},
+ {"NOTE_NONE", unix.NOTE_NONE},
+ {"NOTE_NSECONDS", unix.NOTE_NSECONDS},
+ {"NOTE_OOB", unix.NOTE_OOB},
+ //{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, -0x100000 (?!)
+ {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
+ {"NOTE_REAP", unix.NOTE_REAP},
+ {"NOTE_RENAME", unix.NOTE_RENAME},
+ {"NOTE_REVOKE", unix.NOTE_REVOKE},
+ {"NOTE_SECONDS", unix.NOTE_SECONDS},
+ {"NOTE_SIGNAL", unix.NOTE_SIGNAL},
+ {"NOTE_TRACK", unix.NOTE_TRACK},
+ {"NOTE_TRACKERR", unix.NOTE_TRACKERR},
+ {"NOTE_TRIGGER", unix.NOTE_TRIGGER},
+ {"NOTE_USECONDS", unix.NOTE_USECONDS},
+ {"NOTE_VM_ERROR", unix.NOTE_VM_ERROR},
+ {"NOTE_VM_PRESSURE", unix.NOTE_VM_PRESSURE},
+ {"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", unix.NOTE_VM_PRESSURE_SUDDEN_TERMINATE},
+ {"NOTE_VM_PRESSURE_TERMINATE", unix.NOTE_VM_PRESSURE_TERMINATE},
+ {"NOTE_WRITE", unix.NOTE_WRITE},
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go
new file mode 100644
index 000000000..3186b0c34
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go
@@ -0,0 +1,33 @@
+package internal
+
+import "golang.org/x/sys/unix"
+
+var names = []struct {
+ n string
+ m uint32
+}{
+ {"NOTE_ATTRIB", unix.NOTE_ATTRIB},
+ {"NOTE_CHILD", unix.NOTE_CHILD},
+ {"NOTE_DELETE", unix.NOTE_DELETE},
+ {"NOTE_EXEC", unix.NOTE_EXEC},
+ {"NOTE_EXIT", unix.NOTE_EXIT},
+ {"NOTE_EXTEND", unix.NOTE_EXTEND},
+ {"NOTE_FFAND", unix.NOTE_FFAND},
+ {"NOTE_FFCOPY", unix.NOTE_FFCOPY},
+ {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK},
+ {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK},
+ {"NOTE_FFNOP", unix.NOTE_FFNOP},
+ {"NOTE_FFOR", unix.NOTE_FFOR},
+ {"NOTE_FORK", unix.NOTE_FORK},
+ {"NOTE_LINK", unix.NOTE_LINK},
+ {"NOTE_LOWAT", unix.NOTE_LOWAT},
+ {"NOTE_OOB", unix.NOTE_OOB},
+ {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
+ {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
+ {"NOTE_RENAME", unix.NOTE_RENAME},
+ {"NOTE_REVOKE", unix.NOTE_REVOKE},
+ {"NOTE_TRACK", unix.NOTE_TRACK},
+ {"NOTE_TRACKERR", unix.NOTE_TRACKERR},
+ {"NOTE_TRIGGER", unix.NOTE_TRIGGER},
+ {"NOTE_WRITE", unix.NOTE_WRITE},
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go
new file mode 100644
index 000000000..f69fdb930
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go
@@ -0,0 +1,42 @@
+package internal
+
+import "golang.org/x/sys/unix"
+
+var names = []struct {
+ n string
+ m uint32
+}{
+ {"NOTE_ABSTIME", unix.NOTE_ABSTIME},
+ {"NOTE_ATTRIB", unix.NOTE_ATTRIB},
+ {"NOTE_CHILD", unix.NOTE_CHILD},
+ {"NOTE_CLOSE", unix.NOTE_CLOSE},
+ {"NOTE_CLOSE_WRITE", unix.NOTE_CLOSE_WRITE},
+ {"NOTE_DELETE", unix.NOTE_DELETE},
+ {"NOTE_EXEC", unix.NOTE_EXEC},
+ {"NOTE_EXIT", unix.NOTE_EXIT},
+ {"NOTE_EXTEND", unix.NOTE_EXTEND},
+ {"NOTE_FFAND", unix.NOTE_FFAND},
+ {"NOTE_FFCOPY", unix.NOTE_FFCOPY},
+ {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK},
+ {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK},
+ {"NOTE_FFNOP", unix.NOTE_FFNOP},
+ {"NOTE_FFOR", unix.NOTE_FFOR},
+ {"NOTE_FILE_POLL", unix.NOTE_FILE_POLL},
+ {"NOTE_FORK", unix.NOTE_FORK},
+ {"NOTE_LINK", unix.NOTE_LINK},
+ {"NOTE_LOWAT", unix.NOTE_LOWAT},
+ {"NOTE_MSECONDS", unix.NOTE_MSECONDS},
+ {"NOTE_NSECONDS", unix.NOTE_NSECONDS},
+ {"NOTE_OPEN", unix.NOTE_OPEN},
+ {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
+ {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
+ {"NOTE_READ", unix.NOTE_READ},
+ {"NOTE_RENAME", unix.NOTE_RENAME},
+ {"NOTE_REVOKE", unix.NOTE_REVOKE},
+ {"NOTE_SECONDS", unix.NOTE_SECONDS},
+ {"NOTE_TRACK", unix.NOTE_TRACK},
+ {"NOTE_TRACKERR", unix.NOTE_TRACKERR},
+ {"NOTE_TRIGGER", unix.NOTE_TRIGGER},
+ {"NOTE_USECONDS", unix.NOTE_USECONDS},
+ {"NOTE_WRITE", unix.NOTE_WRITE},
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go
new file mode 100644
index 000000000..607e683bd
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go
@@ -0,0 +1,32 @@
+//go:build freebsd || openbsd || netbsd || dragonfly || darwin
+
+package internal
+
+import (
+ "fmt"
+ "os"
+ "strings"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+func Debug(name string, kevent *unix.Kevent_t) {
+ mask := uint32(kevent.Fflags)
+
+ var (
+ l []string
+ unknown = mask
+ )
+ for _, n := range names {
+ if mask&n.m == n.m {
+ l = append(l, n.n)
+ unknown ^= n.m
+ }
+ }
+ if unknown > 0 {
+ l = append(l, fmt.Sprintf("0x%x", unknown))
+ }
+ fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-60s → %q\n",
+ time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name)
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go
new file mode 100644
index 000000000..35c734be4
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go
@@ -0,0 +1,56 @@
+package internal
+
+import (
+ "fmt"
+ "os"
+ "strings"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+func Debug(name string, mask, cookie uint32) {
+ names := []struct {
+ n string
+ m uint32
+ }{
+ {"IN_ACCESS", unix.IN_ACCESS},
+ {"IN_ATTRIB", unix.IN_ATTRIB},
+ {"IN_CLOSE", unix.IN_CLOSE},
+ {"IN_CLOSE_NOWRITE", unix.IN_CLOSE_NOWRITE},
+ {"IN_CLOSE_WRITE", unix.IN_CLOSE_WRITE},
+ {"IN_CREATE", unix.IN_CREATE},
+ {"IN_DELETE", unix.IN_DELETE},
+ {"IN_DELETE_SELF", unix.IN_DELETE_SELF},
+ {"IN_IGNORED", unix.IN_IGNORED},
+ {"IN_ISDIR", unix.IN_ISDIR},
+ {"IN_MODIFY", unix.IN_MODIFY},
+ {"IN_MOVE", unix.IN_MOVE},
+ {"IN_MOVED_FROM", unix.IN_MOVED_FROM},
+ {"IN_MOVED_TO", unix.IN_MOVED_TO},
+ {"IN_MOVE_SELF", unix.IN_MOVE_SELF},
+ {"IN_OPEN", unix.IN_OPEN},
+ {"IN_Q_OVERFLOW", unix.IN_Q_OVERFLOW},
+ {"IN_UNMOUNT", unix.IN_UNMOUNT},
+ }
+
+ var (
+ l []string
+ unknown = mask
+ )
+ for _, n := range names {
+ if mask&n.m == n.m {
+ l = append(l, n.n)
+ unknown ^= n.m
+ }
+ }
+ if unknown > 0 {
+ l = append(l, fmt.Sprintf("0x%x", unknown))
+ }
+ var c string
+ if cookie > 0 {
+ c = fmt.Sprintf("(cookie: %d) ", cookie)
+ }
+ fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-30s → %s%q\n",
+ time.Now().Format("15:04:05.000000000"), strings.Join(l, "|"), c, name)
+}
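+
+// Example (illustrative): Debug("dir/sub", unix.IN_CREATE|unix.IN_ISDIR, 0)
+// prints something like:
+//
+//	FSNOTIFY_DEBUG: 15:04:05.000000000 IN_CREATE|IN_ISDIR → "dir/sub"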
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go
new file mode 100644
index 000000000..e5b3b6f69
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go
@@ -0,0 +1,25 @@
+package internal
+
+import "golang.org/x/sys/unix"
+
+var names = []struct {
+ n string
+ m uint32
+}{
+ {"NOTE_ATTRIB", unix.NOTE_ATTRIB},
+ {"NOTE_CHILD", unix.NOTE_CHILD},
+ {"NOTE_DELETE", unix.NOTE_DELETE},
+ {"NOTE_EXEC", unix.NOTE_EXEC},
+ {"NOTE_EXIT", unix.NOTE_EXIT},
+ {"NOTE_EXTEND", unix.NOTE_EXTEND},
+ {"NOTE_FORK", unix.NOTE_FORK},
+ {"NOTE_LINK", unix.NOTE_LINK},
+ {"NOTE_LOWAT", unix.NOTE_LOWAT},
+ {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
+ {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
+ {"NOTE_RENAME", unix.NOTE_RENAME},
+ {"NOTE_REVOKE", unix.NOTE_REVOKE},
+ {"NOTE_TRACK", unix.NOTE_TRACK},
+ {"NOTE_TRACKERR", unix.NOTE_TRACKERR},
+ {"NOTE_WRITE", unix.NOTE_WRITE},
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go
new file mode 100644
index 000000000..1dd455bc5
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go
@@ -0,0 +1,28 @@
+package internal
+
+import "golang.org/x/sys/unix"
+
+var names = []struct {
+ n string
+ m uint32
+}{
+ {"NOTE_ATTRIB", unix.NOTE_ATTRIB},
+ // {"NOTE_CHANGE", unix.NOTE_CHANGE}, // Not on 386?
+ {"NOTE_CHILD", unix.NOTE_CHILD},
+ {"NOTE_DELETE", unix.NOTE_DELETE},
+ {"NOTE_EOF", unix.NOTE_EOF},
+ {"NOTE_EXEC", unix.NOTE_EXEC},
+ {"NOTE_EXIT", unix.NOTE_EXIT},
+ {"NOTE_EXTEND", unix.NOTE_EXTEND},
+ {"NOTE_FORK", unix.NOTE_FORK},
+ {"NOTE_LINK", unix.NOTE_LINK},
+ {"NOTE_LOWAT", unix.NOTE_LOWAT},
+ {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
+ {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
+ {"NOTE_RENAME", unix.NOTE_RENAME},
+ {"NOTE_REVOKE", unix.NOTE_REVOKE},
+ {"NOTE_TRACK", unix.NOTE_TRACK},
+ {"NOTE_TRACKERR", unix.NOTE_TRACKERR},
+ {"NOTE_TRUNCATE", unix.NOTE_TRUNCATE},
+ {"NOTE_WRITE", unix.NOTE_WRITE},
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go
new file mode 100644
index 000000000..f1b2e73bd
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go
@@ -0,0 +1,45 @@
+package internal
+
+import (
+ "fmt"
+ "os"
+ "strings"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+func Debug(name string, mask int32) {
+ names := []struct {
+ n string
+ m int32
+ }{
+ {"FILE_ACCESS", unix.FILE_ACCESS},
+ {"FILE_MODIFIED", unix.FILE_MODIFIED},
+ {"FILE_ATTRIB", unix.FILE_ATTRIB},
+ {"FILE_TRUNC", unix.FILE_TRUNC},
+ {"FILE_NOFOLLOW", unix.FILE_NOFOLLOW},
+ {"FILE_DELETE", unix.FILE_DELETE},
+ {"FILE_RENAME_TO", unix.FILE_RENAME_TO},
+ {"FILE_RENAME_FROM", unix.FILE_RENAME_FROM},
+ {"UNMOUNTED", unix.UNMOUNTED},
+ {"MOUNTEDOVER", unix.MOUNTEDOVER},
+ {"FILE_EXCEPTION", unix.FILE_EXCEPTION},
+ }
+
+ var (
+ l []string
+ unknown = mask
+ )
+ for _, n := range names {
+ if mask&n.m == n.m {
+ l = append(l, n.n)
+ unknown ^= n.m
+ }
+ }
+ if unknown > 0 {
+ l = append(l, fmt.Sprintf("0x%x", unknown))
+ }
+ fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-30s → %q\n",
+ time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name)
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go
new file mode 100644
index 000000000..52bf4ce53
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go
@@ -0,0 +1,40 @@
+package internal
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "golang.org/x/sys/windows"
+)
+
+func Debug(name string, mask uint32) {
+ names := []struct {
+ n string
+ m uint32
+ }{
+ {"FILE_ACTION_ADDED", windows.FILE_ACTION_ADDED},
+ {"FILE_ACTION_REMOVED", windows.FILE_ACTION_REMOVED},
+ {"FILE_ACTION_MODIFIED", windows.FILE_ACTION_MODIFIED},
+ {"FILE_ACTION_RENAMED_OLD_NAME", windows.FILE_ACTION_RENAMED_OLD_NAME},
+ {"FILE_ACTION_RENAMED_NEW_NAME", windows.FILE_ACTION_RENAMED_NEW_NAME},
+ }
+
+ var (
+ l []string
+ unknown = mask
+ )
+ for _, n := range names {
+ if mask&n.m == n.m {
+ l = append(l, n.n)
+ unknown ^= n.m
+ }
+ }
+ if unknown > 0 {
+ l = append(l, fmt.Sprintf("0x%x", unknown))
+ }
+ fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-65s → %q\n",
+ time.Now().Format("15:04:05.000000000"), strings.Join(l, " | "), filepath.ToSlash(name))
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go
new file mode 100644
index 000000000..5ac8b5079
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go
@@ -0,0 +1,31 @@
+//go:build freebsd
+
+package internal
+
+import (
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+var (
+ ErrSyscallEACCES = syscall.EACCES
+ ErrUnixEACCES = unix.EACCES
+)
+
+var maxfiles uint64
+
+func SetRlimit() {
+ // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
+ var l syscall.Rlimit
+ err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l)
+ if err == nil && l.Cur != l.Max {
+ l.Cur = l.Max
+ syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
+ }
+ maxfiles = uint64(l.Cur)
+}
+
+func Maxfiles() uint64 { return maxfiles }
+func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) }
+func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, uint64(dev)) }
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/internal.go b/vendor/github.com/fsnotify/fsnotify/internal/internal.go
new file mode 100644
index 000000000..7daa45e19
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/internal/internal.go
@@ -0,0 +1,2 @@
+// Package internal contains some helpers.
+package internal
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix.go b/vendor/github.com/fsnotify/fsnotify/internal/unix.go
new file mode 100644
index 000000000..b251fb803
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/internal/unix.go
@@ -0,0 +1,31 @@
+//go:build !windows && !darwin && !freebsd && !plan9
+
+package internal
+
+import (
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+var (
+ ErrSyscallEACCES = syscall.EACCES
+ ErrUnixEACCES = unix.EACCES
+)
+
+var maxfiles uint64
+
+func SetRlimit() {
+ // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
+ var l syscall.Rlimit
+ err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l)
+ if err == nil && l.Cur != l.Max {
+ l.Cur = l.Max
+ syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
+ }
+ maxfiles = uint64(l.Cur)
+}
+
+func Maxfiles() uint64 { return maxfiles }
+func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) }
+func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) }
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix2.go b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go
new file mode 100644
index 000000000..37dfeddc2
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go
@@ -0,0 +1,7 @@
+//go:build !windows
+
+package internal
+
+func HasPrivilegesForSymlink() bool {
+ return true
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/internal/windows.go b/vendor/github.com/fsnotify/fsnotify/internal/windows.go
new file mode 100644
index 000000000..896bc2e5a
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/internal/windows.go
@@ -0,0 +1,41 @@
+//go:build windows
+
+package internal
+
+import (
+ "errors"
+
+ "golang.org/x/sys/windows"
+)
+
+// Just a dummy.
+var (
+ ErrSyscallEACCES = errors.New("dummy")
+ ErrUnixEACCES = errors.New("dummy")
+)
+
+func SetRlimit() {}
+func Maxfiles() uint64 { return 1<<64 - 1 }
+func Mkfifo(path string, mode uint32) error { return errors.New("no FIFOs on Windows") }
+func Mknod(path string, mode uint32, dev int) error { return errors.New("no device nodes on Windows") }
+
+func HasPrivilegesForSymlink() bool {
+ var sid *windows.SID
+ err := windows.AllocateAndInitializeSid(
+ &windows.SECURITY_NT_AUTHORITY,
+ 2,
+ windows.SECURITY_BUILTIN_DOMAIN_RID,
+ windows.DOMAIN_ALIAS_RID_ADMINS,
+ 0, 0, 0, 0, 0, 0,
+ &sid)
+ if err != nil {
+ return false
+ }
+ defer windows.FreeSid(sid)
+ token := windows.Token(0)
+ member, err := token.IsMember(sid)
+ if err != nil {
+ return false
+ }
+ return member || token.IsElevated()
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh
deleted file mode 100644
index 99012ae65..000000000
--- a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh
+++ /dev/null
@@ -1,259 +0,0 @@
-#!/usr/bin/env zsh
-[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1
-setopt err_exit no_unset pipefail extended_glob
-
-# Simple script to update the godoc comments on all watchers so you don't need
-# to update the same comment 5 times.
-
-watcher=$(</tmp/x
- print -r -- $cmt >>/tmp/x
- tail -n+$(( end + 1 )) $file >>/tmp/x
- mv /tmp/x $file
- done
-}
-
-set-cmt '^type Watcher struct ' $watcher
-set-cmt '^func NewWatcher(' $new
-set-cmt '^func NewBufferedWatcher(' $newbuffered
-set-cmt '^func (w \*Watcher) Add(' $add
-set-cmt '^func (w \*Watcher) AddWith(' $addwith
-set-cmt '^func (w \*Watcher) Remove(' $remove
-set-cmt '^func (w \*Watcher) Close(' $close
-set-cmt '^func (w \*Watcher) WatchList(' $watchlist
-set-cmt '^[[:space:]]*Events *chan Event$' $events
-set-cmt '^[[:space:]]*Errors *chan error$' $errors
diff --git a/vendor/github.com/fsnotify/fsnotify/shared.go b/vendor/github.com/fsnotify/fsnotify/shared.go
new file mode 100644
index 000000000..3ee9b58f1
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/shared.go
@@ -0,0 +1,64 @@
+package fsnotify
+
+import "sync"
+
+type shared struct {
+ Events chan Event
+ Errors chan error
+ done chan struct{}
+ mu sync.Mutex
+}
+
+func newShared(ev chan Event, errs chan error) *shared {
+ return &shared{
+ Events: ev,
+ Errors: errs,
+ done: make(chan struct{}),
+ }
+}
+
+// Returns true if the event was sent, or false if the watcher is closed.
+func (w *shared) sendEvent(e Event) bool {
+ if e.Op == 0 {
+ return true
+ }
+ select {
+ case <-w.done:
+ return false
+ case w.Events <- e:
+ return true
+ }
+}
+
+// Returns true if the error was sent, or false if the watcher is closed.
+func (w *shared) sendError(err error) bool {
+ if err == nil {
+ return true
+ }
+ select {
+ case <-w.done:
+ return false
+ case w.Errors <- err:
+ return true
+ }
+}
+
+func (w *shared) isClosed() bool {
+ select {
+ case <-w.done:
+ return true
+ default:
+ return false
+ }
+}
+
+// Mark as closed; returns true if it was already closed.
+func (w *shared) close() bool {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ if w.isClosed() {
+ return true
+ }
+ close(w.done)
+ return false
+}
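+
+// Usage sketch (illustrative, not part of upstream fsnotify): a backend
+// forwards events through shared and stops once the watcher is closed:
+//
+//	s := newShared(make(chan Event), make(chan error))
+//	go func() {
+//		for _, e := range pending { // pending []Event: hypothetical queue
+//			if !s.sendEvent(e) {
+//				return // watcher was closed
+//			}
+//		}
+//	}()
+//	s.close() // unblocks any sendEvent/sendError still waiting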
diff --git a/vendor/github.com/fsnotify/fsnotify/staticcheck.conf b/vendor/github.com/fsnotify/fsnotify/staticcheck.conf
new file mode 100644
index 000000000..8fa7351f0
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/staticcheck.conf
@@ -0,0 +1,3 @@
+checks = ['all',
+ '-U1000', # Don't complain about unused functions.
+]
diff --git a/vendor/github.com/fsnotify/fsnotify/system_bsd.go b/vendor/github.com/fsnotify/fsnotify/system_bsd.go
index 4322b0b88..f65e8fe3e 100644
--- a/vendor/github.com/fsnotify/fsnotify/system_bsd.go
+++ b/vendor/github.com/fsnotify/fsnotify/system_bsd.go
@@ -1,5 +1,4 @@
//go:build freebsd || openbsd || netbsd || dragonfly
-// +build freebsd openbsd netbsd dragonfly
package fsnotify
diff --git a/vendor/github.com/fsnotify/fsnotify/system_darwin.go b/vendor/github.com/fsnotify/fsnotify/system_darwin.go
index 5da5ffa78..a29fc7aab 100644
--- a/vendor/github.com/fsnotify/fsnotify/system_darwin.go
+++ b/vendor/github.com/fsnotify/fsnotify/system_darwin.go
@@ -1,5 +1,4 @@
//go:build darwin
-// +build darwin
package fsnotify
diff --git a/vendor/github.com/fxamacker/cbor/v2/.gitignore b/vendor/github.com/fxamacker/cbor/v2/.gitignore
new file mode 100644
index 000000000..f1c181ec9
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/.gitignore
@@ -0,0 +1,12 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
diff --git a/vendor/github.com/fxamacker/cbor/v2/.golangci.yml b/vendor/github.com/fxamacker/cbor/v2/.golangci.yml
new file mode 100644
index 000000000..38cb9ae10
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/.golangci.yml
@@ -0,0 +1,104 @@
+# Do not delete linter settings. Linters like gocritic can be enabled on the command line.
+
+linters-settings:
+ depguard:
+ rules:
+ prevent_unmaintained_packages:
+ list-mode: strict
+ files:
+ - $all
+ - "!$test"
+ allow:
+ - $gostd
+ - github.com/x448/float16
+ deny:
+ - pkg: io/ioutil
+ desc: "replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil"
+ dupl:
+ threshold: 100
+ funlen:
+ lines: 100
+ statements: 50
+ goconst:
+ ignore-tests: true
+ min-len: 2
+ min-occurrences: 3
+ gocritic:
+ enabled-tags:
+ - diagnostic
+ - experimental
+ - opinionated
+ - performance
+ - style
+ disabled-checks:
+ - commentedOutCode
+ - dupImport # https://github.com/go-critic/go-critic/issues/845
+ - ifElseChain
+ - octalLiteral
+ - paramTypeCombine
+ - whyNoLint
+ gofmt:
+ simplify: false
+ goimports:
+ local-prefixes: github.com/fxamacker/cbor
+ golint:
+ min-confidence: 0
+ govet:
+ check-shadowing: true
+ lll:
+ line-length: 140
+ maligned:
+ suggest-new: true
+ misspell:
+ locale: US
+ staticcheck:
+ checks: ["all"]
+
+linters:
+ disable-all: true
+ enable:
+ - asciicheck
+ - bidichk
+ - depguard
+ - errcheck
+ - exportloopref
+ - goconst
+ - gocritic
+ - gocyclo
+ - gofmt
+ - goimports
+ - goprintffuncname
+ - gosec
+ - gosimple
+ - govet
+ - ineffassign
+ - misspell
+ - nilerr
+ - revive
+ - staticcheck
+ - stylecheck
+ - typecheck
+ - unconvert
+ - unused
+
+issues:
+ # max-issues-per-linter default is 50. Set to 0 to disable limit.
+ max-issues-per-linter: 0
+ # max-same-issues default is 3. Set to 0 to disable limit.
+ max-same-issues: 0
+
+ exclude-rules:
+ - path: decode.go
+ text: "string ` overflows ` has (\\d+) occurrences, make it a constant"
+ - path: decode.go
+ text: "string ` \\(range is \\[` has (\\d+) occurrences, make it a constant"
+ - path: decode.go
+ text: "string `, ` has (\\d+) occurrences, make it a constant"
+ - path: decode.go
+ text: "string ` overflows Go's int64` has (\\d+) occurrences, make it a constant"
+ - path: decode.go
+ text: "string `\\]\\)` has (\\d+) occurrences, make it a constant"
+ - path: valid.go
+ text: "string ` for type ` has (\\d+) occurrences, make it a constant"
+ - path: valid.go
+ text: "string `cbor: ` has (\\d+) occurrences, make it a constant"
diff --git a/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md b/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..c794b2b0c
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md
@@ -0,0 +1,133 @@
+
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, caste, color, religion, or sexual
+identity and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+
+## Our Standards
+
+Examples of behavior that contributes to a positive environment for our
+community include:
+
+* Demonstrating empathy and kindness toward other people
+* Being respectful of differing opinions, viewpoints, and experiences
+* Giving and gracefully accepting constructive feedback
+* Accepting responsibility and apologizing to those affected by our mistakes,
+ and learning from the experience
+* Focusing on what is best not just for us as individuals, but for the overall
+ community
+
+Examples of unacceptable behavior include:
+
+* The use of sexualized language or imagery, and sexual attention or advances of
+ any kind
+* Trolling, insulting or derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or email address,
+ without their explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Enforcement Responsibilities
+
+Community leaders are responsible for clarifying and enforcing our standards of
+acceptable behavior and will take appropriate and fair corrective action in
+response to any behavior that they deem inappropriate, threatening, offensive,
+or harmful.
+
+Community leaders have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, and will communicate reasons for moderation
+decisions when appropriate.
+
+## Scope
+
+This Code of Conduct applies within all community spaces, and also applies when
+an individual is officially representing the community in public spaces.
+Examples of representing our community include using an official e-mail address,
+posting via an official social media account, or acting as an appointed
+representative at an online or offline event.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported to the community leaders responsible for enforcement at
+faye.github@gmail.com.
+All complaints will be reviewed and investigated promptly and fairly.
+
+All community leaders are obligated to respect the privacy and security of the
+reporter of any incident.
+
+## Enforcement Guidelines
+
+Community leaders will follow these Community Impact Guidelines in determining
+the consequences for any action they deem in violation of this Code of Conduct:
+
+### 1. Correction
+
+**Community Impact**: Use of inappropriate language or other behavior deemed
+unprofessional or unwelcome in the community.
+
+**Consequence**: A private, written warning from community leaders, providing
+clarity around the nature of the violation and an explanation of why the
+behavior was inappropriate. A public apology may be requested.
+
+### 2. Warning
+
+**Community Impact**: A violation through a single incident or series of
+actions.
+
+**Consequence**: A warning with consequences for continued behavior. No
+interaction with the people involved, including unsolicited interaction with
+those enforcing the Code of Conduct, for a specified period of time. This
+includes avoiding interactions in community spaces as well as external channels
+like social media. Violating these terms may lead to a temporary or permanent
+ban.
+
+### 3. Temporary Ban
+
+**Community Impact**: A serious violation of community standards, including
+sustained inappropriate behavior.
+
+**Consequence**: A temporary ban from any sort of interaction or public
+communication with the community for a specified period of time. No public or
+private interaction with the people involved, including unsolicited interaction
+with those enforcing the Code of Conduct, is allowed during this period.
+Violating these terms may lead to a permanent ban.
+
+### 4. Permanent Ban
+
+**Community Impact**: Demonstrating a pattern of violation of community
+standards, including sustained inappropriate behavior, harassment of an
+individual, or aggression toward or disparagement of classes of individuals.
+
+**Consequence**: A permanent ban from any sort of public interaction within the
+community.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 2.1, available at
+[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
+
+Community Impact Guidelines were inspired by
+[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
+
+For answers to common questions about this code of conduct, see the FAQ at
+[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
+[https://www.contributor-covenant.org/translations][translations].
+
+[homepage]: https://www.contributor-covenant.org
+[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
+[Mozilla CoC]: https://github.com/mozilla/diversity
+[FAQ]: https://www.contributor-covenant.org/faq
+[translations]: https://www.contributor-covenant.org/translations
diff --git a/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md b/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md
new file mode 100644
index 000000000..de0965e12
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md
@@ -0,0 +1,41 @@
+# How to contribute
+
+You can contribute by using the library, opening issues, or opening pull requests.
+
+## Bug reports and security vulnerabilities
+
+Most issues are tracked publicly on [GitHub](https://github.com/fxamacker/cbor/issues).
+
+To report security vulnerabilities, please email faye.github@gmail.com and allow time for the problem to be resolved before disclosing it to the public. For more info, see [Security Policy](https://github.com/fxamacker/cbor#security-policy).
+
+Please do not send data that might contain personally identifiable information, even if you think you have permission. That type of support requires payment and a signed contract where I'm indemnified, held harmless, and defended by you for any data you send to me.
+
+## Pull requests
+
+Please [create an issue](https://github.com/fxamacker/cbor/issues/new/choose) before you begin work on a PR. The improvement may have already been considered, etc.
+
+Pull requests have signing requirements and must not be anonymous. Exceptions are usually made for docs and CI scripts.
+
+See the [Pull Request Template](https://github.com/fxamacker/cbor/blob/master/.github/pull_request_template.md) for details.
+
+Pull requests have a greater chance of being approved if:
+- they do not reduce speed, increase memory use, reduce security, etc. for people not using the new option or feature.
+- they have > 97% code coverage.
+
+## Describe your issue
+
+Clearly describe the issue:
+* If it's a bug, please provide: **version of this library** and **Go** (`go version`), **unmodified error message**, and describe **how to reproduce it**. Also state **what you expected to happen** instead of the error.
+* If you propose a change or addition, try to give an example of how the improved code could look or how to use it.
+* If you found a compilation error, please confirm you're using a supported version of Go. If you are, then provide the output of `go version` first, followed by the complete error message.
+
+## Please don't
+
+Please don't send data containing personally identifiable information, even if you think you have permission. That type of support requires payment and a contract where I'm indemnified, held harmless, and defended for any data you send to me.
+
+Please don't send CBOR data larger than 1024 bytes by email. If you want to send crash-producing CBOR data > 1024 bytes by email, please get my permission before sending it to me.
+
+## Credits
+
+- This guide used nlohmann/json contribution guidelines for inspiration as suggested in issue #22.
+- Special thanks to @lukseven for pointing out the contribution guidelines didn't mention signing requirements.
diff --git a/vendor/github.com/fxamacker/cbor/v2/LICENSE b/vendor/github.com/fxamacker/cbor/v2/LICENSE
new file mode 100644
index 000000000..eaa850492
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2019-present Faye Amacker
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/fxamacker/cbor/v2/README.md b/vendor/github.com/fxamacker/cbor/v2/README.md
new file mode 100644
index 000000000..d072b81c7
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/README.md
@@ -0,0 +1,934 @@
+# CBOR Codec
+
+[fxamacker/cbor](https://github.com/fxamacker/cbor) is a library for encoding and decoding [CBOR](https://www.rfc-editor.org/info/std94) and [CBOR Sequences](https://www.rfc-editor.org/rfc/rfc8742.html).
+
+CBOR is a [trusted alternative](https://www.rfc-editor.org/rfc/rfc8949.html#name-comparison-of-other-binary-) to JSON, MessagePack, Protocol Buffers, etc. CBOR is an Internet Standard defined by [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94) and is designed to be relevant for decades.
+
+`fxamacker/cbor` is used in projects by Arm Ltd., EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, IBM, Kubernetes[*](https://github.com/search?q=org%3Akubernetes%20fxamacker%2Fcbor&type=code), Let's Encrypt, Linux Foundation, Microsoft, Oasis Protocol, Red Hat[*](https://github.com/search?q=org%3Aopenshift+fxamacker%2Fcbor&type=code), Tailscale[*](https://github.com/search?q=org%3Atailscale+fxamacker%2Fcbor&type=code), Veraison[*](https://github.com/search?q=org%3Averaison+fxamacker%2Fcbor&type=code), [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor).
+
+See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `MarshalToBuffer` and `UserBufferEncMode` accept a user-specified buffer.
+
+## fxamacker/cbor
+
+[ci](https://github.com/fxamacker/cbor/actions?query=workflow%3Aci) ·
+[cover ≥97%](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A597%25%22) ·
+[CodeQL](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml) ·
+[fuzzing](#fuzzing-and-code-coverage) ·
+[Go Report Card](https://goreportcard.com/report/github.com/fxamacker/cbor) ·
+[fuzzing and code coverage](https://github.com/fxamacker/cbor#fuzzing-and-code-coverage)
+
+`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)).
+
+Features include full support for CBOR tags, [Core Deterministic Encoding](https://www.rfc-editor.org/rfc/rfc8949.html#name-core-deterministic-encoding), duplicate map key detection, etc.
+
+API is mostly the same as `encoding/json`, plus interfaces that simplify concurrency and CBOR options.
+
+Design balances trade-offs between security, speed, concurrency, encoded data size, usability, etc.
+
+**🔎 Highlights**
+
+__🚀 Speed__
+
+Encoding and decoding is fast without using Go's `unsafe` package. Slower settings are opt-in. Default limits allow very fast and memory efficient rejection of malformed CBOR data.
+
+__🔒 Security__
+
+Decoder has configurable limits that defend against malicious inputs. Duplicate map key detection is supported. By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).
+
+Codec passed multiple confidential security assessments in 2022. No vulnerabilities found in subset of codec in a [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) prepared by NCC Group for Microsoft Corporation.
+
+__🗜️ Data Size__
+
+Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) and field tag "-" automatically reduce the size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit.
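+
+A minimal sketch (assuming this library's `ShortestFloat16` encoding option)
+of opting in to float shrinking:
+
+```go
+opts := cbor.EncOptions{ShortestFloat: cbor.ShortestFloat16}
+em, _ := opts.EncMode()
+b, _ := em.Marshal(1.5) // 1.5 is exactly representable as a 16-bit float
+// b is 0xf93e00 (3 bytes) instead of a 9-byte float64 encoding.
+```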
+
+__🧩 Usability__
+
+API is mostly the same as `encoding/json`, plus interfaces that simplify concurrent use of CBOR options. Encoding and decoding modes can be created at startup and reused by any goroutines.
+
+Presets include Core Deterministic Encoding, Preferred Serialization, CTAP2 Canonical CBOR, etc.
+
+__📆 Extensibility__
+
+Features include CBOR [extension points](https://www.rfc-editor.org/rfc/rfc8949.html#section-7.1) (e.g. CBOR tags) and extensive settings. API has interfaces that allow users to create custom encoding and decoding without modifying this library.
+
+
+### Secure Decoding with Configurable Settings
+
+`fxamacker/cbor` has configurable limits, etc. that defend against malicious CBOR data.
+
+Notably, `fxamacker/cbor` is fast at rejecting malformed CBOR data.
+
+> [!NOTE]
+> Benchmarks rejecting 10 bytes of malicious CBOR data decoding to `[]byte`:
+>
+> | Codec | Speed (ns/op) | Memory | Allocs |
+> | :---- | ------------: | -----: | -----: |
+> | fxamacker/cbor 2.7.0 | 47 ± 7% | 32 B/op | 2 allocs/op |
+> | ugorji/go 1.2.12 | 5878187 ± 3% | 67111556 B/op | 13 allocs/op |
+>
+> Faster hardware (overclocked DDR4 or DDR5) can reduce speed difference.
+>
+> **🔎 Benchmark details**
+>
+> Latest comparison for decoding CBOR data to Go `[]byte`:
+> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
+> - go1.22.7, linux/amd64, i5-13600K (DDR4-2933, disabled e-cores)
+> - go test -bench=. -benchmem -count=20
+>
+> #### Prior comparisons
+>
+> | Codec | Speed (ns/op) | Memory | Allocs |
+> | :---- | ------------: | -----: | -----: |
+> | fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op |
+> | fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op |
+> | ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op |
+> | ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate |
+>
+> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
+> - go1.19.6, linux/amd64, i5-13600K (DDR4)
+> - go test -bench=. -benchmem -count=20
+>
+>
+
+In contrast, some codecs can crash or use excessive resources while decoding bad data.
+
+> [!WARNING]
+> Go's `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).
+>
+> **🔎 gob fatal error (out of memory) 💥 decoding 181 bytes**
+>
+> ```Go
+> // Example of encoding/gob having "fatal error: runtime: out of memory"
+> // while decoding 181 bytes (all Go versions as of Dec. 8, 2024).
+> package main
+> import (
+> "bytes"
+> "encoding/gob"
+> "encoding/hex"
+> "fmt"
+> )
+>
+> // Example data is from https://github.com/golang/go/issues/24446
+> // (shortened to 181 bytes).
+> const data = "4dffb503010102303001ff30000109010130010800010130010800010130" +
+> "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" +
+> "860001013001ff860001013001ffb80000001eff850401010e3030303030" +
+> "30303030303030303001ff3000010c0104000016ffb70201010830303030" +
+> "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" +
+> "303030303030303030303030303030303030303030303030303030303030" +
+> "30"
+>
+> type X struct {
+> J *X
+> K map[string]int
+> }
+>
+> func main() {
+> raw, _ := hex.DecodeString(data)
+> decoder := gob.NewDecoder(bytes.NewReader(raw))
+>
+> var x X
+> decoder.Decode(&x) // fatal error: runtime: out of memory
+> fmt.Println("Decoding finished.")
+> }
+> ```
+>
+>
+>
+
+### Smaller Encodings with Struct Tag Options
+
+Struct tags automatically reduce encoded size of structs and improve speed.
+
+We can write less code by using struct tag options:
+- `toarray`: encode without field names (decode back to original struct)
+- `keyasint`: encode field names as integers (decode back to original struct)
+- `omitempty`: omit empty field when encoding
+- `omitzero`: omit zero-value field when encoding
+
+As a special case, struct field tag "-" omits the field.
+
+NOTE: When a struct uses `toarray`, the encoder will ignore `omitempty` and `omitzero` to prevent the position of encoded array elements from changing. This allows the decoder to match encoded elements to their Go struct fields.
+
+
+
+> [!NOTE]
+> `fxamacker/cbor` can encode a 3-level nested Go struct to 1 byte!
+> - `encoding/json`: 18 bytes of JSON
+> - `fxamacker/cbor`: 1 byte of CBOR
+>
+> **🔎 Encoding 3-level nested Go struct with omitempty**
+>
+> https://go.dev/play/p/YxwvfPdFQG2
+>
+> ```Go
+> // Example encoding nested struct (with omitempty tag)
+> // - encoding/json: 18 byte JSON
+> // - fxamacker/cbor: 1 byte CBOR
+>
+> package main
+>
+> import (
+> "encoding/hex"
+> "encoding/json"
+> "fmt"
+>
+> "github.com/fxamacker/cbor/v2"
+> )
+>
+> type GrandChild struct {
+> Quux int `json:",omitempty"`
+> }
+>
+> type Child struct {
+> Baz int `json:",omitempty"`
+> Qux GrandChild `json:",omitempty"`
+> }
+>
+> type Parent struct {
+> Foo Child `json:",omitempty"`
+> Bar int `json:",omitempty"`
+> }
+>
+> func cb() {
+> results, _ := cbor.Marshal(Parent{})
+> fmt.Println("hex(CBOR): " + hex.EncodeToString(results))
+>
+> text, _ := cbor.Diagnose(results) // Diagnostic Notation
+> fmt.Println("DN: " + text)
+> }
+>
+> func js() {
+> results, _ := json.Marshal(Parent{})
+> fmt.Println("hex(JSON): " + hex.EncodeToString(results))
+>
+> text := string(results) // JSON
+> fmt.Println("JSON: " + text)
+> }
+>
+> func main() {
+> cb()
+> fmt.Println("-------------")
+> js()
+> }
+> ```
+>
+> Output (DN is Diagnostic Notation):
+> ```
+> hex(CBOR): a0
+> DN: {}
+> -------------
+> hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d
+> JSON: {"Foo":{"Qux":{}}}
+> ```
+>
+>
+
+
+## Quick Start
+
+__Install__: `go get github.com/fxamacker/cbor/v2` and `import "github.com/fxamacker/cbor/v2"`.
+
+> [!TIP]
+>
+> Tinygo users can try beta/experimental branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta).
+>
+> **🔎 More about tinygo feature branch**
+>
+> ### Tinygo
+>
+> Branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta) is based on fxamacker/cbor v2.7.0 and it can be compiled using tinygo v0.33 (also compiles with golang/go).
+>
+> It passes unit tests (with both go1.22 and tinygo v0.33) and is considered beta/experimental for tinygo.
+>
+> :warning: The `feature/cbor-tinygo-beta` branch does not get fuzz tested yet.
+>
+> Changes in this feature branch only affect tinygo compiled software. Summary of changes:
+> - default `DecOptions.MaxNestedLevels` is reduced to 16 (was 32). User can specify higher limit but 24+ crashes tests when compiled with tinygo v0.33.
+> - disabled decoding CBOR tag data to Go interface because tinygo v0.33 is missing needed feature.
+> - encoding error message can be different when encoding function type.
+>
+> Related tinygo issues:
+> - https://github.com/tinygo-org/tinygo/issues/4277
+> - https://github.com/tinygo-org/tinygo/issues/4458
+>
+>
+
+
+### Key Points
+
+This library can encode and decode CBOR (RFC 8949) and CBOR Sequences (RFC 8742).
+
+- __CBOR data item__ is a single piece of CBOR data and its structure may contain 0 or more nested data items.
+- __CBOR sequence__ is a concatenation of 0 or more encoded CBOR data items.
+
+Configurable limits and options can be used to balance trade-offs.
+
+- Encoding and decoding modes are created from options (settings).
+- Modes can be created at startup and reused.
+- Modes are safe for concurrent use.
+
+### Default Mode
+
+Package level functions only use this library's default settings.
+They provide the "default mode" of encoding and decoding.
+
+```go
+// API matches encoding/json for Marshal, Unmarshal, Encode, Decode, etc.
+b, err = cbor.Marshal(v) // encode v to []byte b
+err = cbor.Unmarshal(b, &v) // decode []byte b to v
+decoder = cbor.NewDecoder(r) // create decoder with io.Reader r
+err = decoder.Decode(&v) // decode a CBOR data item to v
+
+// v2.7.0 added MarshalToBuffer() and UserBufferEncMode interface.
+err = cbor.MarshalToBuffer(v, b) // encode v to b instead of using built-in buf pool.
+
+// v2.5.0 added new functions that return remaining bytes.
+
+// UnmarshalFirst decodes first CBOR data item and returns remaining bytes.
+rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v
+
+// DiagnoseFirst translates first CBOR data item to text and returns remaining bytes.
+text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to Diagnostic Notation text
+
+// NOTE: Unmarshal() returns ExtraneousDataError if there are remaining bytes, but
+// UnmarshalFirst() and DiagnoseFirst() allow trailing bytes.
+```
+
+> [!IMPORTANT]
+> CBOR settings allow trade-offs between speed, security, encoding size, etc.
+>
+> - Different CBOR libraries may use different default settings.
+> - CBOR-based formats or protocols usually require specific settings.
+>
+> For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset.
+
+### Presets
+
+Presets can be used as-is or as a starting point for custom settings.
+
+```go
+// EncOptions is a struct of encoder settings.
+func CoreDetEncOptions() EncOptions // RFC 8949 Core Deterministic Encoding
+func PreferredUnsortedEncOptions() EncOptions // RFC 8949 Preferred Serialization
+func CanonicalEncOptions() EncOptions // RFC 7049 Canonical CBOR
+func CTAP2EncOptions() EncOptions // FIDO2 CTAP2 Canonical CBOR
+```
+
+Presets are used to create custom modes.
+
+### Custom Modes
+
+Modes are created from settings. Once created, modes have immutable settings.
+
+💡 Create the mode at startup and reuse it. It is safe for concurrent use.
+
+```Go
+// Create encoding mode.
+opts := cbor.CoreDetEncOptions() // use preset options as a starting point
+opts.Time = cbor.TimeUnix // change any settings if needed
+em, err := opts.EncMode() // create an immutable encoding mode
+
+// Reuse the encoding mode. It is safe for concurrent use.
+
+// API matches encoding/json.
+b, err := em.Marshal(v) // encode v to []byte b
+encoder := em.NewEncoder(w) // create encoder with io.Writer w
+err := encoder.Encode(v) // encode v to io.Writer w
+```
+
+Default mode and custom modes automatically apply struct tags.
+
+### User Specified Buffer for Encoding (v2.7.0)
+
+The `UserBufferEncMode` interface extends the `EncMode` interface with `MarshalToBuffer()`, which accepts a user-specified buffer instead of using the built-in buffer pool.
+
+```Go
+em, err := myEncOptions.UserBufferEncMode() // create UserBufferEncMode mode
+
+var buf bytes.Buffer
+err = em.MarshalToBuffer(v, &buf) // encode v to provided buf
+```
+
+### Struct Tags
+
+Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) reduce encoded size of structs.
+
+As a special case, struct field tag "-" omits the field.
+
+**🔎 Example encoding with struct field tag "-"**
+
+https://go.dev/play/p/aWEIFxd7InX
+
+```Go
+// https://github.com/fxamacker/cbor/issues/652
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/fxamacker/cbor/v2"
+)
+
+// The `cbor:"-"` tag omits the Type field when encoding to CBOR.
+type Entity struct {
+ _ struct{} `cbor:",toarray"`
+ ID uint64 `json:"id"`
+ Type string `cbor:"-" json:"typeOf"`
+ Name string `json:"name"`
+}
+
+func main() {
+ entity := Entity{
+ ID: 1,
+ Type: "int64",
+ Name: "Identifier",
+ }
+
+ c, _ := cbor.Marshal(entity)
+ diag, _ := cbor.Diagnose(c)
+ fmt.Printf("CBOR in hex: %x\n", c)
+ fmt.Printf("CBOR in edn: %s\n", diag)
+
+ j, _ := json.Marshal(entity)
+ fmt.Printf("JSON: %s\n", string(j))
+
+ fmt.Printf("JSON encoding is %d bytes\n", len(j))
+ fmt.Printf("CBOR encoding is %d bytes\n", len(c))
+
+ // Output:
+ // CBOR in hex: 82016a4964656e746966696572
+ // CBOR in edn: [1, "Identifier"]
+ // JSON: {"id":1,"typeOf":"int64","name":"Identifier"}
+ // JSON encoding is 45 bytes
+ // CBOR encoding is 13 bytes
+}
+```
+
+
+
+**🔎 Example encoding 3-level nested Go struct to 1 byte CBOR**
+
+https://go.dev/play/p/YxwvfPdFQG2
+
+```Go
+// Example encoding nested struct (with omitempty tag)
+// - encoding/json: 18 byte JSON
+// - fxamacker/cbor: 1 byte CBOR
+package main
+
+import (
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+
+ "github.com/fxamacker/cbor/v2"
+)
+
+type GrandChild struct {
+ Quux int `json:",omitempty"`
+}
+
+type Child struct {
+ Baz int `json:",omitempty"`
+ Qux GrandChild `json:",omitempty"`
+}
+
+type Parent struct {
+ Foo Child `json:",omitempty"`
+ Bar int `json:",omitempty"`
+}
+
+func cb() {
+ results, _ := cbor.Marshal(Parent{})
+ fmt.Println("hex(CBOR): " + hex.EncodeToString(results))
+
+ text, _ := cbor.Diagnose(results) // Diagnostic Notation
+ fmt.Println("DN: " + text)
+}
+
+func js() {
+ results, _ := json.Marshal(Parent{})
+ fmt.Println("hex(JSON): " + hex.EncodeToString(results))
+
+ text := string(results) // JSON
+ fmt.Println("JSON: " + text)
+}
+
+func main() {
+ cb()
+ fmt.Println("-------------")
+ js()
+}
+```
+
+Output (DN is Diagnostic Notation):
+```
+hex(CBOR): a0
+DN: {}
+-------------
+hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d
+JSON: {"Foo":{"Qux":{}}}
+```
+
+**🔎 Example using struct tag options**
+
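+A minimal sketch (not from the upstream README; the type names here are
+illustrative) of the `toarray` and `keyasint` options:
+
+```go
+package main
+
+import (
+	"encoding/hex"
+	"fmt"
+
+	"github.com/fxamacker/cbor/v2"
+)
+
+// toarray: encode the struct as a CBOR array, without field names.
+type Point struct {
+	_ struct{} `cbor:",toarray"`
+	X int
+	Y int
+}
+
+// keyasint: encode field names as integer map keys (common in COSE/CWT).
+type Claims struct {
+	Iss string `cbor:"1,keyasint"`
+	Exp int64  `cbor:"4,keyasint,omitempty"`
+}
+
+func main() {
+	p, _ := cbor.Marshal(Point{X: 1, Y: 2})
+	fmt.Println(hex.EncodeToString(p)) // 820102 -> [1, 2]
+
+	c, _ := cbor.Marshal(Claims{Iss: "example"})
+	fmt.Println(hex.EncodeToString(c)) // a101676578616d706c65 -> {1: "example"}
+}
+```
+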
+Struct tag options simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys.
+
+### CBOR Tags
+
+CBOR tags are specified in a `TagSet`.
+
+Custom modes can be created with a `TagSet` to handle CBOR tags.
+
+```go
+em, err := opts.EncMode() // no CBOR tags
+em, err := opts.EncModeWithTags(ts) // immutable CBOR tags
+em, err := opts.EncModeWithSharedTags(ts) // mutable shared CBOR tags
+```
+
+`TagSet` and modes using it are safe for concurrent use. Equivalent API is available for `DecMode`.
+
+**🔎 Example using TagSet and TagOptions**
+
+```go
+// Use signedCWT struct defined in "Decoding CWT" example.
+
+// Create TagSet (safe for concurrency).
+tags := cbor.NewTagSet()
+// Register tag COSE_Sign1 18 with signedCWT type.
+tags.Add(
+ cbor.TagOptions{EncTag: cbor.EncTagRequired, DecTag: cbor.DecTagRequired},
+ reflect.TypeOf(signedCWT{}),
+ 18)
+
+// Create DecMode with immutable tags.
+dm, _ := cbor.DecOptions{}.DecModeWithTags(tags)
+
+// Unmarshal to signedCWT with tag support.
+var v signedCWT
+if err := dm.Unmarshal(data, &v); err != nil {
+ return err
+}
+
+// Create EncMode with immutable tags.
+em, _ := cbor.EncOptions{}.EncModeWithTags(tags)
+
+// Marshal signedCWT with tag number.
+if data, err := em.Marshal(v); err != nil {
+ return err
+}
+```
+
+
+
+👉 `fxamacker/cbor` allows user apps to use almost any current or future CBOR tag number by implementing the `cbor.Marshaler` and `cbor.Unmarshaler` interfaces.
+
+In other words, user apps can implement `MarshalCBOR` and `UnmarshalCBOR`, and this codec's `Marshal`, `Unmarshal`, etc. call those functions automatically.
+
+The following [example](https://github.com/fxamacker/cbor/blob/master/example_embedded_json_tag_for_cbor_test.go) shows how to encode and decode a tagged CBOR data item with tag number 262. The tag content is a JSON object "embedded" as a CBOR byte string (major type 2).
+
+ 🔎 Example using Embedded JSON Tag for CBOR (tag 262)
+
+```go
+// https://github.com/fxamacker/cbor/issues/657
+
+package cbor_test
+
+// NOTE: RFC 8949 does not mention tag number 262. IANA assigned
+// CBOR tag number 262 as "Embedded JSON Object" specified by the
+// document Embedded JSON Tag for CBOR:
+//
+// "Tag 262 can be applied to a byte string (major type 2) to indicate
+// that the byte string is a JSON Object. The length of the byte string
+// indicates the content."
+//
+// For more info, see Embedded JSON Tag for CBOR at:
+// https://github.com/toravir/CBOR-Tag-Specs/blob/master/embeddedJSON.md
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+
+ "github.com/fxamacker/cbor/v2"
+)
+
+// cborTagNumForEmbeddedJSON is the CBOR tag number 262.
+const cborTagNumForEmbeddedJSON = 262
+
+// EmbeddedJSON represents a Go value to be encoded as a tagged CBOR data item
+// with tag number 262 and the tag content is a JSON object "embedded" as a
+// CBOR byte string (major type 2).
+type EmbeddedJSON struct {
+ any
+}
+
+func NewEmbeddedJSON(val any) EmbeddedJSON {
+ return EmbeddedJSON{val}
+}
+
+// MarshalCBOR encodes EmbeddedJSON to a tagged CBOR data item with the
+// tag number 262 and the tag content is a JSON object that is
+// "embedded" as a CBOR byte string.
+func (v EmbeddedJSON) MarshalCBOR() ([]byte, error) {
+ // Encode v to JSON object.
+ data, err := json.Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+
+ // Create cbor.Tag representing a tagged CBOR data item.
+ tag := cbor.Tag{
+ Number: cborTagNumForEmbeddedJSON,
+ Content: data,
+ }
+
+ // Marshal to a tagged CBOR data item.
+ return cbor.Marshal(tag)
+}
+
+// UnmarshalCBOR decodes a tagged CBOR data item to EmbeddedJSON.
+// The byte slice provided to this function must contain a single
+// tagged CBOR data item with the tag number 262 and tag content
+// must be a JSON object "embedded" as a CBOR byte string.
+func (v *EmbeddedJSON) UnmarshalCBOR(b []byte) error {
+ // Unmarshal tagged CBOR data item.
+ var tag cbor.Tag
+ if err := cbor.Unmarshal(b, &tag); err != nil {
+ return err
+ }
+
+ // Check tag number.
+ if tag.Number != cborTagNumForEmbeddedJSON {
+ return fmt.Errorf("got tag number %d, expect tag number %d", tag.Number, cborTagNumForEmbeddedJSON)
+ }
+
+ // Check tag content.
+ jsonData, isByteString := tag.Content.([]byte)
+ if !isByteString {
+ return fmt.Errorf("got tag content type %T, expect tag content []byte", tag.Content)
+ }
+
+ // Unmarshal JSON object.
+ return json.Unmarshal(jsonData, v)
+}
+
+// MarshalJSON encodes EmbeddedJSON to a JSON object.
+func (v EmbeddedJSON) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.any)
+}
+
+// UnmarshalJSON decodes a JSON object.
+func (v *EmbeddedJSON) UnmarshalJSON(b []byte) error {
+ dec := json.NewDecoder(bytes.NewReader(b))
+ dec.UseNumber()
+ return dec.Decode(&v.any)
+}
+
+func Example_embeddedJSONTagForCBOR() {
+ value := NewEmbeddedJSON(map[string]any{
+ "name": "gopher",
+ "id": json.Number("42"),
+ })
+
+ data, err := cbor.Marshal(value)
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Printf("cbor: %x\n", data)
+
+ var v EmbeddedJSON
+ err = cbor.Unmarshal(data, &v)
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Printf("%+v\n", v.any)
+ for k, v := range v.any.(map[string]any) {
+ fmt.Printf(" %s: %v (%T)\n", k, v, v)
+ }
+}
+```
+
+
+
+
+### Functions and Interfaces
+
+ 🔎 Functions and interfaces at a glance
+
+Common functions with same API as `encoding/json`:
+- `Marshal`, `Unmarshal`
+- `NewEncoder`, `(*Encoder).Encode`
+- `NewDecoder`, `(*Decoder).Decode`
+
+NOTE: `Unmarshal` returns `ExtraneousDataError` if there are remaining bytes,
+because RFC 8949 treats a CBOR data item followed by remaining bytes as malformed.
+- 💡 Use `UnmarshalFirst` to decode the first CBOR data item and return any remaining bytes, as sketched below.
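+
+A minimal sketch of the difference (the two bytes below are two encoded CBOR unsigned integers, chosen for illustration):
+
+```go
+data := []byte{0x01, 0x02} // two CBOR data items: 1, then 2
+
+var v int
+err := cbor.Unmarshal(data, &v) // returns ExtraneousDataError, v is left unchanged
+
+rest, err := cbor.UnmarshalFirst(data, &v) // err is nil, v == 1, rest == []byte{0x02}
+```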
+
+Other useful functions:
+- `Diagnose`, `DiagnoseFirst` produce human-readable [Extended Diagnostic Notation](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G) from CBOR data.
+- `UnmarshalFirst` decodes the first CBOR data item and returns any remaining bytes.
+- `Wellformed` returns nil if the CBOR data item is well-formed.
+
+Interfaces identical or comparable to Go `encoding` packages include:
+`Marshaler`, `Unmarshaler`, `BinaryMarshaler`, and `BinaryUnmarshaler`.
+
+The `RawMessage` type can be used to delay CBOR decoding or precompute CBOR encoding.
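+
+A minimal sketch of delayed decoding with `RawMessage` (the `Envelope` and `Point` types and the `data` variable are illustrative):
+
+```go
+// Envelope defers decoding of Payload until Type is known.
+type Envelope struct {
+	Type    string          `cbor:"type"`
+	Payload cbor.RawMessage `cbor:"payload"`
+}
+
+var env Envelope
+if err := cbor.Unmarshal(data, &env); err != nil {
+	return err
+}
+
+// Decode Payload into a type chosen from env.Type.
+if env.Type == "point" {
+	var p Point
+	if err := cbor.Unmarshal(env.Payload, &p); err != nil {
+		return err
+	}
+}
+```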
+
+
+
+### Security Tips
+
+🔒 Use Go's `io.LimitReader` to limit size when decoding very large or indefinite size data.
+
+Default limits may need to be increased for systems handling very large data (e.g. blockchains).
+
+`DecOptions` can be used to modify default limits for `MaxArrayElements`, `MaxMapPairs`, and `MaxNestedLevels`.
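+
+A minimal sketch combining both tips (the limits and the 1 MiB cap are illustrative; `r` and `v` are an assumed untrusted stream and destination value):
+
+```go
+opts := cbor.DecOptions{
+	MaxArrayElements: 10_000_000, // larger than the default limit
+	MaxMapPairs:      10_000_000,
+	MaxNestedLevels:  64,
+}
+dm, err := opts.DecMode()
+if err != nil {
+	return err
+}
+
+// Cap input size when decoding from an untrusted stream.
+dec := dm.NewDecoder(io.LimitReader(r, 1<<20)) // at most 1 MiB
+if err := dec.Decode(&v); err != nil {
+	return err
+}
+```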
+
+## Status
+
+[v2.9.0](https://github.com/fxamacker/cbor/releases/tag/v2.9.0) (Jul 13, 2025) improved interoperability/transcoding between CBOR & JSON, refactored tests, and improved docs.
+- Add opt-in support for `encoding.TextMarshaler` and `encoding.TextUnmarshaler` to encode and decode from CBOR text string.
+- Add opt-in support for `json.Marshaler` and `json.Unmarshaler` via user-provided transcoding function.
+- Update docs for TimeMode, Tag, RawTag, and add example for Embedded JSON Tag for CBOR.
+
+v2.9.0 passed fuzz tests and is production quality.
+
+The minimum version of Go required to build:
+- v2.8.0 and newer releases require go 1.20+.
+- v2.7.1 and older releases require go 1.17+.
+
+For more details, see [release notes](https://github.com/fxamacker/cbor/releases).
+
+### Prior Releases
+
+[v2.8.0](https://github.com/fxamacker/cbor/releases/tag/v2.8.0) (March 30, 2025) is a small release primarily to add `omitzero` option to struct field tags and fix bugs. It passed fuzz tests (billions of executions) and is production quality.
+
+[v2.7.0](https://github.com/fxamacker/cbor/releases/tag/v2.7.0) (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality.
+
+[v2.6.0](https://github.com/fxamacker/cbor/releases/tag/v2.6.0) (February 2024) adds important new features, optimizations, and bug fixes. It is especially useful to systems that need to convert data between CBOR and JSON. New options and optimizations improve handling of bignum, integers, maps, and strings.
+
+[v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) was released on August 13, 2023 with new features and important bug fixes. It is fuzz tested and production quality after an extended beta: [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023).
+
+__IMPORTANT__: 👉 Before upgrading from v2.4 or older release, please read the notable changes highlighted in the release notes. v2.5.0 is a large release with bug fixes to error handling for extraneous data in `Unmarshal`, etc. that should be reviewed before upgrading.
+
+See [v2.5.0 release notes](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) for list of new features, improvements, and bug fixes.
+
+See ["Version and API Changes"](https://github.com/fxamacker/cbor#versions-and-api-changes) section for more info about version numbering, etc.
+
+
+
+## Who uses fxamacker/cbor
+
+`fxamacker/cbor` is used in projects by Arm Ltd., Berlin Institute of Health at Charité, Chainlink, Confidential Computing Consortium, ConsenSys, EdgeX Foundry, F5, Flow Foundation, Fraunhofer‑AISEC, IBM, Kubernetes, Let's Encrypt (ISRG), Linaro, Linux Foundation, Matrix.org, Microsoft, National Cybersecurity Agency of France (govt), Netherlands (govt), Oasis Protocol, Red Hat OpenShift, Smallstep, Tailscale, Taurus SA, TIBCO, Veraison, and others.
+
+`fxamacker/cbor` passed multiple confidential security assessments in 2022. A [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) (prepared by NCC Group for Microsoft Corporation) assessed a subset of fxamacker/cbor v2.4.
+
+## Standards
+
+`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)).
+
+Notable CBOR features include:
+
+| CBOR Feature | Description |
+| :--- | :--- |
+| CBOR tags | API supports built-in and user-defined tags. |
+| Preferred serialization | Integers encode to fewest bytes. Optional float64 → float32 → float16. |
+| Map key sorting | Unsorted, length-first (Canonical CBOR), and bytewise-lexicographic (CTAP2). |
+| Duplicate map keys | Always forbid for encoding and option to allow/forbid for decoding. |
+| Indefinite length data | Option to allow/forbid for encoding and decoding. |
+| Well-formedness | Always checked and enforced. |
+| Basic validity checks | Optionally check UTF-8 validity and duplicate map keys. |
+| Security considerations | Prevent integer overflow and resource exhaustion (RFC 8949 Section 10). |
+
+Known limitations are noted in the [Limitations section](#limitations).
+
+Go nil values for slices, maps, pointers, etc. are encoded as CBOR null. Empty slices, maps, etc. are encoded as empty CBOR arrays and maps.
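+
+For example (assuming default encoding options; hex shown in comments):
+
+```go
+b1, _ := cbor.Marshal([]int(nil))       // 0xf6 (null)
+b2, _ := cbor.Marshal([]int{})          // 0x80 (empty array)
+b3, _ := cbor.Marshal(map[string]int{}) // 0xa0 (empty map)
+```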
+
+Decoder checks for all required well-formedness errors, including all "subkinds" of syntax errors and too little data.
+
+After well-formedness is verified, basic validity errors are handled as follows:
+
+* Invalid UTF-8 string: Decoder has option to check and return invalid UTF-8 string error. This check is enabled by default.
+* Duplicate keys in a map: Decoder has options to ignore or enforce rejection of duplicate map keys.
+
+When decoding well-formed CBOR arrays and maps, decoder saves the first error it encounters and continues with the next item. Options to handle this differently may be added in the future.
+
+By default, decoder treats time values of floating-point NaN and Infinity as if they are CBOR Null or CBOR Undefined.
+
+__Click to expand topic:__
+
+
+ 🔎 Duplicate Map Keys
+
+This library provides options for fast detection and rejection of duplicate map keys based on applying a Go-specific data model to CBOR's extended generic data model in order to determine duplicate vs distinct map keys. Detection relies on whether the CBOR map key would be a duplicate "key" when decoded and applied to the user-provided Go map or struct.
+
+`DupMapKeyQuiet` turns off detection of duplicate map keys. It tries to use a "keep fastest" method by choosing either "keep first" or "keep last" depending on the Go data type.
+
+`DupMapKeyEnforcedAPF` enforces detection and rejection of duplicate map keys. Decoding stops immediately and returns `DupMapKeyError` when the first duplicate key is detected. The error includes the duplicate map key and the index number.
+
+APF suffix means "Allow Partial Fill" so the destination map or struct can contain some decoded values at the time of error. It is the caller's responsibility to respond to the `DupMapKeyError` by discarding the partially filled result if that's required by their protocol.
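+
+A minimal sketch of opting in (assuming `errors` is imported and `data` holds an encoded CBOR map):
+
+```go
+dm, _ := cbor.DecOptions{DupMapKey: cbor.DupMapKeyEnforcedAPF}.DecMode()
+
+var m map[string]int
+if err := dm.Unmarshal(data, &m); err != nil {
+	var dupErr *cbor.DupMapKeyError
+	if errors.As(err, &dupErr) {
+		// m may be partially filled; discard it if the protocol requires.
+	}
+	return err
+}
+```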
+
+
+
+
+ 🔎 Tag Validity
+
+This library checks tag validity for built-in tags (currently tag numbers 0, 1, 2, 3, and 55799):
+
+* Inadmissible type for tag content
+* Inadmissible value for tag content
+
+Unknown tag data items (not tag number 0, 1, 2, 3, or 55799) are handled in two ways:
+
+* When decoding into an empty interface, the unknown tag data item is decoded into the `cbor.Tag` type, which contains the tag number and tag content. The tag content is decoded into the default Go data type for the CBOR data type.
+* When decoding into other Go types, the unknown tag data item is decoded into the specified Go type. If the Go type is registered with a tag number, the tag number can optionally be verified.
+
+Decoder also has an option to forbid tag data items (treat any tag data item as error) which is specified by protocols such as CTAP2 Canonical CBOR.
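+
+For example, a minimal sketch of forbidding tags with the `TagsMd` decoding option:
+
+```go
+dm, _ := cbor.DecOptions{TagsMd: cbor.TagsForbidden}.DecMode()
+```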
+
+For more information, see [decoding options](#decoding-options-1) and [tag options](#tag-options).
+
+
+
+## Limitations
+
+If any of these limitations prevent you from using this library, please open an issue along with a link to your project.
+
+* CBOR `Undefined` (0xf7) value decodes to Go's `nil` value. CBOR `Null` (0xf6) more closely matches Go's `nil`.
+* CBOR map keys with data types that Go doesn't support for map keys are ignored; decoding continues with the remaining items and then returns an error.
+* When decoding registered CBOR tag data to interface type, decoder creates a pointer to registered Go type matching CBOR tag number. Requiring a pointer for this is a Go limitation.
+
+## Fuzzing and Code Coverage
+
+__Code coverage__ is always 95% or higher (with `go test -cover`) when tagging a release.
+
+__Coverage-guided fuzzing__ must pass billions of execs before tagging a release. Fuzzing is done using nonpublic code that may eventually be merged into this project. Until then, reports like OpenSSF Scorecard can't detect the fuzz tests used by this project.
+
+
+
+## Versions and API Changes
+This project uses [Semantic Versioning](https://semver.org), so the API is always backwards compatible unless the major version number changes.
+
+These functions have signatures identical to encoding/json and their API will continue to match `encoding/json` even after major new releases:
+`Marshal`, `Unmarshal`, `NewEncoder`, `NewDecoder`, `(*Encoder).Encode`, and `(*Decoder).Decode`.
+
+Exclusions from SemVer:
+- Newly added API documented as "subject to change".
+- Newly added API in the master branch that has never been tagged in a non-beta release.
+- Bug fixes that change behavior without changing function parameters (e.g. returning an error for an edge case that was missed in a prior version). We try to highlight these in the release notes and provide an extended beta period. E.g. [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023).
+
+This project avoids breaking changes to the behavior of encoding and decoding functions unless required to improve conformance with supported RFCs (e.g. RFC 8949, RFC 8742, etc.). Visible changes that don't improve conformance to standards are typically made available as new opt-in settings or new functions.
+
+## Code of Conduct
+
+This project has adopted the [Contributor Covenant Code of Conduct](CODE_OF_CONDUCT.md). Contact [faye.github@gmail.com](mailto:faye.github@gmail.com) with any questions or comments.
+
+## Contributing
+
+Please open an issue before beginning work on a PR. The improvement may have already been considered, etc.
+
+For more info, see [How to Contribute](CONTRIBUTING.md).
+
+## Security Policy
+
+Security fixes are provided for the latest released version of fxamacker/cbor.
+
+For the full text of the Security Policy, see [SECURITY.md](SECURITY.md).
+
+## Acknowledgements
+
+Many thanks to all the contributors on this project!
+
+I'm especially grateful to Bastian Müller and Dieter Shirley for suggesting and collaborating on CBOR stream mode, and much more.
+
+I'm very grateful to Stefan Tatschner, Yawning Angel, Jernej Kos, x448, ZenGround0, and Jakob Borg for their contributions or support in the very early days.
+
+Big thanks to Ben Luddy for his contributions in v2.6.0 and v2.7.0.
+
+This library clearly wouldn't be possible without Carsten Bormann authoring CBOR RFCs.
+
+Special thanks to Laurence Lundblade and Jeffrey Yasskin for their help on IETF mailing list or at [7049bis](https://github.com/cbor-wg/CBORbis).
+
+Huge thanks to The Go Authors for creating a fun and practical programming language with batteries included!
+
+This library uses `x448/float16` which used to be included. As a standalone package, `x448/float16` is useful to other projects as well.
+
+## License
+
+Copyright © 2019-2024 [Faye Amacker](https://github.com/fxamacker).
+
+fxamacker/cbor is licensed under the MIT License. See [LICENSE](LICENSE) for the full license text.
+
+
diff --git a/vendor/github.com/fxamacker/cbor/v2/SECURITY.md b/vendor/github.com/fxamacker/cbor/v2/SECURITY.md
new file mode 100644
index 000000000..9c05146d1
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/SECURITY.md
@@ -0,0 +1,7 @@
+# Security Policy
+
+Security fixes are provided for the latest released version of fxamacker/cbor.
+
+If the security vulnerability is already known to the public, then you can open an issue as a bug report.
+
+To report security vulnerabilities not yet known to the public, please email faye.github@gmail.com and allow time for the problem to be resolved before reporting it to the public.
diff --git a/vendor/github.com/fxamacker/cbor/v2/bytestring.go b/vendor/github.com/fxamacker/cbor/v2/bytestring.go
new file mode 100644
index 000000000..23c5724d2
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/bytestring.go
@@ -0,0 +1,90 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+ "errors"
+)
+
+// ByteString represents CBOR byte string (major type 2). ByteString can be used
+// when using a Go []byte is not possible or convenient. For example, Go doesn't
+// allow []byte as map key, so ByteString can be used to support data formats
+// having CBOR map with byte string keys. ByteString can also be used to
+// encode invalid UTF-8 string as CBOR byte string.
+// See DecOptions.MapKeyByteString for more details.
+type ByteString string
+
+// Bytes returns bytes representing ByteString.
+func (bs ByteString) Bytes() []byte {
+ return []byte(bs)
+}
+
+// MarshalCBOR encodes ByteString as CBOR byte string (major type 2).
+func (bs ByteString) MarshalCBOR() ([]byte, error) {
+ e := getEncodeBuffer()
+ defer putEncodeBuffer(e)
+
+ // Encode length
+ encodeHead(e, byte(cborTypeByteString), uint64(len(bs)))
+
+ // Encode data
+ buf := make([]byte, e.Len()+len(bs))
+ n := copy(buf, e.Bytes())
+ copy(buf[n:], bs)
+
+ return buf, nil
+}
+
+// UnmarshalCBOR decodes CBOR byte string (major type 2) to ByteString.
+// Decoding CBOR null and CBOR undefined sets ByteString to be empty.
+//
+// Deprecated: No longer used by this codec; kept for compatibility
+// with user apps that directly call this function.
+func (bs *ByteString) UnmarshalCBOR(data []byte) error {
+ if bs == nil {
+ return errors.New("cbor.ByteString: UnmarshalCBOR on nil pointer")
+ }
+
+ d := decoder{data: data, dm: defaultDecMode}
+
+ // Check well-formedness of CBOR data item.
+ // ByteString.UnmarshalCBOR() is exported, so
+ // the codec needs to support same behavior for:
+ // - Unmarshal(data, *ByteString)
+ // - ByteString.UnmarshalCBOR(data)
+ err := d.wellformed(false, false)
+ if err != nil {
+ return err
+ }
+
+ return bs.unmarshalCBOR(data)
+}
+
+// unmarshalCBOR decodes CBOR byte string (major type 2) to ByteString.
+// Decoding CBOR null and CBOR undefined sets ByteString to be empty.
+// This function assumes data is well-formed, and does not perform bounds checking.
+// This function is called by Unmarshal().
+func (bs *ByteString) unmarshalCBOR(data []byte) error {
+ if bs == nil {
+ return errors.New("cbor.ByteString: UnmarshalCBOR on nil pointer")
+ }
+
+ // Decoding CBOR null and CBOR undefined to ByteString resets data.
+ // This behavior is similar to decoding CBOR null and CBOR undefined to []byte.
+ if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) {
+ *bs = ""
+ return nil
+ }
+
+ d := decoder{data: data, dm: defaultDecMode}
+
+ // Check if CBOR data type is byte string
+ if typ := d.nextCBORType(); typ != cborTypeByteString {
+ return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeByteString.String()}
+ }
+
+ b, _ := d.parseByteString()
+ *bs = ByteString(b)
+ return nil
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/cache.go b/vendor/github.com/fxamacker/cbor/v2/cache.go
new file mode 100644
index 000000000..5051f110f
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/cache.go
@@ -0,0 +1,370 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+type encodeFuncs struct {
+ ef encodeFunc
+ ief isEmptyFunc
+ izf isZeroFunc
+}
+
+var (
+ decodingStructTypeCache sync.Map // map[reflect.Type]*decodingStructType
+ encodingStructTypeCache sync.Map // map[reflect.Type]*encodingStructType
+ encodeFuncCache sync.Map // map[reflect.Type]encodeFuncs
+ typeInfoCache sync.Map // map[reflect.Type]*typeInfo
+)
+
+type specialType int
+
+const (
+ specialTypeNone specialType = iota
+ specialTypeUnmarshalerIface
+ specialTypeUnexportedUnmarshalerIface
+ specialTypeEmptyIface
+ specialTypeIface
+ specialTypeTag
+ specialTypeTime
+ specialTypeJSONUnmarshalerIface
+)
+
+type typeInfo struct {
+ elemTypeInfo *typeInfo
+ keyTypeInfo *typeInfo
+ typ reflect.Type
+ kind reflect.Kind
+ nonPtrType reflect.Type
+ nonPtrKind reflect.Kind
+ spclType specialType
+}
+
+func newTypeInfo(t reflect.Type) *typeInfo {
+ tInfo := typeInfo{typ: t, kind: t.Kind()}
+
+ for t.Kind() == reflect.Pointer {
+ t = t.Elem()
+ }
+
+ k := t.Kind()
+
+ tInfo.nonPtrType = t
+ tInfo.nonPtrKind = k
+
+ if k == reflect.Interface {
+ if t.NumMethod() == 0 {
+ tInfo.spclType = specialTypeEmptyIface
+ } else {
+ tInfo.spclType = specialTypeIface
+ }
+ } else if t == typeTag {
+ tInfo.spclType = specialTypeTag
+ } else if t == typeTime {
+ tInfo.spclType = specialTypeTime
+ } else if reflect.PointerTo(t).Implements(typeUnexportedUnmarshaler) {
+ tInfo.spclType = specialTypeUnexportedUnmarshalerIface
+ } else if reflect.PointerTo(t).Implements(typeUnmarshaler) {
+ tInfo.spclType = specialTypeUnmarshalerIface
+ } else if reflect.PointerTo(t).Implements(typeJSONUnmarshaler) {
+ tInfo.spclType = specialTypeJSONUnmarshalerIface
+ }
+
+ switch k {
+ case reflect.Array, reflect.Slice:
+ tInfo.elemTypeInfo = getTypeInfo(t.Elem())
+ case reflect.Map:
+ tInfo.keyTypeInfo = getTypeInfo(t.Key())
+ tInfo.elemTypeInfo = getTypeInfo(t.Elem())
+ }
+
+ return &tInfo
+}
+
+type decodingStructType struct {
+ fields fields
+ fieldIndicesByName map[string]int
+ err error
+ toArray bool
+}
+
+// The stdlib errors.Join was introduced in Go 1.20, and we still support Go 1.17, so instead,
+// here's a very basic implementation of an aggregated error.
+type multierror []error
+
+func (m multierror) Error() string {
+ var sb strings.Builder
+ for i, err := range m {
+ sb.WriteString(err.Error())
+ if i < len(m)-1 {
+ sb.WriteString(", ")
+ }
+ }
+ return sb.String()
+}
+
+func getDecodingStructType(t reflect.Type) *decodingStructType {
+ if v, _ := decodingStructTypeCache.Load(t); v != nil {
+ return v.(*decodingStructType)
+ }
+
+ flds, structOptions := getFields(t)
+
+ toArray := hasToArrayOption(structOptions)
+
+ var errs []error
+ for i := 0; i < len(flds); i++ {
+ if flds[i].keyAsInt {
+ nameAsInt, numErr := strconv.Atoi(flds[i].name)
+ if numErr != nil {
+ errs = append(errs, errors.New("cbor: failed to parse field name \""+flds[i].name+"\" to int ("+numErr.Error()+")"))
+ break
+ }
+ flds[i].nameAsInt = int64(nameAsInt)
+ }
+
+ flds[i].typInfo = getTypeInfo(flds[i].typ)
+ }
+
+ fieldIndicesByName := make(map[string]int, len(flds))
+ for i, fld := range flds {
+ if _, ok := fieldIndicesByName[fld.name]; ok {
+ errs = append(errs, fmt.Errorf("cbor: two or more fields of %v have the same name %q", t, fld.name))
+ continue
+ }
+ fieldIndicesByName[fld.name] = i
+ }
+
+ var err error
+ {
+ var multi multierror
+ for _, each := range errs {
+ if each != nil {
+ multi = append(multi, each)
+ }
+ }
+ if len(multi) == 1 {
+ err = multi[0]
+ } else if len(multi) > 1 {
+ err = multi
+ }
+ }
+
+ structType := &decodingStructType{
+ fields: flds,
+ fieldIndicesByName: fieldIndicesByName,
+ err: err,
+ toArray: toArray,
+ }
+ decodingStructTypeCache.Store(t, structType)
+ return structType
+}
+
+type encodingStructType struct {
+ fields fields
+ bytewiseFields fields
+ lengthFirstFields fields
+ omitEmptyFieldsIdx []int
+ err error
+ toArray bool
+}
+
+func (st *encodingStructType) getFields(em *encMode) fields {
+ switch em.sort {
+ case SortNone, SortFastShuffle:
+ return st.fields
+ case SortLengthFirst:
+ return st.lengthFirstFields
+ default:
+ return st.bytewiseFields
+ }
+}
+
+type bytewiseFieldSorter struct {
+ fields fields
+}
+
+func (x *bytewiseFieldSorter) Len() int {
+ return len(x.fields)
+}
+
+func (x *bytewiseFieldSorter) Swap(i, j int) {
+ x.fields[i], x.fields[j] = x.fields[j], x.fields[i]
+}
+
+func (x *bytewiseFieldSorter) Less(i, j int) bool {
+ return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0
+}
+
+type lengthFirstFieldSorter struct {
+ fields fields
+}
+
+func (x *lengthFirstFieldSorter) Len() int {
+ return len(x.fields)
+}
+
+func (x *lengthFirstFieldSorter) Swap(i, j int) {
+ x.fields[i], x.fields[j] = x.fields[j], x.fields[i]
+}
+
+func (x *lengthFirstFieldSorter) Less(i, j int) bool {
+ if len(x.fields[i].cborName) != len(x.fields[j].cborName) {
+ return len(x.fields[i].cborName) < len(x.fields[j].cborName)
+ }
+ return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0
+}
+
+func getEncodingStructType(t reflect.Type) (*encodingStructType, error) {
+ if v, _ := encodingStructTypeCache.Load(t); v != nil {
+ structType := v.(*encodingStructType)
+ return structType, structType.err
+ }
+
+ flds, structOptions := getFields(t)
+
+ if hasToArrayOption(structOptions) {
+ return getEncodingStructToArrayType(t, flds)
+ }
+
+ var err error
+ var hasKeyAsInt bool
+ var hasKeyAsStr bool
+ var omitEmptyIdx []int
+ e := getEncodeBuffer()
+ for i := 0; i < len(flds); i++ {
+ // Get field's encodeFunc
+ flds[i].ef, flds[i].ief, flds[i].izf = getEncodeFunc(flds[i].typ)
+ if flds[i].ef == nil {
+ err = &UnsupportedTypeError{t}
+ break
+ }
+
+ // Encode field name
+ if flds[i].keyAsInt {
+ nameAsInt, numErr := strconv.Atoi(flds[i].name)
+ if numErr != nil {
+ err = errors.New("cbor: failed to parse field name \"" + flds[i].name + "\" to int (" + numErr.Error() + ")")
+ break
+ }
+ flds[i].nameAsInt = int64(nameAsInt)
+ if nameAsInt >= 0 {
+ encodeHead(e, byte(cborTypePositiveInt), uint64(nameAsInt))
+ } else {
+ n := nameAsInt*(-1) - 1
+ encodeHead(e, byte(cborTypeNegativeInt), uint64(n))
+ }
+ flds[i].cborName = make([]byte, e.Len())
+ copy(flds[i].cborName, e.Bytes())
+ e.Reset()
+
+ hasKeyAsInt = true
+ } else {
+ encodeHead(e, byte(cborTypeTextString), uint64(len(flds[i].name)))
+ flds[i].cborName = make([]byte, e.Len()+len(flds[i].name))
+ n := copy(flds[i].cborName, e.Bytes())
+ copy(flds[i].cborName[n:], flds[i].name)
+ e.Reset()
+
+ // If cborName contains a text string, then cborNameByteString contains a
+ // string that has the byte string major type but is otherwise identical to
+ // cborName.
+ flds[i].cborNameByteString = make([]byte, len(flds[i].cborName))
+ copy(flds[i].cborNameByteString, flds[i].cborName)
+ // Reset encoded CBOR type to byte string, preserving the "additional
+ // information" bits:
+ flds[i].cborNameByteString[0] = byte(cborTypeByteString) |
+ getAdditionalInformation(flds[i].cborNameByteString[0])
+
+ hasKeyAsStr = true
+ }
+
+ // Check if field can be omitted when empty
+ if flds[i].omitEmpty {
+ omitEmptyIdx = append(omitEmptyIdx, i)
+ }
+ }
+ putEncodeBuffer(e)
+
+ if err != nil {
+ structType := &encodingStructType{err: err}
+ encodingStructTypeCache.Store(t, structType)
+ return structType, structType.err
+ }
+
+ // Sort fields by canonical order
+ bytewiseFields := make(fields, len(flds))
+ copy(bytewiseFields, flds)
+ sort.Sort(&bytewiseFieldSorter{bytewiseFields})
+
+ lengthFirstFields := bytewiseFields
+ if hasKeyAsInt && hasKeyAsStr {
+ lengthFirstFields = make(fields, len(flds))
+ copy(lengthFirstFields, flds)
+ sort.Sort(&lengthFirstFieldSorter{lengthFirstFields})
+ }
+
+ structType := &encodingStructType{
+ fields: flds,
+ bytewiseFields: bytewiseFields,
+ lengthFirstFields: lengthFirstFields,
+ omitEmptyFieldsIdx: omitEmptyIdx,
+ }
+
+ encodingStructTypeCache.Store(t, structType)
+ return structType, structType.err
+}
+
+func getEncodingStructToArrayType(t reflect.Type, flds fields) (*encodingStructType, error) {
+ for i := 0; i < len(flds); i++ {
+ // Get field's encodeFunc
+ flds[i].ef, flds[i].ief, flds[i].izf = getEncodeFunc(flds[i].typ)
+ if flds[i].ef == nil {
+ structType := &encodingStructType{err: &UnsupportedTypeError{t}}
+ encodingStructTypeCache.Store(t, structType)
+ return structType, structType.err
+ }
+ }
+
+ structType := &encodingStructType{
+ fields: flds,
+ toArray: true,
+ }
+ encodingStructTypeCache.Store(t, structType)
+ return structType, structType.err
+}
+
+func getEncodeFunc(t reflect.Type) (encodeFunc, isEmptyFunc, isZeroFunc) {
+ if v, _ := encodeFuncCache.Load(t); v != nil {
+ fs := v.(encodeFuncs)
+ return fs.ef, fs.ief, fs.izf
+ }
+ ef, ief, izf := getEncodeFuncInternal(t)
+ encodeFuncCache.Store(t, encodeFuncs{ef, ief, izf})
+ return ef, ief, izf
+}
+
+func getTypeInfo(t reflect.Type) *typeInfo {
+ if v, _ := typeInfoCache.Load(t); v != nil {
+ return v.(*typeInfo)
+ }
+ tInfo := newTypeInfo(t)
+ typeInfoCache.Store(t, tInfo)
+ return tInfo
+}
+
+func hasToArrayOption(tag string) bool {
+ s := ",toarray"
+ idx := strings.Index(tag, s)
+ return idx >= 0 && (len(tag) == idx+len(s) || tag[idx+len(s)] == ',')
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/common.go b/vendor/github.com/fxamacker/cbor/v2/common.go
new file mode 100644
index 000000000..9cf33cd20
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/common.go
@@ -0,0 +1,191 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+ "fmt"
+ "io"
+ "strconv"
+)
+
+type cborType uint8
+
+const (
+ cborTypePositiveInt cborType = 0x00
+ cborTypeNegativeInt cborType = 0x20
+ cborTypeByteString cborType = 0x40
+ cborTypeTextString cborType = 0x60
+ cborTypeArray cborType = 0x80
+ cborTypeMap cborType = 0xa0
+ cborTypeTag cborType = 0xc0
+ cborTypePrimitives cborType = 0xe0
+)
+
+func (t cborType) String() string {
+ switch t {
+ case cborTypePositiveInt:
+ return "positive integer"
+ case cborTypeNegativeInt:
+ return "negative integer"
+ case cborTypeByteString:
+ return "byte string"
+ case cborTypeTextString:
+ return "UTF-8 text string"
+ case cborTypeArray:
+ return "array"
+ case cborTypeMap:
+ return "map"
+ case cborTypeTag:
+ return "tag"
+ case cborTypePrimitives:
+ return "primitives"
+ default:
+ return "Invalid type " + strconv.Itoa(int(t))
+ }
+}
+
+type additionalInformation uint8
+
+const (
+ maxAdditionalInformationWithoutArgument = 23
+ additionalInformationWith1ByteArgument = 24
+ additionalInformationWith2ByteArgument = 25
+ additionalInformationWith4ByteArgument = 26
+ additionalInformationWith8ByteArgument = 27
+
+ // For major type 7.
+ additionalInformationAsFalse = 20
+ additionalInformationAsTrue = 21
+ additionalInformationAsNull = 22
+ additionalInformationAsUndefined = 23
+ additionalInformationAsFloat16 = 25
+ additionalInformationAsFloat32 = 26
+ additionalInformationAsFloat64 = 27
+
+ // For major type 2, 3, 4, 5.
+ additionalInformationAsIndefiniteLengthFlag = 31
+)
+
+const (
+ maxSimpleValueInAdditionalInformation = 23
+ minSimpleValueIn1ByteArgument = 32
+)
+
+func (ai additionalInformation) isIndefiniteLength() bool {
+ return ai == additionalInformationAsIndefiniteLengthFlag
+}
+
+const (
+ // From RFC 8949 Section 3:
+ // "The initial byte of each encoded data item contains both information about the major type
+ // (the high-order 3 bits, described in Section 3.1) and additional information
+ // (the low-order 5 bits)."
+
+ // typeMask is used to extract major type in initial byte of encoded data item.
+ typeMask = 0xe0
+
+ // additionalInformationMask is used to extract additional information in initial byte of encoded data item.
+ additionalInformationMask = 0x1f
+)
+
+func getType(raw byte) cborType {
+ return cborType(raw & typeMask)
+}
+
+func getAdditionalInformation(raw byte) byte {
+ return raw & additionalInformationMask
+}
+
+func isBreakFlag(raw byte) bool {
+ return raw == cborBreakFlag
+}
+
+func parseInitialByte(b byte) (t cborType, ai byte) {
+ return getType(b), getAdditionalInformation(b)
+}
+
+const (
+ tagNumRFC3339Time = 0
+ tagNumEpochTime = 1
+ tagNumUnsignedBignum = 2
+ tagNumNegativeBignum = 3
+ tagNumExpectedLaterEncodingBase64URL = 21
+ tagNumExpectedLaterEncodingBase64 = 22
+ tagNumExpectedLaterEncodingBase16 = 23
+ tagNumSelfDescribedCBOR = 55799
+)
+
+const (
+ cborBreakFlag = byte(0xff)
+ cborByteStringWithIndefiniteLengthHead = byte(0x5f)
+ cborTextStringWithIndefiniteLengthHead = byte(0x7f)
+ cborArrayWithIndefiniteLengthHead = byte(0x9f)
+ cborMapWithIndefiniteLengthHead = byte(0xbf)
+)
+
+var (
+ cborFalse = []byte{0xf4}
+ cborTrue = []byte{0xf5}
+ cborNil = []byte{0xf6}
+ cborNaN = []byte{0xf9, 0x7e, 0x00}
+ cborPositiveInfinity = []byte{0xf9, 0x7c, 0x00}
+ cborNegativeInfinity = []byte{0xf9, 0xfc, 0x00}
+)
+
+// validBuiltinTag checks that supported built-in tag numbers are followed by expected content types.
+func validBuiltinTag(tagNum uint64, contentHead byte) error {
+ t := getType(contentHead)
+ switch tagNum {
+ case tagNumRFC3339Time:
+ // Tag content (date/time text string in RFC 3339 format) must be string type.
+ if t != cborTypeTextString {
+ return newInadmissibleTagContentTypeError(
+ tagNumRFC3339Time,
+ "text string",
+ t.String())
+ }
+ return nil
+
+ case tagNumEpochTime:
+ // Tag content (epoch date/time) must be uint, int, or float type.
+ if t != cborTypePositiveInt && t != cborTypeNegativeInt && (contentHead < 0xf9 || contentHead > 0xfb) {
+ return newInadmissibleTagContentTypeError(
+ tagNumEpochTime,
+ "integer or floating-point number",
+ t.String())
+ }
+ return nil
+
+ case tagNumUnsignedBignum, tagNumNegativeBignum:
+ // Tag content (bignum) must be byte type.
+ if t != cborTypeByteString {
+ return newInadmissibleTagContentTypeErrorf(
+ fmt.Sprintf(
+ "tag number %d or %d must be followed by byte string, got %s",
+ tagNumUnsignedBignum,
+ tagNumNegativeBignum,
+ t.String(),
+ ))
+ }
+ return nil
+
+ case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16:
+ // From RFC 8949 3.4.5.2:
+ // The data item tagged can be a byte string or any other data item. In the latter
+ // case, the tag applies to all of the byte string data items contained in the data
+ // item, except for those contained in a nested data item tagged with an expected
+ // conversion.
+ return nil
+ }
+
+ return nil
+}
+
+// Transcoder is a scheme for transcoding a single CBOR encoded data item to or from a different
+// data format.
+type Transcoder interface {
+ // Transcode reads the data item in its source format from a Reader and writes a
+ // corresponding representation in its destination format to a Writer.
+ Transcode(dst io.Writer, src io.Reader) error
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/decode.go b/vendor/github.com/fxamacker/cbor/v2/decode.go
new file mode 100644
index 000000000..f0bdc3b38
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/decode.go
@@ -0,0 +1,3318 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/base64"
+ "encoding/binary"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "math/big"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/x448/float16"
+)
+
+// Unmarshal parses the CBOR-encoded data into the value pointed to by v
+// using default decoding options. If v is nil, not a pointer, or
+// a nil pointer, Unmarshal returns an error.
+//
+// To unmarshal CBOR into a value implementing the Unmarshaler interface,
+// Unmarshal calls that value's UnmarshalCBOR method with a valid
+// CBOR value.
+//
+// To unmarshal CBOR byte string into a value implementing the
+// encoding.BinaryUnmarshaler interface, Unmarshal calls that value's
+// UnmarshalBinary method with decoded CBOR byte string.
+//
+// To unmarshal CBOR into a pointer, Unmarshal sets the pointer to nil
+// if CBOR data is null (0xf6) or undefined (0xf7). Otherwise, Unmarshal
+// unmarshals CBOR into the value pointed to by the pointer. If the
+// pointer is nil, Unmarshal creates a new value for it to point to.
+//
+// To unmarshal CBOR into an empty interface value, Unmarshal uses the
+// following rules:
+//
+// CBOR booleans decode to bool.
+// CBOR positive integers decode to uint64.
+// CBOR negative integers decode to int64 (big.Int if value overflows).
+// CBOR floating points decode to float64.
+// CBOR byte strings decode to []byte.
+// CBOR text strings decode to string.
+// CBOR arrays decode to []interface{}.
+// CBOR maps decode to map[interface{}]interface{}.
+// CBOR null and undefined values decode to nil.
+// CBOR times (tag 0 and 1) decode to time.Time.
+// CBOR bignums (tag 2 and 3) decode to big.Int.
+// CBOR tags with an unrecognized number decode to cbor.Tag
+//
+// To unmarshal a CBOR array into a slice, Unmarshal allocates a new slice
+// if the CBOR array is empty or slice capacity is less than CBOR array length.
+// Otherwise Unmarshal overwrites existing elements, and sets slice length
+// to CBOR array length.
+//
+// To unmarshal a CBOR array into a Go array, Unmarshal decodes CBOR array
+// elements into Go array elements. If the Go array is smaller than the
+// CBOR array, the extra CBOR array elements are discarded. If the CBOR
+// array is smaller than the Go array, the extra Go array elements are
+// set to zero values.
+//
+// To unmarshal a CBOR array into a struct, struct must have a special field "_"
+// with struct tag `cbor:",toarray"`. Go array elements are decoded into struct
+// fields. Any "omitempty" struct field tag option is ignored in this case.
+//
+// To unmarshal a CBOR map into a map, Unmarshal allocates a new map only if the
+// map is nil. Otherwise Unmarshal reuses the existing map and keeps existing
+// entries. Unmarshal stores key-value pairs from the CBOR map into Go map.
+// See DecOptions.DupMapKey to enable duplicate map key detection.
+//
+// To unmarshal a CBOR map into a struct, Unmarshal matches CBOR map keys to the
+// keys in the following priority:
+//
+// 1. "cbor" key in struct field tag,
+// 2. "json" key in struct field tag,
+// 3. struct field name.
+//
+// Unmarshal tries an exact match for field name, then a case-insensitive match.
+// Map key-value pairs without corresponding struct fields are ignored. See
+// DecOptions.ExtraReturnErrors to return error at unknown field.
+//
+// To unmarshal a CBOR text string into a time.Time value, Unmarshal parses text
+// string formatted in RFC3339. To unmarshal a CBOR integer/float into a
+// time.Time value, Unmarshal creates a Unix time with integer/float as seconds
+// and fractional seconds since January 1, 1970 UTC. As a special case, Infinite
+// and NaN float values decode to time.Time's zero value.
+//
+// To unmarshal CBOR null (0xf6) and undefined (0xf7) values into a
+// slice/map/pointer, Unmarshal sets Go value to nil. Because null is often
+// used to mean "not present", unmarshaling CBOR null and undefined value
+// into any other Go type has no effect and returns no error.
+//
+// Unmarshal supports CBOR tag 55799 (self-describe CBOR), tag 0 and 1 (time),
+// and tag 2 and 3 (bignum).
+//
+// Unmarshal returns ExtraneousDataError error (without decoding into v)
+// if there are any remaining bytes following the first valid CBOR data item.
+// See UnmarshalFirst, if you want to unmarshal only the first
+// CBOR data item without ExtraneousDataError caused by remaining bytes.
+func Unmarshal(data []byte, v any) error {
+ return defaultDecMode.Unmarshal(data, v)
+}
+
+// UnmarshalFirst parses the first CBOR data item into the value pointed to by v
+// using default decoding options. Any remaining bytes are returned in rest.
+//
+// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error.
+//
+// See the documentation for Unmarshal for details.
+func UnmarshalFirst(data []byte, v any) (rest []byte, err error) {
+ return defaultDecMode.UnmarshalFirst(data, v)
+}
+
+// Valid checks whether data is a well-formed encoded CBOR data item and
+// that it complies with default restrictions such as MaxNestedLevels,
+// MaxArrayElements, MaxMapPairs, etc.
+//
+// If there are any remaining bytes after the CBOR data item,
+// an ExtraneousDataError is returned.
+//
+// WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity)
+// and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed".
+//
+// Deprecated: Valid is kept for compatibility and should not be used.
+// Use Wellformed instead because it has a more appropriate name.
+func Valid(data []byte) error {
+ return defaultDecMode.Valid(data)
+}
+
+// Wellformed checks whether data is a well-formed encoded CBOR data item and
+// that it complies with default restrictions such as MaxNestedLevels,
+// MaxArrayElements, MaxMapPairs, etc.
+//
+// If there are any remaining bytes after the CBOR data item,
+// an ExtraneousDataError is returned.
+func Wellformed(data []byte) error {
+ return defaultDecMode.Wellformed(data)
+}
+
+// Unmarshaler is the interface implemented by types that wish to unmarshal
+// CBOR data themselves. The input is a valid CBOR value. UnmarshalCBOR
+// must copy the CBOR data if it needs to use it after returning.
+type Unmarshaler interface {
+ UnmarshalCBOR([]byte) error
+}
+
+type unmarshaler interface {
+ unmarshalCBOR([]byte) error
+}
+
+// InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
+type InvalidUnmarshalError struct {
+ s string
+}
+
+func (e *InvalidUnmarshalError) Error() string {
+ return e.s
+}
+
+// UnmarshalTypeError describes a CBOR value that can't be decoded to a Go type.
+type UnmarshalTypeError struct {
+ CBORType string // type of CBOR value
+ GoType string // type of Go value it could not be decoded into
+ StructFieldName string // name of the struct field holding the Go value (optional)
+ errorMsg string // additional error message (optional)
+}
+
+func (e *UnmarshalTypeError) Error() string {
+ var s string
+ if e.StructFieldName != "" {
+ s = "cbor: cannot unmarshal " + e.CBORType + " into Go struct field " + e.StructFieldName + " of type " + e.GoType
+ } else {
+ s = "cbor: cannot unmarshal " + e.CBORType + " into Go value of type " + e.GoType
+ }
+ if e.errorMsg != "" {
+ s += " (" + e.errorMsg + ")"
+ }
+ return s
+}
+
+// InvalidMapKeyTypeError describes invalid Go map key type when decoding CBOR map.
+// For example, Go doesn't allow slice as map key.
+type InvalidMapKeyTypeError struct {
+ GoType string
+}
+
+func (e *InvalidMapKeyTypeError) Error() string {
+ return "cbor: invalid map key type: " + e.GoType
+}
+
+// DupMapKeyError describes detected duplicate map key in CBOR map.
+type DupMapKeyError struct {
+ Key any
+ Index int
+}
+
+func (e *DupMapKeyError) Error() string {
+ return fmt.Sprintf("cbor: found duplicate map key %#v at map element index %d", e.Key, e.Index)
+}
+
+// UnknownFieldError describes detected unknown field in CBOR map when decoding to Go struct.
+type UnknownFieldError struct {
+ Index int
+}
+
+func (e *UnknownFieldError) Error() string {
+ return fmt.Sprintf("cbor: found unknown field at map element index %d", e.Index)
+}
+
+// UnacceptableDataItemError is returned when unmarshaling a CBOR input that contains a data item
+// that is not acceptable to a specific CBOR-based application protocol ("invalid or unexpected" as
+// described in RFC 8949 Section 5 Paragraph 3).
+type UnacceptableDataItemError struct {
+ CBORType string
+ Message string
+}
+
+func (e UnacceptableDataItemError) Error() string {
+ return fmt.Sprintf("cbor: data item of cbor type %s is not accepted by protocol: %s", e.CBORType, e.Message)
+}
+
+// ByteStringExpectedFormatError is returned when unmarshaling CBOR byte string fails when
+// using non-default ByteStringExpectedFormat decoding option that makes decoder expect
+// a specified format such as base64, hex, etc.
+type ByteStringExpectedFormatError struct {
+ expectedFormatOption ByteStringExpectedFormatMode
+ err error
+}
+
+func newByteStringExpectedFormatError(expectedFormatOption ByteStringExpectedFormatMode, err error) *ByteStringExpectedFormatError {
+ return &ByteStringExpectedFormatError{expectedFormatOption, err}
+}
+
+func (e *ByteStringExpectedFormatError) Error() string {
+ switch e.expectedFormatOption {
+ case ByteStringExpectedBase64URL:
+ return fmt.Sprintf("cbor: failed to decode base64url from byte string: %s", e.err)
+
+ case ByteStringExpectedBase64:
+ return fmt.Sprintf("cbor: failed to decode base64 from byte string: %s", e.err)
+
+ case ByteStringExpectedBase16:
+ return fmt.Sprintf("cbor: failed to decode hex from byte string: %s", e.err)
+
+ default:
+ return fmt.Sprintf("cbor: failed to decode byte string in expected format %d: %s", e.expectedFormatOption, e.err)
+ }
+}
+
+func (e *ByteStringExpectedFormatError) Unwrap() error {
+ return e.err
+}
+
+// InadmissibleTagContentTypeError is returned when unmarshaling built-in CBOR tags
+// fails because of inadmissible type for tag content. Currently, the built-in
+// CBOR tags in this codec are tags 0-3 and 21-23.
+// See "Tag validity" in RFC 8949 Section 5.3.2.
+type InadmissibleTagContentTypeError struct {
+ s string
+ tagNum int
+ expectedTagContentType string
+ gotTagContentType string
+}
+
+func newInadmissibleTagContentTypeError(
+ tagNum int,
+ expectedTagContentType string,
+ gotTagContentType string,
+) *InadmissibleTagContentTypeError {
+ return &InadmissibleTagContentTypeError{
+ tagNum: tagNum,
+ expectedTagContentType: expectedTagContentType,
+ gotTagContentType: gotTagContentType,
+ }
+}
+
+func newInadmissibleTagContentTypeErrorf(s string) *InadmissibleTagContentTypeError {
+ return &InadmissibleTagContentTypeError{s: "cbor: " + s} //nolint:goconst // ignore "cbor"
+}
+
+func (e *InadmissibleTagContentTypeError) Error() string {
+ if e.s == "" {
+ return fmt.Sprintf(
+ "cbor: tag number %d must be followed by %s, got %s",
+ e.tagNum,
+ e.expectedTagContentType,
+ e.gotTagContentType,
+ )
+ }
+ return e.s
+}
+
+// DupMapKeyMode specifies how to enforce duplicate map key. Two map keys are considered duplicates if:
+// 1. When decoding into a struct, both keys match the same struct field. The keys are also
+// considered duplicates if neither matches any field and decoding to interface{} would produce
+// equal (==) values for both keys.
+// 2. When decoding into a map, both keys are equal (==) when decoded into values of the
+// destination map's key type.
+type DupMapKeyMode int
+
+const (
+ // DupMapKeyQuiet doesn't enforce duplicate map key. Decoder quietly (no error)
+ // uses faster of "keep first" or "keep last" depending on Go data type and other factors.
+ DupMapKeyQuiet DupMapKeyMode = iota
+
+ // DupMapKeyEnforcedAPF enforces detection and rejection of duplicate map keys.
+ // APF means "Allow Partial Fill" and the destination map or struct can be partially filled.
+ // If a duplicate map key is detected, DupMapKeyError is returned without further decoding
+ // of the map. It's the caller's responsibility to respond to DupMapKeyError by
+ // discarding the partially filled result if their protocol requires it.
+ // WARNING: using DupMapKeyEnforcedAPF will decrease performance and increase memory use.
+ DupMapKeyEnforcedAPF
+
+ maxDupMapKeyMode
+)
+
+func (dmkm DupMapKeyMode) valid() bool {
+ return dmkm >= 0 && dmkm < maxDupMapKeyMode
+}
+
+// IndefLengthMode specifies whether to allow indefinite length items.
+type IndefLengthMode int
+
+const (
+ // IndefLengthAllowed allows indefinite length items.
+ IndefLengthAllowed IndefLengthMode = iota
+
+ // IndefLengthForbidden disallows indefinite length items.
+ IndefLengthForbidden
+
+ maxIndefLengthMode
+)
+
+func (m IndefLengthMode) valid() bool {
+ return m >= 0 && m < maxIndefLengthMode
+}
+
+// TagsMode specifies whether to allow CBOR tags.
+type TagsMode int
+
+const (
+ // TagsAllowed allows CBOR tags.
+ TagsAllowed TagsMode = iota
+
+ // TagsForbidden disallows CBOR tags.
+ TagsForbidden
+
+ maxTagsMode
+)
+
+func (tm TagsMode) valid() bool {
+ return tm >= 0 && tm < maxTagsMode
+}
+
+// IntDecMode specifies which Go type (int64, uint64, or big.Int) should
+// be used when decoding CBOR integers (major type 0 and 1) to Go interface{}.
+type IntDecMode int
+
+const (
+ // IntDecConvertNone affects how CBOR integers (major type 0 and 1) decode to Go interface{}.
+ // It decodes CBOR unsigned integer (major type 0) to:
+ // - uint64
+ // It decodes CBOR negative integer (major type 1) to:
+ // - int64 if value fits
+ // - big.Int or *big.Int (see BigIntDecMode) if value doesn't fit into int64
+ IntDecConvertNone IntDecMode = iota
+
+ // IntDecConvertSigned affects how CBOR integers (major type 0 and 1) decode to Go interface{}.
+ // It decodes CBOR integers (major type 0 and 1) to:
+ // - int64 if value fits
+ // - big.Int or *big.Int (see BigIntDecMode) if value < math.MinInt64
+ // - return UnmarshalTypeError if value > math.MaxInt64
+ // Deprecated: IntDecConvertSigned should not be used.
+ // Please use other options, such as IntDecConvertSignedOrError, IntDecConvertSignedOrBigInt, IntDecConvertNone.
+ IntDecConvertSigned
+
+ // IntDecConvertSignedOrFail affects how CBOR integers (major type 0 and 1) decode to Go interface{}.
+ // It decodes CBOR integers (major type 0 and 1) to:
+ // - int64 if value fits
+ // - return UnmarshalTypeError if value doesn't fit into int64
+ IntDecConvertSignedOrFail
+
+ // IntDecConvertSignedOrBigInt affects how CBOR integers (major type 0 and 1) decode to Go interface{}.
+ // It makes CBOR integers (major type 0 and 1) decode to:
+ // - int64 if value fits
+ // - big.Int or *big.Int (see BigIntDecMode) if value doesn't fit into int64
+ IntDecConvertSignedOrBigInt
+
+ maxIntDec
+)
+
+func (idm IntDecMode) valid() bool {
+ return idm >= 0 && idm < maxIntDec
+}
+
+// MapKeyByteStringMode specifies how to decode CBOR byte string (major type 2)
+// as Go map key when decoding CBOR map key into an empty Go interface value.
+// Specifically, this option applies when decoding CBOR map into
+// - Go empty interface, or
+// - Go map with empty interface as key type.
+// The CBOR map key types handled by this option are
+// - byte string
+// - tagged byte string
+// - nested tagged byte string
+type MapKeyByteStringMode int
+
+const (
+ // MapKeyByteStringAllowed allows CBOR byte string to be decoded as Go map key.
+ // Since Go doesn't allow []byte as map key, CBOR byte string is decoded to
+ // ByteString which has underlying string type.
+ // This is the default setting.
+ MapKeyByteStringAllowed MapKeyByteStringMode = iota
+
+ // MapKeyByteStringForbidden forbids CBOR byte string being decoded as Go map key.
+ // Attempting to decode CBOR byte string as map key into empty interface value
+ // returns a decoding error.
+ MapKeyByteStringForbidden
+
+ maxMapKeyByteStringMode
+)
+
+func (mkbsm MapKeyByteStringMode) valid() bool {
+ return mkbsm >= 0 && mkbsm < maxMapKeyByteStringMode
+}
+
+// ExtraDecErrorCond specifies extra conditions that should be treated as errors.
+type ExtraDecErrorCond uint
+
+// ExtraDecErrorNone indicates no extra error condition.
+const ExtraDecErrorNone ExtraDecErrorCond = 0
+
+const (
+ // ExtraDecErrorUnknownField indicates error condition when destination
+ // Go struct doesn't have a field matching a CBOR map key.
+ ExtraDecErrorUnknownField ExtraDecErrorCond = 1 << iota
+
+ maxExtraDecError
+)
+
+func (ec ExtraDecErrorCond) valid() bool {
+ return ec < maxExtraDecError
+}
+
+// UTF8Mode option specifies if decoder should
+// decode CBOR Text containing invalid UTF-8 string.
+type UTF8Mode int
+
+const (
+ // UTF8RejectInvalid rejects CBOR Text containing
+ // invalid UTF-8 string.
+ UTF8RejectInvalid UTF8Mode = iota
+
+ // UTF8DecodeInvalid allows decoding CBOR Text containing
+ // invalid UTF-8 string.
+ UTF8DecodeInvalid
+
+ maxUTF8Mode
+)
+
+func (um UTF8Mode) valid() bool {
+ return um >= 0 && um < maxUTF8Mode
+}
+
+// FieldNameMatchingMode specifies how string keys in CBOR maps are matched to Go struct field names.
+type FieldNameMatchingMode int
+
+const (
+ // FieldNameMatchingPreferCaseSensitive prefers to decode map items into struct fields whose names (or tag
+ // names) exactly match the item's key. If there is no such field, a map item will be decoded into a field whose
+ // name is a case-insensitive match for the item's key.
+ FieldNameMatchingPreferCaseSensitive FieldNameMatchingMode = iota
+
+ // FieldNameMatchingCaseSensitive decodes map items only into a struct field whose name (or tag name) is an
+ // exact match for the item's key.
+ FieldNameMatchingCaseSensitive
+
+ maxFieldNameMatchingMode
+)
+
+func (fnmm FieldNameMatchingMode) valid() bool {
+ return fnmm >= 0 && fnmm < maxFieldNameMatchingMode
+}
+
+// BigIntDecMode specifies how to decode CBOR bignum to Go interface{}.
+type BigIntDecMode int
+
+const (
+ // BigIntDecodeValue makes CBOR bignum decode to big.Int (instead of *big.Int)
+ // when unmarshaling into a Go interface{}.
+ BigIntDecodeValue BigIntDecMode = iota
+
+ // BigIntDecodePointer makes CBOR bignum decode to *big.Int when
+ // unmarshaling into a Go interface{}.
+ BigIntDecodePointer
+
+ maxBigIntDecMode
+)
+
+func (bidm BigIntDecMode) valid() bool {
+ return bidm >= 0 && bidm < maxBigIntDecMode
+}
+
+// ByteStringToStringMode specifies the behavior when decoding a CBOR byte string into a Go string.
+type ByteStringToStringMode int
+
+const (
+ // ByteStringToStringForbidden generates an error on an attempt to decode a CBOR byte string into a Go string.
+ ByteStringToStringForbidden ByteStringToStringMode = iota
+
+ // ByteStringToStringAllowed permits decoding a CBOR byte string into a Go string.
+ ByteStringToStringAllowed
+
+ // ByteStringToStringAllowedWithExpectedLaterEncoding permits decoding a CBOR byte string
+ // into a Go string. Also, if the byte string is enclosed (directly or indirectly) by one of
+ // the "expected later encoding" tags (numbers 21 through 23), the destination string will
+ // be populated by applying the designated text encoding to the contents of the input byte
+ // string.
+ ByteStringToStringAllowedWithExpectedLaterEncoding
+
+ maxByteStringToStringMode
+)
+
+func (bstsm ByteStringToStringMode) valid() bool {
+ return bstsm >= 0 && bstsm < maxByteStringToStringMode
+}
+
+// FieldNameByteStringMode specifies the behavior when decoding a CBOR byte string map key as a Go struct field name.
+type FieldNameByteStringMode int
+
+const (
+ // FieldNameByteStringForbidden generates an error on an attempt to decode a CBOR byte string map key as a Go struct field name.
+ FieldNameByteStringForbidden FieldNameByteStringMode = iota
+
+ // FieldNameByteStringAllowed permits CBOR byte string map keys to be recognized as Go struct field names.
+ FieldNameByteStringAllowed
+
+ maxFieldNameByteStringMode
+)
+
+func (fnbsm FieldNameByteStringMode) valid() bool {
+ return fnbsm >= 0 && fnbsm < maxFieldNameByteStringMode
+}
+
+// UnrecognizedTagToAnyMode specifies how to decode unrecognized CBOR tag into an empty interface (any).
+// Currently, recognized CBOR tag numbers are 0, 1, 2, 3, or registered by TagSet.
+type UnrecognizedTagToAnyMode int
+
+const (
+ // UnrecognizedTagNumAndContentToAny decodes both the CBOR tag number and the tag content to cbor.Tag
+ // when decoding an unrecognized CBOR tag into an empty interface.
+ UnrecognizedTagNumAndContentToAny UnrecognizedTagToAnyMode = iota
+
+ // UnrecognizedTagContentToAny decodes only the CBOR tag content (into its default type)
+ // when decoding an unrecognized CBOR tag into an empty interface.
+ UnrecognizedTagContentToAny
+
+ maxUnrecognizedTagToAny
+)
+
+func (uttam UnrecognizedTagToAnyMode) valid() bool {
+ return uttam >= 0 && uttam < maxUnrecognizedTagToAny
+}
+
+// TimeTagToAnyMode specifies how to decode CBOR tags 0 and 1 into an empty interface (any).
+// Based on the specified mode, Unmarshal can return a time.Time value or a time string in a specific format.
+type TimeTagToAnyMode int
+
+const (
+ // TimeTagToTime decodes CBOR tags 0 and 1 into a time.Time value
+ // when decoding tag 0 or 1 into an empty interface.
+ TimeTagToTime TimeTagToAnyMode = iota
+
+ // TimeTagToRFC3339 decodes CBOR tags 0 and 1 into a time string in RFC3339 format
+ // when decoding tag 0 or 1 into an empty interface.
+ TimeTagToRFC3339
+
+ // TimeTagToRFC3339Nano decodes CBOR tags 0 and 1 into a time string in RFC3339Nano format
+ // when decoding tag 0 or 1 into an empty interface.
+ TimeTagToRFC3339Nano
+
+ maxTimeTagToAnyMode
+)
+
+func (tttam TimeTagToAnyMode) valid() bool {
+ return tttam >= 0 && tttam < maxTimeTagToAnyMode
+}
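+
+// A minimal usage sketch for TimeTagToRFC3339 (assuming this package is
+// imported as "cbor"):
+//
+//    dm, _ := cbor.DecOptions{TimeTagToAny: cbor.TimeTagToRFC3339}.DecMode()
+//    var v any
+//    _ = dm.Unmarshal([]byte{0xc1, 0x00}, &v) // tag 1 with epoch timestamp 0
+//    // v == "1970-01-01T00:00:00Z" (a string) instead of a time.Time value.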
+
+// SimpleValueRegistry is a registry of unmarshaling behaviors for each possible CBOR simple value
+// number (0...23 and 32...255).
+type SimpleValueRegistry struct {
+ rejected [256]bool
+}
+
+// WithRejectedSimpleValue registers the given simple value as rejected. If the simple value is
+// encountered in a CBOR input during unmarshaling, an UnacceptableDataItemError is returned.
+func WithRejectedSimpleValue(sv SimpleValue) func(*SimpleValueRegistry) error {
+ return func(r *SimpleValueRegistry) error {
+ if sv >= 24 && sv <= 31 {
+ return fmt.Errorf("cbor: cannot set analog for reserved simple value %d", sv)
+ }
+ r.rejected[sv] = true
+ return nil
+ }
+}
+
+// NewSimpleValueRegistryFromDefaults returns a new SimpleValueRegistry. The registry state is
+// initialized by executing the provided functions in order against a registry that is
+// pre-populated with the defaults for all well-formed simple value numbers.
+func NewSimpleValueRegistryFromDefaults(fns ...func(*SimpleValueRegistry) error) (*SimpleValueRegistry, error) {
+ var r SimpleValueRegistry
+ for _, fn := range fns {
+ if err := fn(&r); err != nil {
+ return nil, err
+ }
+ }
+ return &r, nil
+}
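+
+// A minimal usage sketch rejecting one simple value while keeping the defaults
+// for all others (assuming this package is imported as "cbor"):
+//
+//    reg, err := cbor.NewSimpleValueRegistryFromDefaults(
+//        cbor.WithRejectedSimpleValue(16),
+//    )
+//    // Hook reg into a decoding mode via DecOptions{SimpleValues: reg}; the
+//    // input 0xf0 (simple(16)) then fails with an UnacceptableDataItemError.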
+
+// NaNMode specifies how to decode floating-point values (major type 7, additional information 25
+// through 27) representing NaN (not-a-number).
+type NaNMode int
+
+const (
+ // NaNDecodeAllowed will decode NaN values to Go float32 or float64.
+ NaNDecodeAllowed NaNMode = iota
+
+ // NaNDecodeForbidden will return an UnacceptableDataItemError on an attempt to decode a NaN value.
+ NaNDecodeForbidden
+
+ maxNaNDecode
+)
+
+func (ndm NaNMode) valid() bool {
+ return ndm >= 0 && ndm < maxNaNDecode
+}
+
+// InfMode specifies how to decode floating-point values (major type 7, additional information 25
+// through 27) representing positive or negative infinity.
+type InfMode int
+
+const (
+ // InfDecodeAllowed will decode infinite values to Go float32 or float64.
+ InfDecodeAllowed InfMode = iota
+
+ // InfDecodeForbidden will return an UnacceptableDataItemError on an attempt to decode an
+ // infinite value.
+ InfDecodeForbidden
+
+ maxInfDecode
+)
+
+func (idm InfMode) valid() bool {
+ return idm >= 0 && idm < maxInfDecode
+}
+
+// ByteStringToTimeMode specifies the behavior when decoding a CBOR byte string into a Go time.Time.
+type ByteStringToTimeMode int
+
+const (
+ // ByteStringToTimeForbidden generates an error on an attempt to decode a CBOR byte string into a Go time.Time.
+ ByteStringToTimeForbidden ByteStringToTimeMode = iota
+
+ // ByteStringToTimeAllowed permits decoding a CBOR byte string into a Go time.Time.
+ ByteStringToTimeAllowed
+
+ maxByteStringToTimeMode
+)
+
+func (bttm ByteStringToTimeMode) valid() bool {
+ return bttm >= 0 && bttm < maxByteStringToTimeMode
+}
+
+// ByteStringExpectedFormatMode specifies how to decode a CBOR byte string into a Go byte slice
+// when the byte string is NOT enclosed in CBOR tag 21, 22, or 23. An error is returned if
+// the CBOR byte string is not in the specified expected format (e.g. base64).
+// For tags 21-23, see "Expected Later Encoding for CBOR-to-JSON Converters"
+// in RFC 8949 Section 3.4.5.2.
+type ByteStringExpectedFormatMode int
+
+const (
+ // ByteStringExpectedFormatNone copies the unmodified CBOR byte string into Go byte slice
+ // if the byte string is not tagged by CBOR tag 21-23.
+ ByteStringExpectedFormatNone ByteStringExpectedFormatMode = iota
+
+ // ByteStringExpectedBase64URL expects CBOR byte strings to contain base64url-encoded bytes
+ // if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode
+ // the base64url-encoded bytes into a Go byte slice.
+ ByteStringExpectedBase64URL
+
+ // ByteStringExpectedBase64 expects CBOR byte strings to contain base64-encoded bytes
+ // if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode
+ // the base64-encoded bytes into a Go byte slice.
+ ByteStringExpectedBase64
+
+ // ByteStringExpectedBase16 expects CBOR byte strings to contain base16-encoded bytes
+ // if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode
+ // the base16-encoded bytes into a Go byte slice.
+ ByteStringExpectedBase16
+
+ maxByteStringExpectedFormatMode
+)
+
+func (bsefm ByteStringExpectedFormatMode) valid() bool {
+ return bsefm >= 0 && bsefm < maxByteStringExpectedFormatMode
+}
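+
+// A minimal usage sketch for ByteStringExpectedBase64URL (assuming this
+// package is imported as "cbor"):
+//
+//    dm, _ := cbor.DecOptions{ByteStringExpectedFormat: cbor.ByteStringExpectedBase64URL}.DecMode()
+//    var b []byte
+//    _ = dm.Unmarshal([]byte{0x43, 0x61, 0x47, 0x6b}, &b) // byte string h'61476b' ("aGk")
+//    // b == []byte("hi"): the base64url text carried inside the byte string was decoded.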
+
+// BignumTagMode specifies whether or not the "bignum" tags 2 and 3 (RFC 8949 Section 3.4.3) can be
+// decoded.
+type BignumTagMode int
+
+const (
+ // BignumTagAllowed allows bignum tags to be decoded.
+ BignumTagAllowed BignumTagMode = iota
+
+ // BignumTagForbidden produces an UnacceptableDataItemError during Unmarshal if a bignum tag
+ // is encountered in the input.
+ BignumTagForbidden
+
+ maxBignumTag
+)
+
+func (btm BignumTagMode) valid() bool {
+ return btm >= 0 && btm < maxBignumTag
+}
+
+// BinaryUnmarshalerMode specifies how to decode into types that implement
+// encoding.BinaryUnmarshaler.
+type BinaryUnmarshalerMode int
+
+const (
+ // BinaryUnmarshalerByteString will invoke UnmarshalBinary on the contents of a CBOR byte
+ // string when decoding into a value that implements BinaryUnmarshaler.
+ BinaryUnmarshalerByteString BinaryUnmarshalerMode = iota
+
+ // BinaryUnmarshalerNone does not recognize BinaryUnmarshaler implementations during decode.
+ BinaryUnmarshalerNone
+
+ maxBinaryUnmarshalerMode
+)
+
+func (bum BinaryUnmarshalerMode) valid() bool {
+ return bum >= 0 && bum < maxBinaryUnmarshalerMode
+}
+
+// TextUnmarshalerMode specifies how to decode into types that implement
+// encoding.TextUnmarshaler.
+type TextUnmarshalerMode int
+
+const (
+ // TextUnmarshalerNone does not recognize TextUnmarshaler implementations during decode.
+ TextUnmarshalerNone TextUnmarshalerMode = iota
+
+ // TextUnmarshalerTextString will invoke UnmarshalText on the contents of a CBOR text
+ // string when decoding into a value that implements TextUnmarshaler.
+ TextUnmarshalerTextString
+
+ maxTextUnmarshalerMode
+)
+
+func (tum TextUnmarshalerMode) valid() bool {
+ return tum >= 0 && tum < maxTextUnmarshalerMode
+}
+
+// DecOptions specifies decoding options.
+type DecOptions struct {
+ // DupMapKey specifies whether to enforce duplicate map key detection.
+ DupMapKey DupMapKeyMode
+
+ // TimeTag specifies whether or not untagged data items, or tags other
+ // than tag 0 and tag 1, can be decoded to time.Time. If tag 0 or tag 1
+ // appears in an input, the type of its content is always validated as
+ // specified in RFC 8949. That behavior is not controlled by this
+ // option. The behaviors of the supported modes are:
+ //
+ // DecTagIgnored (default): Untagged text strings and text strings
+ // enclosed in tags other than 0 and 1 are decoded as though enclosed
+ // in tag 0. Untagged unsigned integers, negative integers, and
+ // floating-point numbers (or those enclosed in tags other than 0 and
+ // 1) are decoded as though enclosed in tag 1. Decoding a tag other
+ // than 0 or 1 enclosing simple values null or undefined into a
+ // time.Time does not modify the destination value.
+ //
+ // DecTagOptional: Untagged text strings are decoded as though
+ // enclosed in tag 0. Untagged unsigned integers, negative integers,
+ // and floating-point numbers are decoded as though enclosed in tag
+ // 1. Tags other than 0 and 1 will produce an error on attempts to
+ // decode them into a time.Time.
+ //
+ // DecTagRequired: Only tags 0 and 1 can be decoded to time.Time. Any
+ // other input will produce an error.
+ TimeTag DecTagMode
+
+ // MaxNestedLevels specifies the max nested levels allowed for any combination of CBOR arrays, maps, and tags.
+ // Default is 32 levels and it can be set to [4, 65535]. Note that higher maximum levels of nesting can
+ // require larger amounts of stack to deserialize. Don't increase this beyond what you require.
+ MaxNestedLevels int
+
+ // MaxArrayElements specifies the max number of elements for CBOR arrays.
+ // Default is 128*1024=131072 and it can be set to [16, 2147483647].
+ MaxArrayElements int
+
+ // MaxMapPairs specifies the max number of key-value pairs for CBOR maps.
+ // Default is 128*1024=131072 and it can be set to [16, 2147483647].
+ MaxMapPairs int
+
+ // IndefLength specifies whether to allow indefinite length CBOR items.
+ IndefLength IndefLengthMode
+
+ // TagsMd specifies whether to allow CBOR tags (major type 6).
+ TagsMd TagsMode
+
+ // IntDec specifies which Go integer type (int64, uint64, or [big.Int]) to use
+ // when decoding CBOR int (major type 0 and 1) to Go interface{}.
+ IntDec IntDecMode
+
+ // MapKeyByteString specifies how to decode a CBOR byte string as a map key
+ // when decoding a CBOR map with byte string keys into an empty interface value.
+ // By default, an error is returned when attempting to decode a CBOR byte string
+ // as a map key because Go doesn't allow []byte as a map key.
+ MapKeyByteString MapKeyByteStringMode
+
+ // ExtraReturnErrors specifies extra conditions that should be treated as errors.
+ ExtraReturnErrors ExtraDecErrorCond
+
+ // DefaultMapType specifies Go map type to create and decode to
+ // when unmarshaling CBOR into an empty interface value.
+ // By default, unmarshal uses map[interface{}]interface{}.
+ DefaultMapType reflect.Type
+
+ // UTF8 specifies whether the decoder should decode CBOR text strings containing invalid UTF-8.
+ // By default, Unmarshal rejects CBOR text strings containing invalid UTF-8.
+ UTF8 UTF8Mode
+
+ // FieldNameMatching specifies how string keys in CBOR maps are matched to Go struct field names.
+ FieldNameMatching FieldNameMatchingMode
+
+ // BigIntDec specifies how to decode CBOR bignum to Go interface{}.
+ BigIntDec BigIntDecMode
+
+ // DefaultByteStringType is the Go type that should be produced when decoding a CBOR byte
+ // string into an empty interface value. Types to which a []byte is convertible are valid
+ // for this option, except for array and pointer-to-array types. If nil, the default is
+ // []byte.
+ DefaultByteStringType reflect.Type
+
+ // ByteStringToString specifies the behavior when decoding a CBOR byte string into a Go string.
+ ByteStringToString ByteStringToStringMode
+
+ // FieldNameByteString specifies the behavior when decoding a CBOR byte string map key as a
+ // Go struct field name.
+ FieldNameByteString FieldNameByteStringMode
+
+ // UnrecognizedTagToAny specifies how to decode an unrecognized CBOR tag into an empty interface.
+ // Currently, the recognized CBOR tag numbers are 0, 1, 2, and 3, plus any numbers registered via TagSet.
+ UnrecognizedTagToAny UnrecognizedTagToAnyMode
+
+ // TimeTagToAny specifies how to decode CBOR tags 0 and 1 into an empty interface (any).
+ // Based on the specified mode, Unmarshal can return a time.Time value or a time string in a specific format.
+ TimeTagToAny TimeTagToAnyMode
+
+ // SimpleValues is an immutable mapping from each CBOR simple value to a corresponding
+ // unmarshal behavior. If nil, the simple values false, true, null, and undefined are mapped
+ // to the Go analog values false, true, nil, and nil, respectively, and all other simple
+ // values N (except the reserved simple values 24 through 31) are mapped to
+ // cbor.SimpleValue(N). In other words, all well-formed simple values can be decoded.
+ //
+ // Users may provide a custom SimpleValueRegistry constructed via
+ // NewSimpleValueRegistryFromDefaults.
+ SimpleValues *SimpleValueRegistry
+
+ // NaN specifies how to decode floating-point values (major type 7, additional information
+ // 25 through 27) representing NaN (not-a-number).
+ NaN NaNMode
+
+ // Inf specifies how to decode floating-point values (major type 7, additional information
+ // 25 through 27) representing positive or negative infinity.
+ Inf InfMode
+
+ // ByteStringToTime specifies how to decode CBOR byte string into Go time.Time.
+ ByteStringToTime ByteStringToTimeMode
+
+ // ByteStringExpectedFormat specifies how to decode a CBOR byte string into a Go byte slice
+ // when the byte string is NOT enclosed in CBOR tag 21, 22, or 23. An error is returned if
+ // the CBOR byte string is not in the specified expected format (e.g. base64).
+ // For tags 21-23, see "Expected Later Encoding for CBOR-to-JSON Converters"
+ // in RFC 8949 Section 3.4.5.2.
+ ByteStringExpectedFormat ByteStringExpectedFormatMode
+
+ // BignumTag specifies whether or not the "bignum" tags 2 and 3 (RFC 8949 Section 3.4.3) can
+ // be decoded. Unlike BigIntDec, this option applies to all bignum tags encountered in a
+ // CBOR input, independent of the type of the destination value of a particular Unmarshal
+ // operation.
+ BignumTag BignumTagMode
+
+ // BinaryUnmarshaler specifies how to decode into types that implement
+ // encoding.BinaryUnmarshaler.
+ BinaryUnmarshaler BinaryUnmarshalerMode
+
+ // TextUnmarshaler specifies how to decode into types that implement
+ // encoding.TextUnmarshaler.
+ TextUnmarshaler TextUnmarshalerMode
+
+ // JSONUnmarshalerTranscoder sets the transcoding scheme used to unmarshal types that
+ // implement json.Unmarshaler but do not also implement cbor.Unmarshaler. If nil, decoding
+ // behavior is not influenced by whether or not a type implements json.Unmarshaler.
+ JSONUnmarshalerTranscoder Transcoder
+}
+
+// DecMode returns DecMode with immutable options and no tags (safe for concurrency).
+func (opts DecOptions) DecMode() (DecMode, error) { //nolint:gocritic // ignore hugeParam
+ return opts.decMode()
+}
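+
+// A minimal end-to-end sketch (assuming this package is imported as "cbor";
+// DupMapKeyEnforcedAPF and IndefLengthForbidden are mode constants defined
+// elsewhere in this package, and data holds a CBOR-encoded map):
+//
+//    opts := cbor.DecOptions{
+//        DupMapKey:   cbor.DupMapKeyEnforcedAPF,
+//        IndefLength: cbor.IndefLengthForbidden,
+//    }
+//    dm, err := opts.DecMode() // immutable and safe for concurrent use
+//    if err != nil {
+//        // handle invalid option values
+//    }
+//    var v map[string]any
+//    err = dm.Unmarshal(data, &v)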
+
+// validForTags checks that the provided tag set is compatible with these options and returns a
+// non-nil error if and only if the provided tag set is incompatible.
+func (opts DecOptions) validForTags(tags TagSet) error { //nolint:gocritic // ignore hugeParam
+ if opts.TagsMd == TagsForbidden {
+ return errors.New("cbor: cannot create DecMode with TagSet when TagsMd is TagsForbidden")
+ }
+ if tags == nil {
+ return errors.New("cbor: cannot create DecMode with nil value as TagSet")
+ }
+ if opts.ByteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding ||
+ opts.ByteStringExpectedFormat != ByteStringExpectedFormatNone {
+ for _, tagNum := range []uint64{
+ tagNumExpectedLaterEncodingBase64URL,
+ tagNumExpectedLaterEncodingBase64,
+ tagNumExpectedLaterEncodingBase16,
+ } {
+ if rt := tags.getTypeFromTagNum([]uint64{tagNum}); rt != nil {
+ return fmt.Errorf("cbor: DecMode with non-default StringExpectedEncoding or ByteSliceExpectedEncoding treats tag %d as built-in and conflicts with the provided TagSet's registration of %v", tagNum, rt)
+ }
+ }
+ }
+ return nil
+}
+
+// DecModeWithTags returns DecMode with options and tags that are both immutable (safe for concurrency).
+func (opts DecOptions) DecModeWithTags(tags TagSet) (DecMode, error) { //nolint:gocritic // ignore hugeParam
+ if err := opts.validForTags(tags); err != nil {
+ return nil, err
+ }
+ dm, err := opts.decMode()
+ if err != nil {
+ return nil, err
+ }
+
+ // Copy tags
+ ts := tagSet(make(map[reflect.Type]*tagItem))
+ syncTags := tags.(*syncTagSet)
+ syncTags.RLock()
+ for contentType, tag := range syncTags.t {
+ if tag.opts.DecTag != DecTagIgnored {
+ ts[contentType] = tag
+ }
+ }
+ syncTags.RUnlock()
+
+ if len(ts) > 0 {
+ dm.tags = ts
+ }
+
+ return dm, nil
+}
+
+// DecModeWithSharedTags returns DecMode with immutable options and mutable shared tags (safe for concurrency).
+func (opts DecOptions) DecModeWithSharedTags(tags TagSet) (DecMode, error) { //nolint:gocritic // ignore hugeParam
+ if err := opts.validForTags(tags); err != nil {
+ return nil, err
+ }
+ dm, err := opts.decMode()
+ if err != nil {
+ return nil, err
+ }
+ dm.tags = tags
+ return dm, nil
+}
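+
+// A sketch of pairing decoding options with a tag set (assuming this package
+// is imported as "cbor"; myWrapper and tag number 280 are hypothetical):
+//
+//    tags := cbor.NewTagSet()
+//    _ = tags.Add(
+//        cbor.TagOptions{EncTag: cbor.EncTagRequired, DecTag: cbor.DecTagRequired},
+//        reflect.TypeOf(myWrapper{}),
+//        280,
+//    )
+//    dm, err := cbor.DecOptions{}.DecModeWithTags(tags)
+//    // Use DecModeWithSharedTags instead when the tag set will keep changing
+//    // and the mode should observe later additions.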
+
+const (
+ defaultMaxArrayElements = 131072
+ minMaxArrayElements = 16
+ maxMaxArrayElements = 2147483647
+
+ defaultMaxMapPairs = 131072
+ minMaxMapPairs = 16
+ maxMaxMapPairs = 2147483647
+
+ defaultMaxNestedLevels = 32
+ minMaxNestedLevels = 4
+ maxMaxNestedLevels = 65535
+)
+
+var defaultSimpleValues = func() *SimpleValueRegistry {
+ registry, err := NewSimpleValueRegistryFromDefaults()
+ if err != nil {
+ panic(err)
+ }
+ return registry
+}()
+
+//nolint:gocyclo // Each option comes with some manageable boilerplate
+func (opts DecOptions) decMode() (*decMode, error) { //nolint:gocritic // ignore hugeParam
+ if !opts.DupMapKey.valid() {
+ return nil, errors.New("cbor: invalid DupMapKey " + strconv.Itoa(int(opts.DupMapKey)))
+ }
+
+ if !opts.TimeTag.valid() {
+ return nil, errors.New("cbor: invalid TimeTag " + strconv.Itoa(int(opts.TimeTag)))
+ }
+
+ if !opts.IndefLength.valid() {
+ return nil, errors.New("cbor: invalid IndefLength " + strconv.Itoa(int(opts.IndefLength)))
+ }
+
+ if !opts.TagsMd.valid() {
+ return nil, errors.New("cbor: invalid TagsMd " + strconv.Itoa(int(opts.TagsMd)))
+ }
+
+ if !opts.IntDec.valid() {
+ return nil, errors.New("cbor: invalid IntDec " + strconv.Itoa(int(opts.IntDec)))
+ }
+
+ if !opts.MapKeyByteString.valid() {
+ return nil, errors.New("cbor: invalid MapKeyByteString " + strconv.Itoa(int(opts.MapKeyByteString)))
+ }
+
+ if opts.MaxNestedLevels == 0 {
+ opts.MaxNestedLevels = defaultMaxNestedLevels
+ } else if opts.MaxNestedLevels < minMaxNestedLevels || opts.MaxNestedLevels > maxMaxNestedLevels {
+ return nil, errors.New("cbor: invalid MaxNestedLevels " + strconv.Itoa(opts.MaxNestedLevels) +
+ " (range is [" + strconv.Itoa(minMaxNestedLevels) + ", " + strconv.Itoa(maxMaxNestedLevels) + "])")
+ }
+
+ if opts.MaxArrayElements == 0 {
+ opts.MaxArrayElements = defaultMaxArrayElements
+ } else if opts.MaxArrayElements < minMaxArrayElements || opts.MaxArrayElements > maxMaxArrayElements {
+ return nil, errors.New("cbor: invalid MaxArrayElements " + strconv.Itoa(opts.MaxArrayElements) +
+ " (range is [" + strconv.Itoa(minMaxArrayElements) + ", " + strconv.Itoa(maxMaxArrayElements) + "])")
+ }
+
+ if opts.MaxMapPairs == 0 {
+ opts.MaxMapPairs = defaultMaxMapPairs
+ } else if opts.MaxMapPairs < minMaxMapPairs || opts.MaxMapPairs > maxMaxMapPairs {
+ return nil, errors.New("cbor: invalid MaxMapPairs " + strconv.Itoa(opts.MaxMapPairs) +
+ " (range is [" + strconv.Itoa(minMaxMapPairs) + ", " + strconv.Itoa(maxMaxMapPairs) + "])")
+ }
+
+ if !opts.ExtraReturnErrors.valid() {
+ return nil, errors.New("cbor: invalid ExtraReturnErrors " + strconv.Itoa(int(opts.ExtraReturnErrors)))
+ }
+
+ if opts.DefaultMapType != nil && opts.DefaultMapType.Kind() != reflect.Map {
+ return nil, fmt.Errorf("cbor: invalid DefaultMapType %s", opts.DefaultMapType)
+ }
+
+ if !opts.UTF8.valid() {
+ return nil, errors.New("cbor: invalid UTF8 " + strconv.Itoa(int(opts.UTF8)))
+ }
+
+ if !opts.FieldNameMatching.valid() {
+ return nil, errors.New("cbor: invalid FieldNameMatching " + strconv.Itoa(int(opts.FieldNameMatching)))
+ }
+
+ if !opts.BigIntDec.valid() {
+ return nil, errors.New("cbor: invalid BigIntDec " + strconv.Itoa(int(opts.BigIntDec)))
+ }
+
+ if opts.DefaultByteStringType != nil &&
+ opts.DefaultByteStringType.Kind() != reflect.String &&
+ (opts.DefaultByteStringType.Kind() != reflect.Slice || opts.DefaultByteStringType.Elem().Kind() != reflect.Uint8) {
+ return nil, fmt.Errorf("cbor: invalid DefaultByteStringType: %s is not of kind string or []uint8", opts.DefaultByteStringType)
+ }
+
+ if !opts.ByteStringToString.valid() {
+ return nil, errors.New("cbor: invalid ByteStringToString " + strconv.Itoa(int(opts.ByteStringToString)))
+ }
+
+ if !opts.FieldNameByteString.valid() {
+ return nil, errors.New("cbor: invalid FieldNameByteString " + strconv.Itoa(int(opts.FieldNameByteString)))
+ }
+
+ if !opts.UnrecognizedTagToAny.valid() {
+ return nil, errors.New("cbor: invalid UnrecognizedTagToAnyMode " + strconv.Itoa(int(opts.UnrecognizedTagToAny)))
+ }
+ simpleValues := opts.SimpleValues
+ if simpleValues == nil {
+ simpleValues = defaultSimpleValues
+ }
+
+ if !opts.TimeTagToAny.valid() {
+ return nil, errors.New("cbor: invalid TimeTagToAny " + strconv.Itoa(int(opts.TimeTagToAny)))
+ }
+
+ if !opts.NaN.valid() {
+ return nil, errors.New("cbor: invalid NaNDec " + strconv.Itoa(int(opts.NaN)))
+ }
+
+ if !opts.Inf.valid() {
+ return nil, errors.New("cbor: invalid InfDec " + strconv.Itoa(int(opts.Inf)))
+ }
+
+ if !opts.ByteStringToTime.valid() {
+ return nil, errors.New("cbor: invalid ByteStringToTime " + strconv.Itoa(int(opts.ByteStringToTime)))
+ }
+
+ if !opts.ByteStringExpectedFormat.valid() {
+ return nil, errors.New("cbor: invalid ByteStringExpectedFormat " + strconv.Itoa(int(opts.ByteStringExpectedFormat)))
+ }
+
+ if !opts.BignumTag.valid() {
+ return nil, errors.New("cbor: invalid BignumTag " + strconv.Itoa(int(opts.BignumTag)))
+ }
+
+ if !opts.BinaryUnmarshaler.valid() {
+ return nil, errors.New("cbor: invalid BinaryUnmarshaler " + strconv.Itoa(int(opts.BinaryUnmarshaler)))
+ }
+
+ if !opts.TextUnmarshaler.valid() {
+ return nil, errors.New("cbor: invalid TextUnmarshaler " + strconv.Itoa(int(opts.TextUnmarshaler)))
+ }
+
+ dm := decMode{
+ dupMapKey: opts.DupMapKey,
+ timeTag: opts.TimeTag,
+ maxNestedLevels: opts.MaxNestedLevels,
+ maxArrayElements: opts.MaxArrayElements,
+ maxMapPairs: opts.MaxMapPairs,
+ indefLength: opts.IndefLength,
+ tagsMd: opts.TagsMd,
+ intDec: opts.IntDec,
+ mapKeyByteString: opts.MapKeyByteString,
+ extraReturnErrors: opts.ExtraReturnErrors,
+ defaultMapType: opts.DefaultMapType,
+ utf8: opts.UTF8,
+ fieldNameMatching: opts.FieldNameMatching,
+ bigIntDec: opts.BigIntDec,
+ defaultByteStringType: opts.DefaultByteStringType,
+ byteStringToString: opts.ByteStringToString,
+ fieldNameByteString: opts.FieldNameByteString,
+ unrecognizedTagToAny: opts.UnrecognizedTagToAny,
+ timeTagToAny: opts.TimeTagToAny,
+ simpleValues: simpleValues,
+ nanDec: opts.NaN,
+ infDec: opts.Inf,
+ byteStringToTime: opts.ByteStringToTime,
+ byteStringExpectedFormat: opts.ByteStringExpectedFormat,
+ bignumTag: opts.BignumTag,
+ binaryUnmarshaler: opts.BinaryUnmarshaler,
+ textUnmarshaler: opts.TextUnmarshaler,
+ jsonUnmarshalerTranscoder: opts.JSONUnmarshalerTranscoder,
+ }
+
+ return &dm, nil
+}
+
+// DecMode is the main interface for CBOR decoding.
+type DecMode interface {
+ // Unmarshal parses the CBOR-encoded data into the value pointed to by v
+ // using the decoding mode. If v is nil, not a pointer, or a nil pointer,
+ // Unmarshal returns an error.
+ //
+ // See the documentation for Unmarshal for details.
+ Unmarshal(data []byte, v any) error
+
+ // UnmarshalFirst parses the first CBOR data item into the value pointed to by v
+ // using the decoding mode. Any remaining bytes are returned in rest.
+ //
+ // If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error.
+ //
+ // See the documentation for Unmarshal for details.
+ UnmarshalFirst(data []byte, v any) (rest []byte, err error)
+
+ // Valid checks whether data is a well-formed encoded CBOR data item and
+ // that it complies with configurable restrictions such as MaxNestedLevels,
+ // MaxArrayElements, MaxMapPairs, etc.
+ //
+ // If there are any remaining bytes after the CBOR data item,
+ // an ExtraneousDataError is returned.
+ //
+ // WARNING: Valid checks only well-formedness, not validity; RFC 8949
+ // defines "valid" and "well-formed" as distinct properties.
+ //
+ // Deprecated: Valid is kept for compatibility and should not be used.
+ // Use Wellformed instead because it has a more appropriate name.
+ Valid(data []byte) error
+
+ // Wellformed checks whether data is a well-formed encoded CBOR data item and
+ // that it complies with configurable restrictions such as MaxNestedLevels,
+ // MaxArrayElements, MaxMapPairs, etc.
+ //
+ // If there are any remaining bytes after the CBOR data item,
+ // an ExtraneousDataError is returned.
+ Wellformed(data []byte) error
+
+ // NewDecoder returns a new decoder that reads from r using dm DecMode.
+ NewDecoder(r io.Reader) *Decoder
+
+ // DecOptions returns the user-specified options used to create this DecMode.
+ DecOptions() DecOptions
+}
+
+type decMode struct {
+ tags tagProvider
+ dupMapKey DupMapKeyMode
+ timeTag DecTagMode
+ maxNestedLevels int
+ maxArrayElements int
+ maxMapPairs int
+ indefLength IndefLengthMode
+ tagsMd TagsMode
+ intDec IntDecMode
+ mapKeyByteString MapKeyByteStringMode
+ extraReturnErrors ExtraDecErrorCond
+ defaultMapType reflect.Type
+ utf8 UTF8Mode
+ fieldNameMatching FieldNameMatchingMode
+ bigIntDec BigIntDecMode
+ defaultByteStringType reflect.Type
+ byteStringToString ByteStringToStringMode
+ fieldNameByteString FieldNameByteStringMode
+ unrecognizedTagToAny UnrecognizedTagToAnyMode
+ timeTagToAny TimeTagToAnyMode
+ simpleValues *SimpleValueRegistry
+ nanDec NaNMode
+ infDec InfMode
+ byteStringToTime ByteStringToTimeMode
+ byteStringExpectedFormat ByteStringExpectedFormatMode
+ bignumTag BignumTagMode
+ binaryUnmarshaler BinaryUnmarshalerMode
+ textUnmarshaler TextUnmarshalerMode
+ jsonUnmarshalerTranscoder Transcoder
+}
+
+var defaultDecMode, _ = DecOptions{}.decMode()
+
+// DecOptions returns the user-specified options used to create this DecMode.
+func (dm *decMode) DecOptions() DecOptions {
+ simpleValues := dm.simpleValues
+ if simpleValues == defaultSimpleValues {
+ // Users can't explicitly set this to defaultSimpleValues. It must have been nil in
+ // the original DecOptions.
+ simpleValues = nil
+ }
+
+ return DecOptions{
+ DupMapKey: dm.dupMapKey,
+ TimeTag: dm.timeTag,
+ MaxNestedLevels: dm.maxNestedLevels,
+ MaxArrayElements: dm.maxArrayElements,
+ MaxMapPairs: dm.maxMapPairs,
+ IndefLength: dm.indefLength,
+ TagsMd: dm.tagsMd,
+ IntDec: dm.intDec,
+ MapKeyByteString: dm.mapKeyByteString,
+ ExtraReturnErrors: dm.extraReturnErrors,
+ DefaultMapType: dm.defaultMapType,
+ UTF8: dm.utf8,
+ FieldNameMatching: dm.fieldNameMatching,
+ BigIntDec: dm.bigIntDec,
+ DefaultByteStringType: dm.defaultByteStringType,
+ ByteStringToString: dm.byteStringToString,
+ FieldNameByteString: dm.fieldNameByteString,
+ UnrecognizedTagToAny: dm.unrecognizedTagToAny,
+ TimeTagToAny: dm.timeTagToAny,
+ SimpleValues: simpleValues,
+ NaN: dm.nanDec,
+ Inf: dm.infDec,
+ ByteStringToTime: dm.byteStringToTime,
+ ByteStringExpectedFormat: dm.byteStringExpectedFormat,
+ BignumTag: dm.bignumTag,
+ BinaryUnmarshaler: dm.binaryUnmarshaler,
+ TextUnmarshaler: dm.textUnmarshaler,
+ JSONUnmarshalerTranscoder: dm.jsonUnmarshalerTranscoder,
+ }
+}
+
+// Unmarshal parses the CBOR-encoded data into the value pointed to by v
+// using dm decoding mode. If v is nil, not a pointer, or a nil pointer,
+// Unmarshal returns an error.
+//
+// See the documentation for Unmarshal for details.
+func (dm *decMode) Unmarshal(data []byte, v any) error {
+ d := decoder{data: data, dm: dm}
+
+ // Check well-formedness.
+ off := d.off // Save offset before data validation
+ err := d.wellformed(false, false) // don't allow any extra data after valid data item.
+ d.off = off // Restore offset
+ if err != nil {
+ return err
+ }
+
+ return d.value(v)
+}
+
+// UnmarshalFirst parses the first CBOR data item into the value pointed to by v
+// using dm decoding mode. Any remaining bytes are returned in rest.
+//
+// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error.
+//
+// See the documentation for Unmarshal for details.
+func (dm *decMode) UnmarshalFirst(data []byte, v any) (rest []byte, err error) {
+ d := decoder{data: data, dm: dm}
+
+ // check well-formedness.
+ off := d.off // Save offset before data validation
+ err = d.wellformed(true, false) // allow extra data after well-formed data item
+ d.off = off // Restore offset
+
+ // If it is well-formed, parse the value. It is structured this way to allow
+ // better test coverage.
+ if err == nil {
+ err = d.value(v)
+ }
+
+ // If either wellformed or value returned an error, do not return rest bytes
+ if err != nil {
+ return nil, err
+ }
+
+ // Return the rest of the data slice (which might be len 0)
+ return d.data[d.off:], nil
+}
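+
+// A minimal usage sketch for UnmarshalFirst on two concatenated data items
+// (assuming dm is a DecMode):
+//
+//    var first int
+//    rest, err := dm.UnmarshalFirst([]byte{0x01, 0x02}, &first)
+//    // err == nil, first == 1, rest == []byte{0x02}; the second item is untouched.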
+
+// Valid checks whether data is a well-formed encoded CBOR data item and
+// that it complies with configurable restrictions such as MaxNestedLevels,
+// MaxArrayElements, MaxMapPairs, etc.
+//
+// If there are any remaining bytes after the CBOR data item,
+// an ExtraneousDataError is returned.
+//
+// WARNING: Valid checks only well-formedness, not validity; RFC 8949
+// defines "valid" and "well-formed" as distinct properties.
+//
+// Deprecated: Valid is kept for compatibility and should not be used.
+// Use Wellformed instead because it has a more appropriate name.
+func (dm *decMode) Valid(data []byte) error {
+ return dm.Wellformed(data)
+}
+
+// Wellformed checks whether data is a well-formed encoded CBOR data item and
+// that it complies with configurable restrictions such as MaxNestedLevels,
+// MaxArrayElements, MaxMapPairs, etc.
+//
+// If there are any remaining bytes after the CBOR data item,
+// an ExtraneousDataError is returned.
+func (dm *decMode) Wellformed(data []byte) error {
+ d := decoder{data: data, dm: dm}
+ return d.wellformed(false, false)
+}
+
+// NewDecoder returns a new decoder that reads from r using dm DecMode.
+func (dm *decMode) NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{r: r, d: decoder{dm: dm}}
+}
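+
+// A minimal streaming sketch (assuming dm is a DecMode and r is an io.Reader;
+// Decoder.Decode is defined elsewhere in this package):
+//
+//    dec := dm.NewDecoder(r)
+//    for {
+//        var v any
+//        if err := dec.Decode(&v); err != nil {
+//            if err == io.EOF {
+//                break // no more data items
+//            }
+//            return err
+//        }
+//        // use v
+//    }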
+
+type decoder struct {
+ data []byte
+ off int // next read offset in data
+ dm *decMode
+
+ // expectedLaterEncodingTags stores a stack of encountered "Expected Later Encoding" tags,
+ // if any.
+ //
+ // The "Expected Later Encoding" tags (21 to 23) are valid for any data item. When decoding
+ // byte strings, the effective encoding comes from the tag nearest to the byte string being
+ // decoded. For example, the effective encoding of the byte string 21(22(h'41')) would be
+ // controlled by tag 22, and in the data item 23([h'42', 22([21(h'43')])]) the effective
+ // encoding of the byte strings h'42' and h'43' would be controlled by tag 23 and 21,
+ // respectively.
+ expectedLaterEncodingTags []uint64
+}
+
+// value decodes a CBOR data item into the value pointed to by v.
+// If the CBOR data item fails to decode into v, an error is returned
+// and the offset is moved past that data item.
+// Precondition: d.data contains at least one well-formed CBOR data item.
+func (d *decoder) value(v any) error {
+ // v can't be nil, non-pointer, or nil pointer value.
+ if v == nil {
+ return &InvalidUnmarshalError{"cbor: Unmarshal(nil)"}
+ }
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Pointer {
+ return &InvalidUnmarshalError{"cbor: Unmarshal(non-pointer " + rv.Type().String() + ")"}
+ } else if rv.IsNil() {
+ return &InvalidUnmarshalError{"cbor: Unmarshal(nil " + rv.Type().String() + ")"}
+ }
+ rv = rv.Elem()
+ return d.parseToValue(rv, getTypeInfo(rv.Type()))
+}
+
+// parseToValue decodes CBOR data to value. It assumes data is well-formed,
+// and does not perform bounds checking.
+func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo
+
+ // Decode CBOR nil or CBOR undefined to pointer value by setting pointer value to nil.
+ if d.nextCBORNil() && v.Kind() == reflect.Pointer {
+ d.skip()
+ v.SetZero()
+ return nil
+ }
+
+ if tInfo.spclType == specialTypeIface {
+ if !v.IsNil() {
+ // Use value type
+ v = v.Elem()
+ tInfo = getTypeInfo(v.Type())
+ } else { //nolint:gocritic
+ // Create and use registered type if CBOR data is registered tag
+ if d.dm.tags != nil && d.nextCBORType() == cborTypeTag {
+
+ off := d.off
+ var tagNums []uint64
+ for d.nextCBORType() == cborTypeTag {
+ _, _, tagNum := d.getHead()
+ tagNums = append(tagNums, tagNum)
+ }
+ d.off = off
+
+ registeredType := d.dm.tags.getTypeFromTagNum(tagNums)
+ if registeredType != nil {
+ if registeredType.Implements(tInfo.nonPtrType) ||
+ reflect.PointerTo(registeredType).Implements(tInfo.nonPtrType) {
+ v.Set(reflect.New(registeredType))
+ v = v.Elem()
+ tInfo = getTypeInfo(registeredType)
+ }
+ }
+ }
+ }
+ }
+
+ // Create new value for the pointer v to point to.
+ // At this point, CBOR value is not nil/undefined if v is a pointer.
+ for v.Kind() == reflect.Pointer {
+ if v.IsNil() {
+ if !v.CanSet() {
+ d.skip()
+ return errors.New("cbor: cannot set new value for " + v.Type().String())
+ }
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ }
+
+ // Strip self-described CBOR tag number.
+ for d.nextCBORType() == cborTypeTag {
+ off := d.off
+ _, _, tagNum := d.getHead()
+ if tagNum != tagNumSelfDescribedCBOR {
+ d.off = off
+ break
+ }
+ }
+
+ // Check validity of supported built-in tags.
+ off := d.off
+ for d.nextCBORType() == cborTypeTag {
+ _, _, tagNum := d.getHead()
+ if err := validBuiltinTag(tagNum, d.data[d.off]); err != nil {
+ d.skip()
+ return err
+ }
+ }
+ d.off = off
+
+ if tInfo.spclType != specialTypeNone {
+ switch tInfo.spclType {
+ case specialTypeEmptyIface:
+ iv, err := d.parse(false) // Skipped self-described CBOR tag number already.
+ if iv != nil {
+ v.Set(reflect.ValueOf(iv))
+ }
+ return err
+
+ case specialTypeTag:
+ return d.parseToTag(v)
+
+ case specialTypeTime:
+ if d.nextCBORNil() {
+ // Decoding CBOR null and undefined to time.Time is no-op.
+ d.skip()
+ return nil
+ }
+ tm, ok, err := d.parseToTime()
+ if err != nil {
+ return err
+ }
+ if ok {
+ v.Set(reflect.ValueOf(tm))
+ }
+ return nil
+
+ case specialTypeUnmarshalerIface:
+ return d.parseToUnmarshaler(v)
+
+ case specialTypeUnexportedUnmarshalerIface:
+ return d.parseToUnexportedUnmarshaler(v)
+
+ case specialTypeJSONUnmarshalerIface:
+ // This special type implies that the type does not also implement
+ // cbor.Unmarshaler.
+ if d.dm.jsonUnmarshalerTranscoder == nil {
+ break
+ }
+ return d.parseToJSONUnmarshaler(v)
+ }
+ }
+
+ // Check registered tag number
+ if tagItem := d.getRegisteredTagItem(tInfo.nonPtrType); tagItem != nil {
+ t := d.nextCBORType()
+ if t != cborTypeTag {
+ if tagItem.opts.DecTag == DecTagRequired {
+ d.skip() // Required tag number is absent, skip entire tag
+ return &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: tInfo.typ.String(),
+ errorMsg: "expect CBOR tag value"}
+ }
+ } else if err := d.validRegisteredTagNums(tagItem); err != nil {
+ d.skip() // Skip tag content
+ return err
+ }
+ }
+
+ t := d.nextCBORType()
+
+ switch t {
+ case cborTypePositiveInt:
+ _, _, val := d.getHead()
+ return fillPositiveInt(t, val, v)
+
+ case cborTypeNegativeInt:
+ _, _, val := d.getHead()
+ if val > math.MaxInt64 {
+ // CBOR negative integer overflows int64, use big.Int to store value.
+ bi := new(big.Int)
+ bi.SetUint64(val)
+ bi.Add(bi, big.NewInt(1))
+ bi.Neg(bi)
+
+ if tInfo.nonPtrType == typeBigInt {
+ v.Set(reflect.ValueOf(*bi))
+ return nil
+ }
+ return &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: tInfo.nonPtrType.String(),
+ errorMsg: bi.String() + " overflows Go's int64",
+ }
+ }
+ nValue := int64(-1) ^ int64(val)
+ return fillNegativeInt(t, nValue, v)
+
+ case cborTypeByteString:
+ b, copied := d.parseByteString()
+ b, converted, err := d.applyByteStringTextConversion(b, v.Type())
+ if err != nil {
+ return err
+ }
+ copied = copied || converted
+ return fillByteString(t, b, !copied, v, d.dm.byteStringToString, d.dm.binaryUnmarshaler, d.dm.textUnmarshaler)
+
+ case cborTypeTextString:
+ b, err := d.parseTextString()
+ if err != nil {
+ return err
+ }
+ return fillTextString(t, b, v, d.dm.textUnmarshaler)
+
+ case cborTypePrimitives:
+ _, ai, val := d.getHead()
+ switch ai {
+ case additionalInformationAsFloat16:
+ f := float64(float16.Frombits(uint16(val)).Float32())
+ return fillFloat(t, f, v)
+
+ case additionalInformationAsFloat32:
+ f := float64(math.Float32frombits(uint32(val)))
+ return fillFloat(t, f, v)
+
+ case additionalInformationAsFloat64:
+ f := math.Float64frombits(val)
+ return fillFloat(t, f, v)
+
+ default: // ai <= 24
+ if d.dm.simpleValues.rejected[SimpleValue(val)] {
+ return &UnacceptableDataItemError{
+ CBORType: t.String(),
+ Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized",
+ }
+ }
+
+ switch ai {
+ case additionalInformationAsFalse,
+ additionalInformationAsTrue:
+ return fillBool(t, ai == additionalInformationAsTrue, v)
+
+ case additionalInformationAsNull,
+ additionalInformationAsUndefined:
+ return fillNil(t, v)
+
+ default:
+ return fillPositiveInt(t, val, v)
+ }
+ }
+
+ case cborTypeTag:
+ _, _, tagNum := d.getHead()
+ switch tagNum {
+ case tagNumUnsignedBignum:
+ // Bignum (tag 2) can be decoded to uint, int, float, slice, array, or big.Int.
+ b, copied := d.parseByteString()
+ bi := new(big.Int).SetBytes(b)
+
+ if tInfo.nonPtrType == typeBigInt {
+ v.Set(reflect.ValueOf(*bi))
+ return nil
+ }
+ if tInfo.nonPtrKind == reflect.Slice || tInfo.nonPtrKind == reflect.Array {
+ return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler, d.dm.textUnmarshaler)
+ }
+ if bi.IsUint64() {
+ return fillPositiveInt(t, bi.Uint64(), v)
+ }
+ return &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: tInfo.nonPtrType.String(),
+ errorMsg: bi.String() + " overflows " + v.Type().String(),
+ }
+
+ case tagNumNegativeBignum:
+ // Bignum (tag 3) can be decoded to int, float, slice, array, or big.Int.
+ b, copied := d.parseByteString()
+ bi := new(big.Int).SetBytes(b)
+ bi.Add(bi, big.NewInt(1))
+ bi.Neg(bi)
+
+ if tInfo.nonPtrType == typeBigInt {
+ v.Set(reflect.ValueOf(*bi))
+ return nil
+ }
+ if tInfo.nonPtrKind == reflect.Slice || tInfo.nonPtrKind == reflect.Array {
+ return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler, d.dm.textUnmarshaler)
+ }
+ if bi.IsInt64() {
+ return fillNegativeInt(t, bi.Int64(), v)
+ }
+ return &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: tInfo.nonPtrType.String(),
+ errorMsg: bi.String() + " overflows " + v.Type().String(),
+ }
+
+ case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16:
+ // If conversion for interoperability with text encodings is not configured,
+ // treat tags 21-23 as unregistered tags.
+ if d.dm.byteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding || d.dm.byteStringExpectedFormat != ByteStringExpectedFormatNone {
+ d.expectedLaterEncodingTags = append(d.expectedLaterEncodingTags, tagNum)
+ defer func() {
+ d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:len(d.expectedLaterEncodingTags)-1]
+ }()
+ }
+ }
+
+ return d.parseToValue(v, tInfo)
+
+ case cborTypeArray:
+ if tInfo.nonPtrKind == reflect.Slice {
+ return d.parseArrayToSlice(v, tInfo)
+ } else if tInfo.nonPtrKind == reflect.Array {
+ return d.parseArrayToArray(v, tInfo)
+ } else if tInfo.nonPtrKind == reflect.Struct {
+ return d.parseArrayToStruct(v, tInfo)
+ }
+ d.skip()
+ return &UnmarshalTypeError{CBORType: t.String(), GoType: tInfo.nonPtrType.String()}
+
+ case cborTypeMap:
+ if tInfo.nonPtrKind == reflect.Struct {
+ return d.parseMapToStruct(v, tInfo)
+ } else if tInfo.nonPtrKind == reflect.Map {
+ return d.parseMapToMap(v, tInfo)
+ }
+ d.skip()
+ return &UnmarshalTypeError{CBORType: t.String(), GoType: tInfo.nonPtrType.String()}
+ }
+
+ return nil
+}
+
+func (d *decoder) parseToTag(v reflect.Value) error {
+ if d.nextCBORNil() {
+ // Decoding CBOR null and undefined to cbor.Tag is no-op.
+ d.skip()
+ return nil
+ }
+
+ t := d.nextCBORType()
+ if t != cborTypeTag {
+ d.skip()
+ return &UnmarshalTypeError{CBORType: t.String(), GoType: typeTag.String()}
+ }
+
+ // Unmarshal tag number
+ _, _, num := d.getHead()
+
+ // Unmarshal tag content
+ content, err := d.parse(false)
+ if err != nil {
+ return err
+ }
+
+ v.Set(reflect.ValueOf(Tag{num, content}))
+ return nil
+}
+
+// parseToTime decodes the current data item as a time.Time. The bool return value is false if and
+// only if the destination value should remain unmodified.
+func (d *decoder) parseToTime() (time.Time, bool, error) {
+ // Verify that tag number or absence of tag number is acceptable to specified timeTag.
+ if t := d.nextCBORType(); t == cborTypeTag {
+ if d.dm.timeTag == DecTagIgnored {
+ // Skip all enclosing tags
+ for t == cborTypeTag {
+ d.getHead()
+ t = d.nextCBORType()
+ }
+ if d.nextCBORNil() {
+ d.skip()
+ return time.Time{}, false, nil
+ }
+ } else {
+ // Read tag number
+ _, _, tagNum := d.getHead()
+ if tagNum != 0 && tagNum != 1 {
+ d.skip() // skip tag content
+ return time.Time{}, false, errors.New("cbor: wrong tag number for time.Time, got " + strconv.Itoa(int(tagNum)) + ", expect 0 or 1")
+ }
+ }
+ } else {
+ if d.dm.timeTag == DecTagRequired {
+ d.skip()
+ return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String(), errorMsg: "expect CBOR tag value"}
+ }
+ }
+
+ switch t := d.nextCBORType(); t {
+ case cborTypeByteString:
+ if d.dm.byteStringToTime == ByteStringToTimeAllowed {
+ b, _ := d.parseByteString()
+ t, err := time.Parse(time.RFC3339, string(b))
+ if err != nil {
+ return time.Time{}, false, fmt.Errorf("cbor: cannot set %q for time.Time: %w", string(b), err)
+ }
+ return t, true, nil
+ }
+ return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()}
+
+ case cborTypeTextString:
+ s, err := d.parseTextString()
+ if err != nil {
+ return time.Time{}, false, err
+ }
+ t, err := time.Parse(time.RFC3339, string(s))
+ if err != nil {
+ return time.Time{}, false, errors.New("cbor: cannot set " + string(s) + " for time.Time: " + err.Error())
+ }
+ return t, true, nil
+
+ case cborTypePositiveInt:
+ _, _, val := d.getHead()
+ if val > math.MaxInt64 {
+ return time.Time{}, false, &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: typeTime.String(),
+ errorMsg: fmt.Sprintf("%d overflows Go's int64", val),
+ }
+ }
+ return time.Unix(int64(val), 0), true, nil
+
+ case cborTypeNegativeInt:
+ _, _, val := d.getHead()
+ if val > math.MaxInt64 {
+ if val == math.MaxUint64 {
+ // Maximum absolute value representable by negative integer is 2^64,
+ // not 2^64-1, so it overflows uint64.
+ return time.Time{}, false, &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: typeTime.String(),
+ errorMsg: "-18446744073709551616 overflows Go's int64",
+ }
+ }
+ return time.Time{}, false, &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: typeTime.String(),
+ errorMsg: fmt.Sprintf("-%d overflows Go's int64", val+1),
+ }
+ }
+ return time.Unix(int64(-1)^int64(val), 0), true, nil
+
+ case cborTypePrimitives:
+ _, ai, val := d.getHead()
+ var f float64
+ switch ai {
+ case additionalInformationAsFloat16:
+ f = float64(float16.Frombits(uint16(val)).Float32())
+
+ case additionalInformationAsFloat32:
+ f = float64(math.Float32frombits(uint32(val)))
+
+ case additionalInformationAsFloat64:
+ f = math.Float64frombits(val)
+
+ default:
+ return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()}
+ }
+
+ if math.IsNaN(f) || math.IsInf(f, 0) {
+ // https://www.rfc-editor.org/rfc/rfc8949.html#section-3.4.2-6
+ return time.Time{}, true, nil
+ }
+ seconds, fractional := math.Modf(f)
+ return time.Unix(int64(seconds), int64(fractional*1e9)), true, nil
+
+ default:
+ return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()}
+ }
+}
+
+// parseToUnmarshaler parses CBOR data to value implementing Unmarshaler interface.
+// It assumes data is well-formed, and does not perform bounds checking.
+func (d *decoder) parseToUnmarshaler(v reflect.Value) error {
+ if d.nextCBORNil() && v.Kind() == reflect.Pointer && v.IsNil() {
+ d.skip()
+ return nil
+ }
+
+ if v.Kind() != reflect.Pointer && v.CanAddr() {
+ v = v.Addr()
+ }
+ if u, ok := v.Interface().(Unmarshaler); ok {
+ start := d.off
+ d.skip()
+ return u.UnmarshalCBOR(d.data[start:d.off])
+ }
+ d.skip()
+ return errors.New("cbor: failed to assert " + v.Type().String() + " as cbor.Unmarshaler")
+}
+
+// parseToUnexportedUnmarshaler parses CBOR data to value implementing unmarshaler interface.
+// It assumes data is well-formed, and does not perform bounds checking.
+func (d *decoder) parseToUnexportedUnmarshaler(v reflect.Value) error {
+ if d.nextCBORNil() && v.Kind() == reflect.Pointer && v.IsNil() {
+ d.skip()
+ return nil
+ }
+
+ if v.Kind() != reflect.Pointer && v.CanAddr() {
+ v = v.Addr()
+ }
+ if u, ok := v.Interface().(unmarshaler); ok {
+ start := d.off
+ d.skip()
+ return u.unmarshalCBOR(d.data[start:d.off])
+ }
+ d.skip()
+ return errors.New("cbor: failed to assert " + v.Type().String() + " as cbor.unmarshaler")
+}
+
+// parseToJSONUnmarshaler parses CBOR data to be transcoded to JSON and passed to the value's
+// implementation of the json.Unmarshaler interface. It assumes data is well-formed, and does not
+// perform bounds checking.
+func (d *decoder) parseToJSONUnmarshaler(v reflect.Value) error {
+ if d.nextCBORNil() && v.Kind() == reflect.Pointer && v.IsNil() {
+ d.skip()
+ return nil
+ }
+
+ if v.Kind() != reflect.Pointer && v.CanAddr() {
+ v = v.Addr()
+ }
+ if u, ok := v.Interface().(jsonUnmarshaler); ok {
+ start := d.off
+ d.skip()
+ e := getEncodeBuffer()
+ defer putEncodeBuffer(e)
+ if err := d.dm.jsonUnmarshalerTranscoder.Transcode(e, bytes.NewReader(d.data[start:d.off])); err != nil {
+ return &TranscodeError{err: err, rtype: v.Type(), sourceFormat: "cbor", targetFormat: "json"}
+ }
+ return u.UnmarshalJSON(e.Bytes())
+ }
+ d.skip()
+ return errors.New("cbor: failed to assert " + v.Type().String() + " as json.Unmarshaler")
+}
+
+// parse parses CBOR data and returns value in default Go type.
+// It assumes data is well-formed, and does not perform bounds checking.
+func (d *decoder) parse(skipSelfDescribedTag bool) (any, error) { //nolint:gocyclo
+ // Strip self-described CBOR tag number.
+ if skipSelfDescribedTag {
+ for d.nextCBORType() == cborTypeTag {
+ off := d.off
+ _, _, tagNum := d.getHead()
+ if tagNum != tagNumSelfDescribedCBOR {
+ d.off = off
+ break
+ }
+ }
+ }
+
+ // Check validity of supported built-in tags.
+ off := d.off
+ for d.nextCBORType() == cborTypeTag {
+ _, _, tagNum := d.getHead()
+ if err := validBuiltinTag(tagNum, d.data[d.off]); err != nil {
+ d.skip()
+ return nil, err
+ }
+ }
+ d.off = off
+
+ t := d.nextCBORType()
+ switch t {
+ case cborTypePositiveInt:
+ _, _, val := d.getHead()
+
+ switch d.dm.intDec {
+ case IntDecConvertNone:
+ return val, nil
+
+ case IntDecConvertSigned, IntDecConvertSignedOrFail:
+ if val > math.MaxInt64 {
+ return nil, &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: reflect.TypeOf(int64(0)).String(),
+ errorMsg: strconv.FormatUint(val, 10) + " overflows Go's int64",
+ }
+ }
+
+ return int64(val), nil
+
+ case IntDecConvertSignedOrBigInt:
+ if val > math.MaxInt64 {
+ bi := new(big.Int).SetUint64(val)
+ if d.dm.bigIntDec == BigIntDecodePointer {
+ return bi, nil
+ }
+ return *bi, nil
+ }
+
+ return int64(val), nil
+
+ default:
+ // not reachable
+ }
+
+ case cborTypeNegativeInt:
+ _, _, val := d.getHead()
+
+ if val > math.MaxInt64 {
+ // CBOR negative integer value overflows Go int64, use big.Int instead.
+ bi := new(big.Int).SetUint64(val)
+ bi.Add(bi, big.NewInt(1))
+ bi.Neg(bi)
+
+ if d.dm.intDec == IntDecConvertSignedOrFail {
+ return nil, &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: reflect.TypeOf(int64(0)).String(),
+ errorMsg: bi.String() + " overflows Go's int64",
+ }
+ }
+
+ if d.dm.bigIntDec == BigIntDecodePointer {
+ return bi, nil
+ }
+ return *bi, nil
+ }
+
+ nValue := int64(-1) ^ int64(val)
+ return nValue, nil
+
+ case cborTypeByteString:
+ b, copied := d.parseByteString()
+ var effectiveByteStringType = d.dm.defaultByteStringType
+ if effectiveByteStringType == nil {
+ effectiveByteStringType = typeByteSlice
+ }
+ b, converted, err := d.applyByteStringTextConversion(b, effectiveByteStringType)
+ if err != nil {
+ return nil, err
+ }
+ copied = copied || converted
+
+ switch effectiveByteStringType {
+ case typeByteSlice:
+ if copied {
+ return b, nil
+ }
+ clone := make([]byte, len(b))
+ copy(clone, b)
+ return clone, nil
+
+ case typeString:
+ return string(b), nil
+
+ default:
+ if copied || d.dm.defaultByteStringType.Kind() == reflect.String {
+ // Avoid an unnecessary copy since the conversion to string must
+ // copy the underlying bytes.
+ return reflect.ValueOf(b).Convert(d.dm.defaultByteStringType).Interface(), nil
+ }
+ clone := make([]byte, len(b))
+ copy(clone, b)
+ return reflect.ValueOf(clone).Convert(d.dm.defaultByteStringType).Interface(), nil
+ }
+
+ case cborTypeTextString:
+ b, err := d.parseTextString()
+ if err != nil {
+ return nil, err
+ }
+ return string(b), nil
+
+ case cborTypeTag:
+ tagOff := d.off
+ _, _, tagNum := d.getHead()
+ contentOff := d.off
+
+ switch tagNum {
+ case tagNumRFC3339Time, tagNumEpochTime:
+ d.off = tagOff
+ tm, _, err := d.parseToTime()
+ if err != nil {
+ return nil, err
+ }
+
+ switch d.dm.timeTagToAny {
+ case TimeTagToTime:
+ return tm, nil
+
+ case TimeTagToRFC3339:
+ if tagNum == 1 {
+ tm = tm.UTC()
+ }
+ // Call time.MarshalText() to format the decoded time in RFC3339 format,
+ // returning an error for time values that cannot be represented in
+ // RFC3339 format (e.g. the year cannot exceed 9999).
+ text, err := tm.Truncate(time.Second).MarshalText()
+ if err != nil {
+ return nil, fmt.Errorf("cbor: decoded time cannot be represented in RFC3339 format: %v", err)
+ }
+ return string(text), nil
+
+ case TimeTagToRFC3339Nano:
+ if tagNum == 1 {
+ tm = tm.UTC()
+ }
+ // Call time.MarshalText() to format the decoded time in RFC3339 format,
+ // returning an error for time values that cannot be represented in
+ // RFC3339 format with sub-second precision.
+ text, err := tm.MarshalText()
+ if err != nil {
+ return nil, fmt.Errorf("cbor: decoded time cannot be represented in RFC3339 format with sub-second precision: %v", err)
+ }
+ return string(text), nil
+
+ default:
+ // not reachable
+ }
+
+ case tagNumUnsignedBignum:
+ b, _ := d.parseByteString()
+ bi := new(big.Int).SetBytes(b)
+
+ if d.dm.bigIntDec == BigIntDecodePointer {
+ return bi, nil
+ }
+ return *bi, nil
+
+ case tagNumNegativeBignum:
+ b, _ := d.parseByteString()
+ bi := new(big.Int).SetBytes(b)
+ bi.Add(bi, big.NewInt(1))
+ bi.Neg(bi)
+
+ if d.dm.bigIntDec == BigIntDecodePointer {
+ return bi, nil
+ }
+ return *bi, nil
+
+ case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16:
+ // If conversion for interoperability with text encodings is not configured,
+ // treat tags 21-23 as unregistered tags.
+ if d.dm.byteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding ||
+ d.dm.byteStringExpectedFormat != ByteStringExpectedFormatNone {
+ d.expectedLaterEncodingTags = append(d.expectedLaterEncodingTags, tagNum)
+ defer func() {
+ d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:len(d.expectedLaterEncodingTags)-1]
+ }()
+ return d.parse(false)
+ }
+ }
+
+ if d.dm.tags != nil {
+ // Parse to specified type if tag number is registered.
+ tagNums := []uint64{tagNum}
+ for d.nextCBORType() == cborTypeTag {
+ _, _, num := d.getHead()
+ tagNums = append(tagNums, num)
+ }
+ registeredType := d.dm.tags.getTypeFromTagNum(tagNums)
+ if registeredType != nil {
+ d.off = tagOff
+ rv := reflect.New(registeredType)
+ if err := d.parseToValue(rv.Elem(), getTypeInfo(registeredType)); err != nil {
+ return nil, err
+ }
+ return rv.Elem().Interface(), nil
+ }
+ }
+
+ // Parse tag content
+ d.off = contentOff
+ content, err := d.parse(false)
+ if err != nil {
+ return nil, err
+ }
+ if d.dm.unrecognizedTagToAny == UnrecognizedTagContentToAny {
+ return content, nil
+ }
+ return Tag{tagNum, content}, nil
+
+ case cborTypePrimitives:
+ _, ai, val := d.getHead()
+ if ai <= 24 && d.dm.simpleValues.rejected[SimpleValue(val)] {
+ return nil, &UnacceptableDataItemError{
+ CBORType: t.String(),
+ Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized",
+ }
+ }
+ if ai < 20 || ai == 24 {
+ return SimpleValue(val), nil
+ }
+
+ switch ai {
+ case additionalInformationAsFalse,
+ additionalInformationAsTrue:
+ return (ai == additionalInformationAsTrue), nil
+
+ case additionalInformationAsNull,
+ additionalInformationAsUndefined:
+ return nil, nil
+
+ case additionalInformationAsFloat16:
+ f := float64(float16.Frombits(uint16(val)).Float32())
+ return f, nil
+
+ case additionalInformationAsFloat32:
+ f := float64(math.Float32frombits(uint32(val)))
+ return f, nil
+
+ case additionalInformationAsFloat64:
+ f := math.Float64frombits(val)
+ return f, nil
+ }
+
+ case cborTypeArray:
+ return d.parseArray()
+
+ case cborTypeMap:
+ if d.dm.defaultMapType != nil {
+ m := reflect.New(d.dm.defaultMapType)
+ err := d.parseToValue(m, getTypeInfo(m.Elem().Type()))
+ if err != nil {
+ return nil, err
+ }
+ return m.Elem().Interface(), nil
+ }
+ return d.parseMap()
+ }
+
+ return nil, nil
+}
+
+// parseByteString parses a CBOR encoded byte string. The returned byte slice
+// may be backed directly by the input. The second return value will be true if
+// and only if the slice is backed by a copy of the input. Callers are
+// responsible for making a copy if necessary.
+func (d *decoder) parseByteString() ([]byte, bool) {
+ _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
+ if !indefiniteLength {
+ b := d.data[d.off : d.off+int(val)]
+ d.off += int(val)
+ return b, false
+ }
+ // Process indefinite length string chunks.
+ b := []byte{}
+ for !d.foundBreak() {
+ _, _, val = d.getHead()
+ b = append(b, d.data[d.off:d.off+int(val)]...)
+ d.off += int(val)
+ }
+ return b, true
+}
+
+// applyByteStringTextConversion converts bytes read from a byte string to or from a configured text
+// encoding. If no transformation was performed (because it was not required), the original byte
+// slice is returned and the bool return value is false. Otherwise, a new slice containing the
+// converted bytes is returned along with the bool value true.
+func (d *decoder) applyByteStringTextConversion(
+ src []byte,
+ dstType reflect.Type,
+) (
+ dst []byte,
+ transformed bool,
+ err error,
+) {
+ switch dstType.Kind() {
+ case reflect.String:
+ if d.dm.byteStringToString != ByteStringToStringAllowedWithExpectedLaterEncoding || len(d.expectedLaterEncodingTags) == 0 {
+ return src, false, nil
+ }
+
+ switch d.expectedLaterEncodingTags[len(d.expectedLaterEncodingTags)-1] {
+ case tagNumExpectedLaterEncodingBase64URL:
+ encoded := make([]byte, base64.RawURLEncoding.EncodedLen(len(src)))
+ base64.RawURLEncoding.Encode(encoded, src)
+ return encoded, true, nil
+
+ case tagNumExpectedLaterEncodingBase64:
+ encoded := make([]byte, base64.StdEncoding.EncodedLen(len(src)))
+ base64.StdEncoding.Encode(encoded, src)
+ return encoded, true, nil
+
+ case tagNumExpectedLaterEncodingBase16:
+ encoded := make([]byte, hex.EncodedLen(len(src)))
+ hex.Encode(encoded, src)
+ return encoded, true, nil
+
+ default:
+ // If this happens, there is a bug: the decoder has pushed an invalid
+ // "expected later encoding" tag to the stack.
+ panic(fmt.Sprintf("unrecognized expected later encoding tag: %d", d.expectedLaterEncodingTags[len(d.expectedLaterEncodingTags)-1]))
+ }
+
+ case reflect.Slice:
+ if dstType.Elem().Kind() != reflect.Uint8 || len(d.expectedLaterEncodingTags) > 0 {
+ // Either the destination is not a slice of bytes, or the encoder that
+ // produced the input indicated an expected text encoding tag and therefore
+ // the content of the byte string has NOT been text encoded.
+ return src, false, nil
+ }
+
+ switch d.dm.byteStringExpectedFormat {
+ case ByteStringExpectedBase64URL:
+ decoded := make([]byte, base64.RawURLEncoding.DecodedLen(len(src)))
+ n, err := base64.RawURLEncoding.Decode(decoded, src)
+ if err != nil {
+ return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase64URL, err)
+ }
+ return decoded[:n], true, nil
+
+ case ByteStringExpectedBase64:
+ decoded := make([]byte, base64.StdEncoding.DecodedLen(len(src)))
+ n, err := base64.StdEncoding.Decode(decoded, src)
+ if err != nil {
+ return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase64, err)
+ }
+ return decoded[:n], true, nil
+
+ case ByteStringExpectedBase16:
+ decoded := make([]byte, hex.DecodedLen(len(src)))
+ n, err := hex.Decode(decoded, src)
+ if err != nil {
+ return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase16, err)
+ }
+ return decoded[:n], true, nil
+ }
+ }
+
+ return src, false, nil
+}
+
+// parseTextString parses a CBOR encoded text string. It returns a byte slice
+// to avoid creating an extra copy of the string. Callers should convert the
+// returned byte slice to a string when needed.
+func (d *decoder) parseTextString() ([]byte, error) {
+ _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
+ if !indefiniteLength {
+ b := d.data[d.off : d.off+int(val)]
+ d.off += int(val)
+ if d.dm.utf8 == UTF8RejectInvalid && !utf8.Valid(b) {
+ return nil, &SemanticError{"cbor: invalid UTF-8 string"}
+ }
+ return b, nil
+ }
+ // Process indefinite length string chunks.
+ b := []byte{}
+ for !d.foundBreak() {
+ _, _, val = d.getHead()
+ x := d.data[d.off : d.off+int(val)]
+ d.off += int(val)
+ if d.dm.utf8 == UTF8RejectInvalid && !utf8.Valid(x) {
+ for !d.foundBreak() {
+ d.skip() // Skip remaining chunk on error
+ }
+ return nil, &SemanticError{"cbor: invalid UTF-8 string"}
+ }
+ b = append(b, x...)
+ }
+ return b, nil
+}
+
+func (d *decoder) parseArray() ([]any, error) {
+ _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
+ hasSize := !indefiniteLength
+ count := int(val)
+ if !hasSize {
+ count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance
+ }
+ v := make([]any, count)
+ var e any
+ var err, lastErr error
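+ // Iterate a known number of elements (definite length) or until the break
+ // code is found (indefinite length).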
+ for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ {
+ if e, lastErr = d.parse(true); lastErr != nil {
+ if err == nil {
+ err = lastErr
+ }
+ continue
+ }
+ v[i] = e
+ }
+ return v, err
+}
+
+func (d *decoder) parseArrayToSlice(v reflect.Value, tInfo *typeInfo) error {
+ _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
+ hasSize := !indefiniteLength
+ count := int(val)
+ if !hasSize {
+ count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance
+ }
+ if v.IsNil() || v.Cap() < count || count == 0 {
+ v.Set(reflect.MakeSlice(tInfo.nonPtrType, count, count))
+ }
+ v.SetLen(count)
+ var err error
+ for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ {
+ if lastErr := d.parseToValue(v.Index(i), tInfo.elemTypeInfo); lastErr != nil {
+ if err == nil {
+ err = lastErr
+ }
+ }
+ }
+ return err
+}
+
+func (d *decoder) parseArrayToArray(v reflect.Value, tInfo *typeInfo) error {
+ _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
+ hasSize := !indefiniteLength
+ count := int(val)
+ gi := 0
+ vLen := v.Len()
+ var err error
+ for ci := 0; (hasSize && ci < count) || (!hasSize && !d.foundBreak()); ci++ {
+ if gi < vLen {
+ // Read CBOR array element and set array element
+ if lastErr := d.parseToValue(v.Index(gi), tInfo.elemTypeInfo); lastErr != nil {
+ if err == nil {
+ err = lastErr
+ }
+ }
+ gi++
+ } else {
+ d.skip() // Skip remaining CBOR array element
+ }
+ }
+ // Set remaining Go array elements to zero values.
+ if gi < vLen {
+ for ; gi < vLen; gi++ {
+ v.Index(gi).SetZero()
+ }
+ }
+ return err
+}
+
+func (d *decoder) parseMap() (any, error) {
+ _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
+ hasSize := !indefiniteLength
+ count := int(val)
+ m := make(map[any]any)
+ var k, e any
+ var err, lastErr error
+ keyCount := 0
+ for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ {
+ // Parse CBOR map key.
+ if k, lastErr = d.parse(true); lastErr != nil {
+ if err == nil {
+ err = lastErr
+ }
+ d.skip()
+ continue
+ }
+
+ // Detect if CBOR map key can be used as Go map key.
+ rv := reflect.ValueOf(k)
+ if !isHashableValue(rv) {
+ var converted bool
+ if d.dm.mapKeyByteString == MapKeyByteStringAllowed {
+ k, converted = convertByteSliceToByteString(k)
+ }
+ if !converted {
+ if err == nil {
+ err = &InvalidMapKeyTypeError{rv.Type().String()}
+ }
+ d.skip()
+ continue
+ }
+ }
+
+ // Parse CBOR map value.
+ if e, lastErr = d.parse(true); lastErr != nil {
+ if err == nil {
+ err = lastErr
+ }
+ continue
+ }
+
+ // Add key-value pair to Go map.
+ m[k] = e
+
+ // Detect duplicate map key.
+ if d.dm.dupMapKey == DupMapKeyEnforcedAPF {
+ newKeyCount := len(m)
+ if newKeyCount == keyCount {
+ m[k] = nil
+ err = &DupMapKeyError{k, i}
+ i++
+ // skip the rest of the map
+ for ; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ {
+ d.skip() // Skip map key
+ d.skip() // Skip map value
+ }
+ return m, err
+ }
+ keyCount = newKeyCount
+ }
+ }
+ return m, err
+}
+
+func (d *decoder) parseMapToMap(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo
+ _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
+ hasSize := !indefiniteLength
+ count := int(val)
+ if v.IsNil() {
+ mapsize := count
+ if !hasSize {
+ mapsize = 0
+ }
+ v.Set(reflect.MakeMapWithSize(tInfo.nonPtrType, mapsize))
+ }
+ keyType, eleType := tInfo.keyTypeInfo.typ, tInfo.elemTypeInfo.typ
+ reuseKey, reuseEle := isImmutableKind(tInfo.keyTypeInfo.kind), isImmutableKind(tInfo.elemTypeInfo.kind)
+ var keyValue, eleValue reflect.Value
+ keyIsInterfaceType := keyType == typeIntf // If key type is interface{}, need to check if key value is hashable.
+ var err, lastErr error
+ keyCount := v.Len()
+ var existingKeys map[any]bool // Store existing map keys, used for detecting duplicate map key.
+ if d.dm.dupMapKey == DupMapKeyEnforcedAPF {
+ existingKeys = make(map[any]bool, keyCount)
+ if keyCount > 0 {
+ vKeys := v.MapKeys()
+ for i := 0; i < len(vKeys); i++ {
+ existingKeys[vKeys[i].Interface()] = true
+ }
+ }
+ }
+ for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ {
+ // Parse CBOR map key.
+ if !keyValue.IsValid() {
+ keyValue = reflect.New(keyType).Elem()
+ } else if !reuseKey {
+ keyValue.SetZero()
+ }
+ if lastErr = d.parseToValue(keyValue, tInfo.keyTypeInfo); lastErr != nil {
+ if err == nil {
+ err = lastErr
+ }
+ d.skip()
+ continue
+ }
+
+ // Detect if CBOR map key can be used as Go map key.
+ if keyIsInterfaceType && keyValue.Elem().IsValid() {
+ if !isHashableValue(keyValue.Elem()) {
+ var converted bool
+ if d.dm.mapKeyByteString == MapKeyByteStringAllowed {
+ var k any
+ k, converted = convertByteSliceToByteString(keyValue.Elem().Interface())
+ if converted {
+ keyValue.Set(reflect.ValueOf(k))
+ }
+ }
+ if !converted {
+ if err == nil {
+ err = &InvalidMapKeyTypeError{keyValue.Elem().Type().String()}
+ }
+ d.skip()
+ continue
+ }
+ }
+ }
+
+ // Parse CBOR map value.
+ if !eleValue.IsValid() {
+ eleValue = reflect.New(eleType).Elem()
+ } else if !reuseEle {
+ eleValue.SetZero()
+ }
+ if lastErr := d.parseToValue(eleValue, tInfo.elemTypeInfo); lastErr != nil {
+ if err == nil {
+ err = lastErr
+ }
+ continue
+ }
+
+ // Add key-value pair to Go map.
+ v.SetMapIndex(keyValue, eleValue)
+
+ // Detect duplicate map key.
+ if d.dm.dupMapKey == DupMapKeyEnforcedAPF {
+ newKeyCount := v.Len()
+ if newKeyCount == keyCount {
+ kvi := keyValue.Interface()
+ if !existingKeys[kvi] {
+ v.SetMapIndex(keyValue, reflect.New(eleType).Elem())
+ err = &DupMapKeyError{kvi, i}
+ i++
+ // skip the rest of the map
+ for ; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ {
+ d.skip() // skip map key
+ d.skip() // skip map value
+ }
+ return err
+ }
+ delete(existingKeys, kvi)
+ }
+ keyCount = newKeyCount
+ }
+ }
+ return err
+}
+
+func (d *decoder) parseArrayToStruct(v reflect.Value, tInfo *typeInfo) error {
+ structType := getDecodingStructType(tInfo.nonPtrType)
+ if structType.err != nil {
+ return structType.err
+ }
+
+ if !structType.toArray {
+ t := d.nextCBORType()
+ d.skip()
+ return &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: tInfo.nonPtrType.String(),
+ errorMsg: "cannot decode CBOR array to struct without toarray option",
+ }
+ }
+
+ start := d.off
+ _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
+ hasSize := !indefiniteLength
+ count := int(val)
+ if !hasSize {
+ count = d.numOfItemsUntilBreak() // peek ahead to get array size
+ }
+ if count != len(structType.fields) {
+ d.off = start
+ d.skip()
+ return &UnmarshalTypeError{
+ CBORType: cborTypeArray.String(),
+ GoType: tInfo.typ.String(),
+ errorMsg: "cannot decode CBOR array to struct with different number of elements",
+ }
+ }
+ var err, lastErr error
+ for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ {
+ f := structType.fields[i]
+
+ // Get field value by index
+ var fv reflect.Value
+ if len(f.idx) == 1 {
+ fv = v.Field(f.idx[0])
+ } else {
+ fv, lastErr = getFieldValue(v, f.idx, func(v reflect.Value) (reflect.Value, error) {
+ // Return a new value for the embedded field's nil pointer to point to, or return an error.
+ if !v.CanSet() {
+ return reflect.Value{}, errors.New("cbor: cannot set embedded pointer to unexported struct: " + v.Type().String())
+ }
+ v.Set(reflect.New(v.Type().Elem()))
+ return v, nil
+ })
+ if lastErr != nil && err == nil {
+ err = lastErr
+ }
+ if !fv.IsValid() {
+ d.skip()
+ continue
+ }
+ }
+
+ if lastErr = d.parseToValue(fv, f.typInfo); lastErr != nil {
+ if err == nil {
+ if typeError, ok := lastErr.(*UnmarshalTypeError); ok {
+ typeError.StructFieldName = tInfo.typ.String() + "." + f.name
+ err = typeError
+ } else {
+ err = lastErr
+ }
+ }
+ }
+ }
+ return err
+}
+
+// parseMapToStruct needs to be fast so gocyclo can be ignored for now.
+func (d *decoder) parseMapToStruct(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo
+ structType := getDecodingStructType(tInfo.nonPtrType)
+ if structType.err != nil {
+ return structType.err
+ }
+
+ if structType.toArray {
+ t := d.nextCBORType()
+ d.skip()
+ return &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: tInfo.nonPtrType.String(),
+ errorMsg: "cannot decode CBOR map to struct with toarray option",
+ }
+ }
+
+ var err, lastErr error
+
+ // Get CBOR map size
+ _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
+ hasSize := !indefiniteLength
+ count := int(val)
+
+ // Keeps track of matched struct fields
+ var foundFldIdx []bool
+ {
+ const maxStackFields = 128
+ if nfields := len(structType.fields); nfields <= maxStackFields {
+ // For structs with typical field counts, expect that this can be
+ // stack-allocated.
+ var a [maxStackFields]bool
+ foundFldIdx = a[:nfields]
+ } else {
+ foundFldIdx = make([]bool, len(structType.fields))
+ }
+ }
+
+ // Keeps track of CBOR map keys to detect duplicate map key
+ keyCount := 0
+ var mapKeys map[any]struct{}
+
+ errOnUnknownField := (d.dm.extraReturnErrors & ExtraDecErrorUnknownField) > 0
+
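+ // Each iteration of this loop consumes one CBOR map entry (a key and its value).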
+MapEntryLoop:
+ for j := 0; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ {
+ var f *field
+
+ // If duplicate map key detection is enabled and the key at index j did not
+ // match any field, k will hold the map key.
+ var k any
+
+ t := d.nextCBORType()
+ if t == cborTypeTextString || (t == cborTypeByteString && d.dm.fieldNameByteString == FieldNameByteStringAllowed) {
+ var keyBytes []byte
+ if t == cborTypeTextString {
+ keyBytes, lastErr = d.parseTextString()
+ if lastErr != nil {
+ if err == nil {
+ err = lastErr
+ }
+ d.skip() // skip value
+ continue
+ }
+ } else { // cborTypeByteString
+ keyBytes, _ = d.parseByteString()
+ }
+
+ // Check for exact match on field name.
+ if i, ok := structType.fieldIndicesByName[string(keyBytes)]; ok {
+ fld := structType.fields[i]
+
+ if !foundFldIdx[i] {
+ f = fld
+ foundFldIdx[i] = true
+ } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF {
+ err = &DupMapKeyError{fld.name, j}
+ d.skip() // skip value
+ j++
+ // skip the rest of the map
+ for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ {
+ d.skip()
+ d.skip()
+ }
+ return err
+ } else {
+ // discard repeated match
+ d.skip()
+ continue MapEntryLoop
+ }
+ }
+
+ // Find field with case-insensitive match
+ if f == nil && d.dm.fieldNameMatching == FieldNameMatchingPreferCaseSensitive {
+ keyLen := len(keyBytes)
+ keyString := string(keyBytes)
+ for i := 0; i < len(structType.fields); i++ {
+ fld := structType.fields[i]
+ if len(fld.name) == keyLen && strings.EqualFold(fld.name, keyString) {
+ if !foundFldIdx[i] {
+ f = fld
+ foundFldIdx[i] = true
+ } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF {
+ err = &DupMapKeyError{keyString, j}
+ d.skip() // skip value
+ j++
+ // skip the rest of the map
+ for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ {
+ d.skip()
+ d.skip()
+ }
+ return err
+ } else {
+ // discard repeated match
+ d.skip()
+ continue MapEntryLoop
+ }
+ break
+ }
+ }
+ }
+
+ if d.dm.dupMapKey == DupMapKeyEnforcedAPF && f == nil {
+ k = string(keyBytes)
+ }
+ } else if t <= cborTypeNegativeInt { // uint/int
+ var nameAsInt int64
+
+ if t == cborTypePositiveInt {
+ _, _, val := d.getHead()
+ nameAsInt = int64(val)
+ } else {
+ _, _, val := d.getHead()
+ if val > math.MaxInt64 {
+ if err == nil {
+ err = &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: reflect.TypeOf(int64(0)).String(),
+ errorMsg: "-1-" + strconv.FormatUint(val, 10) + " overflows Go's int64",
+ }
+ }
+ d.skip() // skip value
+ continue
+ }
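+ // CBOR negative integers represent -1-val; XOR with -1 (all bits set)
+ // computes exactly that, without overflow, for val <= math.MaxInt64.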
+ nameAsInt = int64(-1) ^ int64(val)
+ }
+
+ // Find field
+ for i := 0; i < len(structType.fields); i++ {
+ fld := structType.fields[i]
+ if fld.keyAsInt && fld.nameAsInt == nameAsInt {
+ if !foundFldIdx[i] {
+ f = fld
+ foundFldIdx[i] = true
+ } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF {
+ err = &DupMapKeyError{nameAsInt, j}
+ d.skip() // skip value
+ j++
+ // skip the rest of the map
+ for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ {
+ d.skip()
+ d.skip()
+ }
+ return err
+ } else {
+ // discard repeated match
+ d.skip()
+ continue MapEntryLoop
+ }
+ break
+ }
+ }
+
+ if d.dm.dupMapKey == DupMapKeyEnforcedAPF && f == nil {
+ k = nameAsInt
+ }
+ } else {
+ if err == nil {
+ err = &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: reflect.TypeOf("").String(),
+ errorMsg: "map key is of type " + t.String() + " and cannot be used to match struct field name",
+ }
+ }
+ if d.dm.dupMapKey == DupMapKeyEnforcedAPF {
+ // parse key
+ k, lastErr = d.parse(true)
+ if lastErr != nil {
+ d.skip() // skip value
+ continue
+ }
+ // Detect if CBOR map key can be used as Go map key.
+ if !isHashableValue(reflect.ValueOf(k)) {
+ d.skip() // skip value
+ continue
+ }
+ } else {
+ d.skip() // skip key
+ }
+ }
+
+ if f == nil {
+ if errOnUnknownField {
+ err = &UnknownFieldError{j}
+ d.skip() // Skip value
+ j++
+ // skip the rest of the map
+ for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ {
+ d.skip()
+ d.skip()
+ }
+ return err
+ }
+
+ // Two map keys that match the same struct field are immediately considered
+ // duplicates. This check detects duplicates between two map keys that do
+ // not match a struct field. If unknown field errors are enabled, then this
+ // check is never reached.
+ if d.dm.dupMapKey == DupMapKeyEnforcedAPF {
+ if mapKeys == nil {
+ mapKeys = make(map[any]struct{}, 1)
+ }
+ mapKeys[k] = struct{}{}
+ newKeyCount := len(mapKeys)
+ if newKeyCount == keyCount {
+ err = &DupMapKeyError{k, j}
+ d.skip() // skip value
+ j++
+ // skip the rest of the map
+ for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ {
+ d.skip()
+ d.skip()
+ }
+ return err
+ }
+ keyCount = newKeyCount
+ }
+
+ d.skip() // Skip value
+ continue
+ }
+
+ // Get field value by index
+ var fv reflect.Value
+ if len(f.idx) == 1 {
+ fv = v.Field(f.idx[0])
+ } else {
+ fv, lastErr = getFieldValue(v, f.idx, func(v reflect.Value) (reflect.Value, error) {
+ // Return a new value for the embedded field's nil pointer to point to, or return an error.
+ if !v.CanSet() {
+ return reflect.Value{}, errors.New("cbor: cannot set embedded pointer to unexported struct: " + v.Type().String())
+ }
+ v.Set(reflect.New(v.Type().Elem()))
+ return v, nil
+ })
+ if lastErr != nil && err == nil {
+ err = lastErr
+ }
+ if !fv.IsValid() {
+ d.skip()
+ continue
+ }
+ }
+
+ if lastErr = d.parseToValue(fv, f.typInfo); lastErr != nil {
+ if err == nil {
+ if typeError, ok := lastErr.(*UnmarshalTypeError); ok {
+ typeError.StructFieldName = tInfo.nonPtrType.String() + "." + f.name
+ err = typeError
+ } else {
+ err = lastErr
+ }
+ }
+ }
+ }
+ return err
+}
+
+// validRegisteredTagNums verifies that decoded tag numbers match the registered
+// tag numbers of registeredTag. It assumes the next CBOR data item is a tag,
+// scans all tag numbers, and stops at the tag content.
+func (d *decoder) validRegisteredTagNums(registeredTag *tagItem) error {
+ // Scan until next cbor data is tag content.
+ tagNums := make([]uint64, 0, 1)
+ for d.nextCBORType() == cborTypeTag {
+ _, _, val := d.getHead()
+ tagNums = append(tagNums, val)
+ }
+
+ if !registeredTag.equalTagNum(tagNums) {
+ return &WrongTagError{registeredTag.contentType, registeredTag.num, tagNums}
+ }
+ return nil
+}
+
+func (d *decoder) getRegisteredTagItem(vt reflect.Type) *tagItem {
+ if d.dm.tags != nil {
+ return d.dm.tags.getTagItemFromType(vt)
+ }
+ return nil
+}
+
+// skip advances the data offset past the next CBOR data item. skip assumes
+// data is well-formed, and does not perform bounds checking.
+func (d *decoder) skip() {
+ t, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
+
+ if indefiniteLength {
+ switch t {
+ case cborTypeByteString, cborTypeTextString, cborTypeArray, cborTypeMap:
+ for {
+ if isBreakFlag(d.data[d.off]) {
+ d.off++
+ return
+ }
+ d.skip()
+ }
+ }
+ }
+
+ switch t {
+ case cborTypeByteString, cborTypeTextString:
+ d.off += int(val)
+
+ case cborTypeArray:
+ for i := 0; i < int(val); i++ {
+ d.skip()
+ }
+
+ case cborTypeMap:
+ for i := 0; i < int(val)*2; i++ {
+ d.skip()
+ }
+
+ case cborTypeTag:
+ d.skip()
+ }
+}
+
+func (d *decoder) getHeadWithIndefiniteLengthFlag() (
+ t cborType,
+ ai byte,
+ val uint64,
+ indefiniteLength bool,
+) {
+ t, ai, val = d.getHead()
+ indefiniteLength = additionalInformation(ai).isIndefiniteLength()
+ return
+}
+
+// getHead assumes data is well-formed, and does not perform bounds checking.
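+// The head is the initial byte (major type in the high 3 bits, additional
+// information in the low 5 bits), optionally followed by a 1-, 2-, 4-, or
+// 8-byte argument. For example, the bytes 0x18 0x2a decode to
+// t=cborTypePositiveInt, ai=24, and val=42.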
+func (d *decoder) getHead() (t cborType, ai byte, val uint64) {
+ t, ai = parseInitialByte(d.data[d.off])
+ val = uint64(ai)
+ d.off++
+
+ if ai <= maxAdditionalInformationWithoutArgument {
+ return
+ }
+
+ if ai == additionalInformationWith1ByteArgument {
+ val = uint64(d.data[d.off])
+ d.off++
+ return
+ }
+
+ if ai == additionalInformationWith2ByteArgument {
+ const argumentSize = 2
+ val = uint64(binary.BigEndian.Uint16(d.data[d.off : d.off+argumentSize]))
+ d.off += argumentSize
+ return
+ }
+
+ if ai == additionalInformationWith4ByteArgument {
+ const argumentSize = 4
+ val = uint64(binary.BigEndian.Uint32(d.data[d.off : d.off+argumentSize]))
+ d.off += argumentSize
+ return
+ }
+
+ if ai == additionalInformationWith8ByteArgument {
+ const argumentSize = 8
+ val = binary.BigEndian.Uint64(d.data[d.off : d.off+argumentSize])
+ d.off += argumentSize
+ return
+ }
+ return
+}
+
+func (d *decoder) numOfItemsUntilBreak() int {
+ savedOff := d.off
+ i := 0
+ for !d.foundBreak() {
+ d.skip()
+ i++
+ }
+ d.off = savedOff
+ return i
+}
+
+// foundBreak returns true if the next byte is the CBOR break code and moves the
+// cursor by 1; otherwise it returns false.
+// foundBreak assumes data is well-formed, and does not perform bounds checking.
+func (d *decoder) foundBreak() bool {
+ if isBreakFlag(d.data[d.off]) {
+ d.off++
+ return true
+ }
+ return false
+}
+
+func (d *decoder) reset(data []byte) {
+ d.data = data
+ d.off = 0
+ d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:0]
+}
+
+func (d *decoder) nextCBORType() cborType {
+ return getType(d.data[d.off])
+}
+
+func (d *decoder) nextCBORNil() bool {
+ return d.data[d.off] == 0xf6 || d.data[d.off] == 0xf7
+}
+
+type jsonUnmarshaler interface{ UnmarshalJSON([]byte) error }
+
+var (
+ typeIntf = reflect.TypeOf([]any(nil)).Elem()
+ typeTime = reflect.TypeOf(time.Time{})
+ typeBigInt = reflect.TypeOf(big.Int{})
+ typeUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+ typeUnexportedUnmarshaler = reflect.TypeOf((*unmarshaler)(nil)).Elem()
+ typeBinaryUnmarshaler = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()
+ typeTextUnmarshaler = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+ typeJSONUnmarshaler = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem()
+ typeString = reflect.TypeOf("")
+ typeByteSlice = reflect.TypeOf([]byte(nil))
+)
+
+func fillNil(_ cborType, v reflect.Value) error {
+ switch v.Kind() {
+ case reflect.Slice, reflect.Map, reflect.Interface, reflect.Pointer:
+ v.SetZero()
+ return nil
+ }
+ return nil
+}
+
+func fillPositiveInt(t cborType, val uint64, v reflect.Value) error {
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if val > math.MaxInt64 {
+ return &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: v.Type().String(),
+ errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(),
+ }
+ }
+ if v.OverflowInt(int64(val)) {
+ return &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: v.Type().String(),
+ errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(),
+ }
+ }
+ v.SetInt(int64(val))
+ return nil
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ if v.OverflowUint(val) {
+ return &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: v.Type().String(),
+ errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(),
+ }
+ }
+ v.SetUint(val)
+ return nil
+
+ case reflect.Float32, reflect.Float64:
+ f := float64(val)
+ v.SetFloat(f)
+ return nil
+ }
+
+ if v.Type() == typeBigInt {
+ i := new(big.Int).SetUint64(val)
+ v.Set(reflect.ValueOf(*i))
+ return nil
+ }
+ return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()}
+}
+
+func fillNegativeInt(t cborType, val int64, v reflect.Value) error {
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if v.OverflowInt(val) {
+ return &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: v.Type().String(),
+ errorMsg: strconv.FormatInt(val, 10) + " overflows " + v.Type().String(),
+ }
+ }
+ v.SetInt(val)
+ return nil
+
+ case reflect.Float32, reflect.Float64:
+ f := float64(val)
+ v.SetFloat(f)
+ return nil
+ }
+ if v.Type() == typeBigInt {
+ i := new(big.Int).SetInt64(val)
+ v.Set(reflect.ValueOf(*i))
+ return nil
+ }
+ return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()}
+}
+
+func fillBool(t cborType, val bool, v reflect.Value) error {
+ if v.Kind() == reflect.Bool {
+ v.SetBool(val)
+ return nil
+ }
+ return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()}
+}
+
+func fillFloat(t cborType, val float64, v reflect.Value) error {
+ switch v.Kind() {
+ case reflect.Float32, reflect.Float64:
+ if v.OverflowFloat(val) {
+ return &UnmarshalTypeError{
+ CBORType: t.String(),
+ GoType: v.Type().String(),
+ errorMsg: strconv.FormatFloat(val, 'E', -1, 64) + " overflows " + v.Type().String(),
+ }
+ }
+ v.SetFloat(val)
+ return nil
+ }
+ return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()}
+}
+
+func fillByteString(t cborType, val []byte, shared bool, v reflect.Value, bsts ByteStringToStringMode, bum BinaryUnmarshalerMode, tum TextUnmarshalerMode) error {
+ if bum == BinaryUnmarshalerByteString && reflect.PointerTo(v.Type()).Implements(typeBinaryUnmarshaler) {
+ if v.CanAddr() {
+ v = v.Addr()
+ if u, ok := v.Interface().(encoding.BinaryUnmarshaler); ok {
+ // The contract of BinaryUnmarshaler forbids
+ // retaining the input bytes, so no copying is
+ // required even if val is shared.
+ return u.UnmarshalBinary(val)
+ }
+ }
+ return errors.New("cbor: cannot set new value for " + v.Type().String())
+ }
+ if bsts != ByteStringToStringForbidden {
+ if tum == TextUnmarshalerTextString && reflect.PointerTo(v.Type()).Implements(typeTextUnmarshaler) {
+ if v.CanAddr() {
+ v = v.Addr()
+ if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+ // The contract of TextUnmarshaler forbids retaining the input
+ // bytes, so no copying is required even if val is shared.
+ if err := u.UnmarshalText(val); err != nil {
+ return fmt.Errorf("cbor: cannot unmarshal text for %s: %w", v.Type(), err)
+ }
+ return nil
+ }
+ }
+ return errors.New("cbor: cannot set new value for " + v.Type().String())
+ }
+
+ if v.Kind() == reflect.String {
+ v.SetString(string(val))
+ return nil
+ }
+ }
+ if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 {
+ src := val
+ if shared {
+ // SetBytes shares the underlying bytes of the source slice.
+ src = make([]byte, len(val))
+ copy(src, val)
+ }
+ v.SetBytes(src)
+ return nil
+ }
+ if v.Kind() == reflect.Array && v.Type().Elem().Kind() == reflect.Uint8 {
+ vLen := v.Len()
+ i := 0
+ for ; i < vLen && i < len(val); i++ {
+ v.Index(i).SetUint(uint64(val[i]))
+ }
+ // Set remaining Go array elements to zero values.
+ if i < vLen {
+ for ; i < vLen; i++ {
+ v.Index(i).SetZero()
+ }
+ }
+ return nil
+ }
+ return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()}
+}
+
+func fillTextString(t cborType, val []byte, v reflect.Value, tum TextUnmarshalerMode) error {
+ // Check if the value implements TextUnmarshaler and the mode allows it
+ if tum == TextUnmarshalerTextString && reflect.PointerTo(v.Type()).Implements(typeTextUnmarshaler) {
+ if v.CanAddr() {
+ v = v.Addr()
+ if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+ // The contract of TextUnmarshaler forbids retaining the input
+ // bytes, so no copying is required even if val is shared.
+ if err := u.UnmarshalText(val); err != nil {
+ return fmt.Errorf("cbor: cannot unmarshal text for %s: %w", v.Type(), err)
+ }
+ return nil
+ }
+ }
+ return errors.New("cbor: cannot set new value for " + v.Type().String())
+ }
+
+ if v.Kind() == reflect.String {
+ v.SetString(string(val))
+ return nil
+ }
+
+ return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()}
+}
+
+func isImmutableKind(k reflect.Kind) bool {
+ switch k {
+ case reflect.Bool,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+ reflect.Float32, reflect.Float64,
+ reflect.String:
+ return true
+
+ default:
+ return false
+ }
+}
+
+func isHashableValue(rv reflect.Value) bool {
+ switch rv.Kind() {
+ case reflect.Slice, reflect.Map, reflect.Func:
+ return false
+
+ case reflect.Struct:
+ switch rv.Type() {
+ case typeTag:
+ tag := rv.Interface().(Tag)
+ return isHashableValue(reflect.ValueOf(tag.Content))
+ case typeBigInt:
+ return false
+ }
+ }
+ return true
+}
+
+// convertByteSliceToByteString converts []byte to ByteString if
+// - v is []byte type, or
+// - v is Tag type and tag content type is []byte
+// This function also handles nested tags.
+// CBOR data is already verified to be well-formed before this function is used,
+// so the recursion won't exceed max nested levels.
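+// For example, Tag{Number: 21, Content: []byte{0x01}} converts to
+// Tag{Number: 21, Content: ByteString("\x01")} and reports true.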
+func convertByteSliceToByteString(v any) (any, bool) {
+ switch v := v.(type) {
+ case []byte:
+ return ByteString(v), true
+
+ case Tag:
+ content, converted := convertByteSliceToByteString(v.Content)
+ if converted {
+ return Tag{Number: v.Number, Content: content}, true
+ }
+ }
+ return v, false
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/diagnose.go b/vendor/github.com/fxamacker/cbor/v2/diagnose.go
new file mode 100644
index 000000000..44afb8660
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/diagnose.go
@@ -0,0 +1,724 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+ "bytes"
+ "encoding/base32"
+ "encoding/base64"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "math/big"
+ "strconv"
+ "unicode/utf16"
+ "unicode/utf8"
+
+ "github.com/x448/float16"
+)
+
+// DiagMode is the main interface for CBOR diagnostic notation.
+type DiagMode interface {
+ // Diagnose returns extended diagnostic notation (EDN) of CBOR data items using this DiagMode.
+ Diagnose([]byte) (string, error)
+
+ // DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest.
+ DiagnoseFirst([]byte) (string, []byte, error)
+
+ // DiagOptions returns user specified options used to create this DiagMode.
+ DiagOptions() DiagOptions
+}
+
+// ByteStringEncoding specifies the base encoding in which byte strings are notated.
+type ByteStringEncoding uint8
+
+const (
+ // ByteStringBase16Encoding encodes byte strings in base16, without padding.
+ ByteStringBase16Encoding ByteStringEncoding = iota
+
+ // ByteStringBase32Encoding encodes byte strings in base32, without padding.
+ ByteStringBase32Encoding
+
+ // ByteStringBase32HexEncoding encodes byte strings in base32hex, without padding.
+ ByteStringBase32HexEncoding
+
+ // ByteStringBase64Encoding encodes byte strings in base64url, without padding.
+ ByteStringBase64Encoding
+
+ maxByteStringEncoding
+)
+
+func (bse ByteStringEncoding) valid() error {
+ if bse >= maxByteStringEncoding {
+ return errors.New("cbor: invalid ByteStringEncoding " + strconv.Itoa(int(bse)))
+ }
+ return nil
+}
+
+// DiagOptions specifies Diag options.
+type DiagOptions struct {
+ // ByteStringEncoding specifies the base encoding in which byte strings are notated.
+ // Default is ByteStringBase16Encoding.
+ ByteStringEncoding ByteStringEncoding
+
+ // ByteStringHexWhitespace specifies inserting whitespace between bytes in a
+ // notated byte string when ByteStringEncoding is ByteStringBase16Encoding.
+ ByteStringHexWhitespace bool
+
+ // ByteStringText specifies notating a byte string as text
+ // if it is valid UTF-8 text.
+ ByteStringText bool
+
+ // ByteStringEmbeddedCBOR specifies notating a byte string as embedded CBOR
+ // if it contains well-formed CBOR bytes.
+ ByteStringEmbeddedCBOR bool
+
+ // CBORSequence specifies notating CBOR sequences;
+ // otherwise, an error is returned if there are bytes remaining after the first CBOR data item.
+ CBORSequence bool
+
+ // FloatPrecisionIndicator specifies appending a suffix to indicate float precision.
+ // Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-encoding-indicators.
+ FloatPrecisionIndicator bool
+
+ // MaxNestedLevels specifies the max nested levels allowed for any combination of CBOR arrays, maps, and tags.
+ // Default is 32 levels and it can be set to [4, 65535]. Note that higher maximum levels of nesting can
+ // require larger amounts of stack to deserialize. Don't increase this beyond what you require.
+ MaxNestedLevels int
+
+ // MaxArrayElements specifies the max number of elements for CBOR arrays.
+ // Default is 128*1024=131072 and it can be set to [16, 2147483647].
+ MaxArrayElements int
+
+ // MaxMapPairs specifies the max number of key-value pairs for CBOR maps.
+ // Default is 128*1024=131072 and it can be set to [16, 2147483647].
+ MaxMapPairs int
+}
+
+// DiagMode returns a DiagMode with immutable options.
+func (opts DiagOptions) DiagMode() (DiagMode, error) {
+ return opts.diagMode()
+}
+
+func (opts DiagOptions) diagMode() (*diagMode, error) {
+ if err := opts.ByteStringEncoding.valid(); err != nil {
+ return nil, err
+ }
+
+ decMode, err := DecOptions{
+ MaxNestedLevels: opts.MaxNestedLevels,
+ MaxArrayElements: opts.MaxArrayElements,
+ MaxMapPairs: opts.MaxMapPairs,
+ }.decMode()
+ if err != nil {
+ return nil, err
+ }
+
+ return &diagMode{
+ byteStringEncoding: opts.ByteStringEncoding,
+ byteStringHexWhitespace: opts.ByteStringHexWhitespace,
+ byteStringText: opts.ByteStringText,
+ byteStringEmbeddedCBOR: opts.ByteStringEmbeddedCBOR,
+ cborSequence: opts.CBORSequence,
+ floatPrecisionIndicator: opts.FloatPrecisionIndicator,
+ decMode: decMode,
+ }, nil
+}
+
+type diagMode struct {
+ byteStringEncoding ByteStringEncoding
+ byteStringHexWhitespace bool
+ byteStringText bool
+ byteStringEmbeddedCBOR bool
+ cborSequence bool
+ floatPrecisionIndicator bool
+ decMode *decMode
+}
+
+// DiagOptions returns user specified options used to create this DiagMode.
+func (dm *diagMode) DiagOptions() DiagOptions {
+ return DiagOptions{
+ ByteStringEncoding: dm.byteStringEncoding,
+ ByteStringHexWhitespace: dm.byteStringHexWhitespace,
+ ByteStringText: dm.byteStringText,
+ ByteStringEmbeddedCBOR: dm.byteStringEmbeddedCBOR,
+ CBORSequence: dm.cborSequence,
+ FloatPrecisionIndicator: dm.floatPrecisionIndicator,
+ MaxNestedLevels: dm.decMode.maxNestedLevels,
+ MaxArrayElements: dm.decMode.maxArrayElements,
+ MaxMapPairs: dm.decMode.maxMapPairs,
+ }
+}
+
+// Diagnose returns extended diagnostic notation (EDN) of CBOR data items using the DiagMode.
+func (dm *diagMode) Diagnose(data []byte) (string, error) {
+ return newDiagnose(data, dm.decMode, dm).diag(dm.cborSequence)
+}
+
+// DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest.
+func (dm *diagMode) DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) {
+ return newDiagnose(data, dm.decMode, dm).diagFirst()
+}
+
+var defaultDiagMode, _ = DiagOptions{}.diagMode()
+
+// Diagnose returns extended diagnostic notation (EDN) of CBOR data items
+// using the default diagnostic mode.
+//
+// Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-diagnostic-notation.
+func Diagnose(data []byte) (string, error) {
+ return defaultDiagMode.Diagnose(data)
+}
+
+// DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the default diagnostic mode. Any remaining bytes are returned in rest.
+func DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) {
+ return defaultDiagMode.DiagnoseFirst(data)
+}
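+
+// A minimal usage sketch: the bytes 0x63 0x66 0x6f 0x6f are the CBOR text
+// string "foo", so the default mode notates it as a quoted string:
+//
+//	edn, err := cbor.Diagnose([]byte{0x63, 0x66, 0x6f, 0x6f}) // edn: "foo" (5 chars, quotes included)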
+
+type diagnose struct {
+ dm *diagMode
+ d *decoder
+ w *bytes.Buffer
+}
+
+func newDiagnose(data []byte, decm *decMode, diagm *diagMode) *diagnose {
+ return &diagnose{
+ dm: diagm,
+ d: &decoder{data: data, dm: decm},
+ w: &bytes.Buffer{},
+ }
+}
+
+func (di *diagnose) diag(cborSequence bool) (string, error) {
+ // CBOR Sequence
+ firstItem := true
+ for {
+ switch err := di.wellformed(cborSequence); err {
+ case nil:
+ if !firstItem {
+ di.w.WriteString(", ")
+ }
+ firstItem = false
+ if itemErr := di.item(); itemErr != nil {
+ return di.w.String(), itemErr
+ }
+
+ case io.EOF:
+ if firstItem {
+ return di.w.String(), err
+ }
+ return di.w.String(), nil
+
+ default:
+ return di.w.String(), err
+ }
+ }
+}
+
+func (di *diagnose) diagFirst() (diagNotation string, rest []byte, err error) {
+ err = di.wellformed(true)
+ if err == nil {
+ err = di.item()
+ }
+
+ if err == nil {
+ // Return EDN and the rest of the data slice (which might be len 0)
+ return di.w.String(), di.d.data[di.d.off:], nil
+ }
+
+ return di.w.String(), nil, err
+}
+
+func (di *diagnose) wellformed(allowExtraData bool) error {
+ off := di.d.off
+ err := di.d.wellformed(allowExtraData, false)
+ di.d.off = off
+ return err
+}
+
+func (di *diagnose) item() error { //nolint:gocyclo
+ initialByte := di.d.data[di.d.off]
+ switch initialByte {
+ case cborByteStringWithIndefiniteLengthHead,
+ cborTextStringWithIndefiniteLengthHead: // indefinite-length byte/text string
+ di.d.off++
+ if isBreakFlag(di.d.data[di.d.off]) {
+ di.d.off++
+ switch initialByte {
+ case cborByteStringWithIndefiniteLengthHead:
+ // indefinite-length bytes with no chunks.
+ di.w.WriteString(`''_`)
+ return nil
+ case cborTextStringWithIndefiniteLengthHead:
+ // indefinite-length text with no chunks.
+ di.w.WriteString(`""_`)
+ return nil
+ }
+ }
+
+ di.w.WriteString("(_ ")
+
+ i := 0
+ for !di.d.foundBreak() {
+ if i > 0 {
+ di.w.WriteString(", ")
+ }
+
+ i++
+ // wellformedIndefiniteString() already checked that the next item is a byte/text string.
+ if err := di.item(); err != nil {
+ return err
+ }
+ }
+
+ di.w.WriteByte(')')
+ return nil
+
+ case cborArrayWithIndefiniteLengthHead: // indefinite-length array
+ di.d.off++
+ di.w.WriteString("[_ ")
+
+ i := 0
+ for !di.d.foundBreak() {
+ if i > 0 {
+ di.w.WriteString(", ")
+ }
+
+ i++
+ if err := di.item(); err != nil {
+ return err
+ }
+ }
+
+ di.w.WriteByte(']')
+ return nil
+
+ case cborMapWithIndefiniteLengthHead: // indefinite-length map
+ di.d.off++
+ di.w.WriteString("{_ ")
+
+ i := 0
+ for !di.d.foundBreak() {
+ if i > 0 {
+ di.w.WriteString(", ")
+ }
+
+ i++
+ // key
+ if err := di.item(); err != nil {
+ return err
+ }
+
+ di.w.WriteString(": ")
+
+ // value
+ if err := di.item(); err != nil {
+ return err
+ }
+ }
+
+ di.w.WriteByte('}')
+ return nil
+ }
+
+ t := di.d.nextCBORType()
+ switch t {
+ case cborTypePositiveInt:
+ _, _, val := di.d.getHead()
+ di.w.WriteString(strconv.FormatUint(val, 10))
+ return nil
+
+ case cborTypeNegativeInt:
+ _, _, val := di.d.getHead()
+ if val > math.MaxInt64 {
+ // CBOR negative integer overflows int64, use big.Int to store value.
+ bi := new(big.Int)
+ bi.SetUint64(val)
+ bi.Add(bi, big.NewInt(1))
+ bi.Neg(bi)
+ di.w.WriteString(bi.String())
+ return nil
+ }
+
+ nValue := int64(-1) ^ int64(val)
+ di.w.WriteString(strconv.FormatInt(nValue, 10))
+ return nil
+
+ case cborTypeByteString:
+ b, _ := di.d.parseByteString()
+ return di.encodeByteString(b)
+
+ case cborTypeTextString:
+ b, err := di.d.parseTextString()
+ if err != nil {
+ return err
+ }
+ return di.encodeTextString(string(b), '"')
+
+ case cborTypeArray:
+ _, _, val := di.d.getHead()
+ count := int(val)
+ di.w.WriteByte('[')
+
+ for i := 0; i < count; i++ {
+ if i > 0 {
+ di.w.WriteString(", ")
+ }
+ if err := di.item(); err != nil {
+ return err
+ }
+ }
+ di.w.WriteByte(']')
+ return nil
+
+ case cborTypeMap:
+ _, _, val := di.d.getHead()
+ count := int(val)
+ di.w.WriteByte('{')
+
+ for i := 0; i < count; i++ {
+ if i > 0 {
+ di.w.WriteString(", ")
+ }
+ // key
+ if err := di.item(); err != nil {
+ return err
+ }
+ di.w.WriteString(": ")
+ // value
+ if err := di.item(); err != nil {
+ return err
+ }
+ }
+ di.w.WriteByte('}')
+ return nil
+
+ case cborTypeTag:
+ _, _, tagNum := di.d.getHead()
+ switch tagNum {
+ case tagNumUnsignedBignum:
+ if nt := di.d.nextCBORType(); nt != cborTypeByteString {
+ return newInadmissibleTagContentTypeError(
+ tagNumUnsignedBignum,
+ "byte string",
+ nt.String())
+ }
+
+ b, _ := di.d.parseByteString()
+ bi := new(big.Int).SetBytes(b)
+ di.w.WriteString(bi.String())
+ return nil
+
+ case tagNumNegativeBignum:
+ if nt := di.d.nextCBORType(); nt != cborTypeByteString {
+ return newInadmissibleTagContentTypeError(
+ tagNumNegativeBignum,
+ "byte string",
+ nt.String(),
+ )
+ }
+
+ b, _ := di.d.parseByteString()
+ bi := new(big.Int).SetBytes(b)
+ bi.Add(bi, big.NewInt(1))
+ bi.Neg(bi)
+ di.w.WriteString(bi.String())
+ return nil
+
+ default:
+ di.w.WriteString(strconv.FormatUint(tagNum, 10))
+ di.w.WriteByte('(')
+ if err := di.item(); err != nil {
+ return err
+ }
+ di.w.WriteByte(')')
+ return nil
+ }
+
+ case cborTypePrimitives:
+ _, ai, val := di.d.getHead()
+ switch ai {
+ case additionalInformationAsFalse:
+ di.w.WriteString("false")
+ return nil
+
+ case additionalInformationAsTrue:
+ di.w.WriteString("true")
+ return nil
+
+ case additionalInformationAsNull:
+ di.w.WriteString("null")
+ return nil
+
+ case additionalInformationAsUndefined:
+ di.w.WriteString("undefined")
+ return nil
+
+ case additionalInformationAsFloat16,
+ additionalInformationAsFloat32,
+ additionalInformationAsFloat64:
+ return di.encodeFloat(ai, val)
+
+ default:
+ di.w.WriteString("simple(")
+ di.w.WriteString(strconv.FormatUint(val, 10))
+ di.w.WriteByte(')')
+ return nil
+ }
+ }
+
+ return nil
+}
+
+// writeU16 formats a rune as "\uxxxx".
+func (di *diagnose) writeU16(val rune) {
+ di.w.WriteString("\\u")
+ var in [2]byte
+ in[0] = byte(val >> 8)
+ in[1] = byte(val)
+ sz := hex.EncodedLen(len(in))
+ di.w.Grow(sz)
+ dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
+ hex.Encode(dst, in[:])
+ di.w.Write(dst)
+}
+
+var rawBase32Encoding = base32.StdEncoding.WithPadding(base32.NoPadding)
+var rawBase32HexEncoding = base32.HexEncoding.WithPadding(base32.NoPadding)
+
+func (di *diagnose) encodeByteString(val []byte) error {
+ if len(val) > 0 {
+ if di.dm.byteStringText && utf8.Valid(val) {
+ return di.encodeTextString(string(val), '\'')
+ }
+
+ if di.dm.byteStringEmbeddedCBOR {
+ di2 := newDiagnose(val, di.dm.decMode, di.dm)
+ // Embedded CBOR should always be notated as a CBOR sequence.
+ if str, err := di2.diag(true); err == nil {
+ di.w.WriteString("<<")
+ di.w.WriteString(str)
+ di.w.WriteString(">>")
+ return nil
+ }
+ }
+ }
+
+ switch di.dm.byteStringEncoding {
+ case ByteStringBase16Encoding:
+ di.w.WriteString("h'")
+ if di.dm.byteStringHexWhitespace {
+ sz := hex.EncodedLen(len(val))
+ if len(val) > 0 {
+ sz += len(val) - 1
+ }
+ di.w.Grow(sz)
+
+ dst := di.w.Bytes()[di.w.Len():]
+ for i := range val {
+ if i > 0 {
+ dst = append(dst, ' ')
+ }
+ hex.Encode(dst[len(dst):len(dst)+2], val[i:i+1])
+ dst = dst[:len(dst)+2]
+ }
+ di.w.Write(dst)
+ } else {
+ sz := hex.EncodedLen(len(val))
+ di.w.Grow(sz)
+ dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
+ hex.Encode(dst, val)
+ di.w.Write(dst)
+ }
+ di.w.WriteByte('\'')
+ return nil
+
+ case ByteStringBase32Encoding:
+ di.w.WriteString("b32'")
+ sz := rawBase32Encoding.EncodedLen(len(val))
+ di.w.Grow(sz)
+ dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
+ rawBase32Encoding.Encode(dst, val)
+ di.w.Write(dst)
+ di.w.WriteByte('\'')
+ return nil
+
+ case ByteStringBase32HexEncoding:
+ di.w.WriteString("h32'")
+ sz := rawBase32HexEncoding.EncodedLen(len(val))
+ di.w.Grow(sz)
+ dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
+ rawBase32HexEncoding.Encode(dst, val)
+ di.w.Write(dst)
+ di.w.WriteByte('\'')
+ return nil
+
+ case ByteStringBase64Encoding:
+ di.w.WriteString("b64'")
+ sz := base64.RawURLEncoding.EncodedLen(len(val))
+ di.w.Grow(sz)
+ dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
+ base64.RawURLEncoding.Encode(dst, val)
+ di.w.Write(dst)
+ di.w.WriteByte('\'')
+ return nil
+
+ default:
+ // It should not be possible for users to construct a *diagMode with an invalid byte
+ // string encoding.
+ panic(fmt.Sprintf("diagmode has invalid ByteStringEncoding %v", di.dm.byteStringEncoding))
+ }
+}
+
+const utf16SurrSelf = rune(0x10000)
+
+// quote should be either `'` or `"`
+func (di *diagnose) encodeTextString(val string, quote byte) error {
+ di.w.WriteByte(quote)
+
+ for i := 0; i < len(val); {
+ if b := val[i]; b < utf8.RuneSelf {
+ switch {
+ case b == '\t', b == '\n', b == '\r', b == '\\', b == quote:
+ di.w.WriteByte('\\')
+
+ switch b {
+ case '\t':
+ b = 't'
+ case '\n':
+ b = 'n'
+ case '\r':
+ b = 'r'
+ }
+ di.w.WriteByte(b)
+
+ case b >= ' ' && b <= '~':
+ di.w.WriteByte(b)
+
+ default:
+ di.writeU16(rune(b))
+ }
+
+ i++
+ continue
+ }
+
+ c, size := utf8.DecodeRuneInString(val[i:])
+ switch {
+ case c == utf8.RuneError:
+ return &SemanticError{"cbor: invalid UTF-8 string"}
+
+ case c < utf16SurrSelf:
+ di.writeU16(c)
+
+ default:
+ c1, c2 := utf16.EncodeRune(c)
+ di.writeU16(c1)
+ di.writeU16(c2)
+ }
+
+ i += size
+ }
+
+ di.w.WriteByte(quote)
+ return nil
+}
+
+func (di *diagnose) encodeFloat(ai byte, val uint64) error {
+ f64 := float64(0)
+ switch ai {
+ case additionalInformationAsFloat16:
+ f16 := float16.Frombits(uint16(val))
+ switch {
+ case f16.IsNaN():
+ di.w.WriteString("NaN")
+ return nil
+ case f16.IsInf(1):
+ di.w.WriteString("Infinity")
+ return nil
+ case f16.IsInf(-1):
+ di.w.WriteString("-Infinity")
+ return nil
+ default:
+ f64 = float64(f16.Float32())
+ }
+
+ case additionalInformationAsFloat32:
+ f32 := math.Float32frombits(uint32(val))
+ switch {
+ case f32 != f32:
+ di.w.WriteString("NaN")
+ return nil
+ case f32 > math.MaxFloat32:
+ di.w.WriteString("Infinity")
+ return nil
+ case f32 < -math.MaxFloat32:
+ di.w.WriteString("-Infinity")
+ return nil
+ default:
+ f64 = float64(f32)
+ }
+
+ case additionalInformationAsFloat64:
+ f64 = math.Float64frombits(val)
+ switch {
+ case f64 != f64:
+ di.w.WriteString("NaN")
+ return nil
+ case f64 > math.MaxFloat64:
+ di.w.WriteString("Infinity")
+ return nil
+ case f64 < -math.MaxFloat64:
+ di.w.WriteString("-Infinity")
+ return nil
+ }
+ }
+ // Use ES6 number to string conversion which should match most JSON generators.
+ // Inspired by https://github.com/golang/go/blob/4df10fba1687a6d4f51d7238a403f8f2298f6a16/src/encoding/json/encode.go#L585
+ const bitSize = 64
+ b := make([]byte, 0, 32)
+ if abs := math.Abs(f64); abs != 0 && (abs < 1e-6 || abs >= 1e21) {
+ b = strconv.AppendFloat(b, f64, 'e', -1, bitSize)
+ // clean up e-09 to e-9
+ n := len(b)
+ if n >= 4 && string(b[n-4:n-1]) == "e-0" {
+ b = append(b[:n-2], b[n-1])
+ }
+ } else {
+ b = strconv.AppendFloat(b, f64, 'f', -1, bitSize)
+ }
+
+ // add decimal point and trailing zero if needed
+ if bytes.IndexByte(b, '.') < 0 {
+ if i := bytes.IndexByte(b, 'e'); i < 0 {
+ b = append(b, '.', '0')
+ } else {
+ b = append(b[:i+2], b[i:]...)
+ b[i] = '.'
+ b[i+1] = '0'
+ }
+ }
+
+ di.w.WriteString(string(b))
+
+ if di.dm.floatPrecisionIndicator {
+ switch ai {
+ case additionalInformationAsFloat16:
+ di.w.WriteString("_1")
+ return nil
+
+ case additionalInformationAsFloat32:
+ di.w.WriteString("_2")
+ return nil
+
+ case additionalInformationAsFloat64:
+ di.w.WriteString("_3")
+ return nil
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/doc.go b/vendor/github.com/fxamacker/cbor/v2/doc.go
new file mode 100644
index 000000000..c758b7374
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/doc.go
@@ -0,0 +1,152 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+/*
+Package cbor is a modern CBOR codec (RFC 8949 & RFC 8742) with CBOR tags,
+Go struct tag options (toarray/keyasint/omitempty/omitzero), Core Deterministic Encoding,
+CTAP2, Canonical CBOR, float64->32->16, and duplicate map key detection.
+
+Encoding options allow "preferred serialization" by encoding integers and floats
+to their smallest forms (e.g. float16) when values fit.
+
+Struct tag options "keyasint", "toarray", "omitempty", and "omitzero" reduce encoding size
+and reduce programming effort.
+
+For example, "toarray" tag makes struct fields encode to CBOR array elements. And
+"keyasint" makes a field encode to an element of CBOR map with specified int key.
+
+Latest docs can be viewed at https://github.com/fxamacker/cbor#cbor-library-in-go
+
+# Basics
+
+The Quick Start guide is at https://github.com/fxamacker/cbor#quick-start
+
+Function signatures identical to encoding/json include:
+
+ Marshal, Unmarshal, NewEncoder, NewDecoder, (*Encoder).Encode, (*Decoder).Decode
+
+Standard interfaces include:
+
+ BinaryMarshaler, BinaryUnmarshaler, Marshaler, and Unmarshaler
+
+Diagnostic functions translate CBOR data items into Diagnostic Notation:
+
+ Diagnose, DiagnoseFirst
+
+Functions that simplify using CBOR Sequences (RFC 8742) include:
+
+ UnmarshalFirst
+
+Custom encoding and decoding are possible by implementing standard interfaces for
+user-defined Go types.
+
+Codec functions are available at package level (using default options) or by
+creating modes from options at runtime.
+
+"Mode" in this API means a definite way of encoding (EncMode) or decoding (DecMode).
+
+EncMode and DecMode interfaces are created from EncOptions or DecOptions structs.
+
+ em, err := cbor.EncOptions{...}.EncMode()
+ em, err := cbor.CanonicalEncOptions().EncMode()
+ em, err := cbor.CTAP2EncOptions().EncMode()
+
+Modes use immutable options to avoid side-effects and simplify concurrency. Behavior of
+modes won't accidentally change at runtime after they're created.
+
+Modes are intended to be reused and are safe for concurrent use.
+
+EncMode and DecMode Interfaces
+
+ // EncMode interface uses immutable options and is safe for concurrent use.
+ type EncMode interface {
+ Marshal(v interface{}) ([]byte, error)
+ NewEncoder(w io.Writer) *Encoder
+ EncOptions() EncOptions // returns copy of options
+ }
+
+ // DecMode interface uses immutable options and is safe for concurrent use.
+ type DecMode interface {
+ Unmarshal(data []byte, v interface{}) error
+ NewDecoder(r io.Reader) *Decoder
+ DecOptions() DecOptions // returns copy of options
+ }
+
+Using Default Encoding Mode
+
+ b, err := cbor.Marshal(v)
+
+ encoder := cbor.NewEncoder(w)
+ err = encoder.Encode(v)
+
+Using Default Decoding Mode
+
+ err := cbor.Unmarshal(b, &v)
+
+ decoder := cbor.NewDecoder(r)
+ err = decoder.Decode(&v)
+
+Using Default Mode of UnmarshalFirst to Decode CBOR Sequences
+
+ // Decode the first CBOR data item and return remaining bytes:
+ rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v
+
+Using Extended Diagnostic Notation (EDN) to represent CBOR data
+
+ // Translate the first CBOR data item into text and return remaining bytes.
+ text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to text
+
+Creating and Using Encoding Modes
+
+ // Create EncOptions using either struct literal or a function.
+ opts := cbor.CanonicalEncOptions()
+
+ // If needed, modify encoding options
+ opts.Time = cbor.TimeUnix
+
+ // Create reusable EncMode interface with immutable options, safe for concurrent use.
+ em, err := opts.EncMode()
+
+ // Use EncMode like encoding/json, with same function signatures.
+ b, err := em.Marshal(v)
+ // or
+ encoder := em.NewEncoder(w)
+ err := encoder.Encode(v)
+
+ // NOTE: Both em.Marshal(v) and encoder.Encode(v) use encoding options
+ // specified during creation of em (encoding mode).
+
+# CBOR Options
+
+Predefined Encoding Options: https://github.com/fxamacker/cbor#predefined-encoding-options
+
+Encoding Options: https://github.com/fxamacker/cbor#encoding-options
+
+Decoding Options: https://github.com/fxamacker/cbor#decoding-options
+
+# Struct Tags
+
+Struct tags like `cbor:"name,omitempty"` and `json:"name,omitempty"` work as expected.
+If both struct tags are specified then `cbor` is used.
+
+Struct tag options like "keyasint", "toarray", "omitempty", and "omitzero" make it easy to use
+very compact formats like COSE and CWT (CBOR Web Tokens) with structs.
+
+The "omitzero" option omits zero values from encoding, matching
+[stdlib encoding/json behavior](https://pkg.go.dev/encoding/json#Marshal).
+When specified in the `cbor` tag, the option is always honored.
+When specified in the `json` tag, the option is honored when building with Go 1.24+.
+
+For example, "toarray" makes struct fields encode to array elements. And "keyasint"
+makes struct fields encode to elements of CBOR map with int keys.
+
+https://raw.githubusercontent.com/fxamacker/images/master/cbor/v2.0.0/cbor_easy_api.png
+
+Struct tag options are listed at https://github.com/fxamacker/cbor#struct-tags-1
+
+# Tests and Fuzzing
+
+Over 375 tests are included in this package. Coverage-guided fuzzing is handled by
+a private fuzzer that replaced fxamacker/cbor-fuzz years ago.
+*/
+package cbor
diff --git a/vendor/github.com/fxamacker/cbor/v2/encode.go b/vendor/github.com/fxamacker/cbor/v2/encode.go
new file mode 100644
index 000000000..c550617c3
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/encode.go
@@ -0,0 +1,2299 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "math/big"
+ "math/rand"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/x448/float16"
+)
+
+// Marshal returns the CBOR encoding of v using default encoding options.
+// See EncOptions for encoding options.
+//
+// Marshal uses the following encoding rules:
+//
+// If the value implements the Marshaler interface, Marshal calls its
+// MarshalCBOR method.
+//
+// If the value implements encoding.BinaryMarshaler, Marshal calls its
+// MarshalBinary method and encodes the result as a CBOR byte string.
+//
+// Boolean values encode as CBOR booleans (type 7).
+//
+// Positive integer values encode as CBOR positive integers (type 0).
+//
+// Negative integer values encode as CBOR negative integers (type 1).
+//
+// Floating point values encode as CBOR floating points (type 7).
+//
+// String values encode as CBOR text strings (type 3).
+//
+// []byte values encode as CBOR byte strings (type 2).
+//
+// Array and slice values encode as CBOR arrays (type 4).
+//
+// Map values encode as CBOR maps (type 5).
+//
+// Struct values encode as CBOR maps (type 5). Each exported struct field
+// becomes a pair with field name encoded as CBOR text string (type 3) and
+// field value encoded based on its type. See struct tag option "keyasint"
+// to encode field name as CBOR integer (type 0 and 1). Also see struct
+// tag option "toarray" for special field "_" to encode struct values as
+// CBOR array (type 4).
+//
+// Marshal supports format string stored under the "cbor" key in the struct
+// field's tag. CBOR format string can specify the name of the field,
+// "omitempty", "omitzero" and "keyasint" options, and special case "-" for
+// field omission. If "cbor" key is absent, Marshal uses "json" key.
+// When using the "json" key, the "omitzero" option is honored when building
+// with Go 1.24+ to match stdlib encoding/json behavior.
+//
+// Struct field name is treated as integer if it has "keyasint" option in
+// its format string. The format string must specify an integer as its
+// field name.
+//
+// Special struct field "_" is used to specify struct level options, such as
+// "toarray". "toarray" option enables Go struct to be encoded as CBOR array.
+// "omitempty" and "omitzero" are disabled by "toarray" to ensure that the
+// same number of elements are encoded every time.
+//
+// Anonymous struct fields are marshaled as if their exported fields
+// were fields in the outer struct. Marshal follows the same struct fields
+// visibility rules used by JSON encoding package.
+//
+// time.Time values encode as text strings specified in RFC 3339 or as a numerical
+// representation of seconds since January 1, 1970 UTC, depending on the
+// EncOptions.Time setting. Also see EncOptions.TimeTag to encode
+// time.Time as a CBOR tag with tag number 0 or 1.
+//
+// big.Int values encode as CBOR integers (type 0 and 1) if values fit.
+// Otherwise, big.Int values encode as CBOR bignums (tag 2 and 3). See
+// EncOptions.BigIntConvert to always encode big.Int values as CBOR
+// bignums.
+//
+// Pointer values encode as the value pointed to.
+//
+// Interface values encode as the value stored in the interface.
+//
+// Nil slice/map/pointer/interface values encode as CBOR nulls (type 7).
+//
+// Values of other types cannot be encoded in CBOR. Attempting
+// to encode such a value causes Marshal to return an UnsupportedTypeError.
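+//
+// A minimal sketch of the "toarray" option documented above (the struct and
+// field names are illustrative):
+//
+//	type record struct {
+//		_  struct{} `cbor:",toarray"` // encode fields as CBOR array elements
+//		ID uint64
+//	}
+//	b, err := cbor.Marshal(record{ID: 7}) // encodes as the CBOR array [7]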
+func Marshal(v any) ([]byte, error) {
+ return defaultEncMode.Marshal(v)
+}
+
+// MarshalToBuffer encodes v into provided buffer (instead of using built-in buffer pool)
+// and uses default encoding options.
+//
+// NOTE: Unlike Marshal, the buffer provided to MarshalToBuffer can contain
+// partially encoded data if error is returned.
+//
+// See Marshal for more details.
+func MarshalToBuffer(v any, buf *bytes.Buffer) error {
+ return defaultEncMode.MarshalToBuffer(v, buf)
+}
+
+// Marshaler is the interface implemented by types that can marshal themselves
+// into valid CBOR.
+type Marshaler interface {
+ MarshalCBOR() ([]byte, error)
+}
+
+// MarshalerError represents an error from checking the encoded CBOR data item
+// returned by MarshalCBOR for well-formedness and some very limited tag validation.
+type MarshalerError struct {
+ typ reflect.Type
+ err error
+}
+
+func (e *MarshalerError) Error() string {
+ return "cbor: error calling MarshalCBOR for type " +
+ e.typ.String() +
+ ": " + e.err.Error()
+}
+
+func (e *MarshalerError) Unwrap() error {
+ return e.err
+}
+
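+// TranscodeError represents an error from transcoding data between encoding
+// formats; sourceFormat and targetFormat name the formats involved.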
+type TranscodeError struct {
+ err error
+ rtype reflect.Type
+ sourceFormat, targetFormat string
+}
+
+func (e TranscodeError) Error() string {
+ return "cbor: cannot transcode from " + e.sourceFormat + " to " + e.targetFormat + ": " + e.err.Error()
+}
+
+func (e TranscodeError) Unwrap() error {
+ return e.err
+}
+
+// UnsupportedTypeError is returned by Marshal when attempting to encode value
+// of an unsupported type.
+type UnsupportedTypeError struct {
+ Type reflect.Type
+}
+
+func (e *UnsupportedTypeError) Error() string {
+ return "cbor: unsupported type: " + e.Type.String()
+}
+
+// UnsupportedValueError is returned by Marshal when attempting to encode an
+// unsupported value.
+type UnsupportedValueError struct {
+ msg string
+}
+
+func (e *UnsupportedValueError) Error() string {
+ return "cbor: unsupported value: " + e.msg
+}
+
+// SortMode identifies supported sorting order.
+type SortMode int
+
+const (
+ // SortNone encodes map pairs and struct fields in an arbitrary order.
+ SortNone SortMode = 0
+
+ // SortLengthFirst causes map keys or struct fields to be sorted such that:
+ // - If two keys have different lengths, the shorter one sorts earlier;
+ // - If two keys have the same length, the one with the lower value in
+ // (byte-wise) lexical order sorts earlier.
+ // It is used in "Canonical CBOR" encoding in RFC 7049 3.9.
+ SortLengthFirst SortMode = 1
+
+ // SortBytewiseLexical causes map keys or struct fields to be sorted in the
+ // bytewise lexicographic order of their deterministic CBOR encodings.
+ // It is used in "CTAP2 Canonical CBOR" and "Core Deterministic Encoding"
+ // in RFC 7049bis.
+ SortBytewiseLexical SortMode = 2
+
+	// SortFastShuffle encodes map pairs and struct fields in a shuffled
+ // order. This mode does not guarantee an unbiased permutation, but it
+ // does guarantee that the runtime of the shuffle algorithm used will be
+ // constant.
+ SortFastShuffle SortMode = 3
+
+ // SortCanonical is used in "Canonical CBOR" encoding in RFC 7049 3.9.
+ SortCanonical SortMode = SortLengthFirst
+
+ // SortCTAP2 is used in "CTAP2 Canonical CBOR".
+ SortCTAP2 SortMode = SortBytewiseLexical
+
+ // SortCoreDeterministic is used in "Core Deterministic Encoding" in RFC 7049bis.
+ SortCoreDeterministic SortMode = SortBytewiseLexical
+
+ maxSortMode SortMode = 4
+)
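+
+// Illustrative sketch: selecting a sort mode through EncOptions (defined
+// later in this file) produces deterministic map key order.
+//
+//	em, err := cbor.EncOptions{Sort: cbor.SortBytewiseLexical}.EncMode()
+//	if err != nil {
+//		// handle error
+//	}
+//	b, _ := em.Marshal(map[string]int{"b": 2, "aa": 3, "a": 1})
+//	// Keys are emitted in the bytewise lexicographic order of their
+//	// encodings: "a" (0x6161), "b" (0x6162), "aa" (0x626161).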
+
+func (sm SortMode) valid() bool {
+ return sm >= 0 && sm < maxSortMode
+}
+
+// StringMode specifies how to encode Go string values.
+type StringMode int
+
+const (
+ // StringToTextString encodes Go string to CBOR text string (major type 3).
+ StringToTextString StringMode = iota
+
+ // StringToByteString encodes Go string to CBOR byte string (major type 2).
+ StringToByteString
+)
+
+func (st StringMode) cborType() (cborType, error) {
+ switch st {
+ case StringToTextString:
+ return cborTypeTextString, nil
+
+ case StringToByteString:
+ return cborTypeByteString, nil
+ }
+	return 0, errors.New("cbor: invalid StringMode " + strconv.Itoa(int(st)))
+}
+
+// ShortestFloatMode specifies which floating-point format should
+// be used as the shortest possible format for CBOR encoding.
+// It is not used for encoding Infinity and NaN values.
+type ShortestFloatMode int
+
+const (
+	// ShortestFloatNone makes float values encode without any conversion.
+	// This is the default for ShortestFloatMode in v1.
+	// E.g. a float32 in Go will encode to CBOR float32, and
+	// a float64 in Go will encode to CBOR float64.
+ ShortestFloatNone ShortestFloatMode = iota
+
+ // ShortestFloat16 specifies float16 as the shortest form that preserves value.
+ // E.g. if float64 can convert to float32 while preserving value, then
+ // encoding will also try to convert float32 to float16. So a float64 might
+ // encode as CBOR float64, float32 or float16 depending on the value.
+ ShortestFloat16
+
+ maxShortestFloat
+)
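+
+// Worked example (the 0xf94580 encoding of 5.5 is taken from the RFC 7049bis
+// text quoted in PreferredUnsortedEncOptions below):
+//
+//	em, _ := cbor.EncOptions{ShortestFloat: cbor.ShortestFloat16}.EncMode()
+//	b, _ := em.Marshal(5.5)
+//	// b == []byte{0xf9, 0x45, 0x80}: 5.5 survives the round trip to
+//	// float16, so the float64 input is emitted as CBOR float16.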
+
+func (sfm ShortestFloatMode) valid() bool {
+ return sfm >= 0 && sfm < maxShortestFloat
+}
+
+// NaNConvertMode specifies how to encode NaN and overrides ShortestFloatMode.
+// ShortestFloatMode is not used for encoding Infinity and NaN values.
+type NaNConvertMode int
+
+const (
+ // NaNConvert7e00 always encodes NaN to 0xf97e00 (CBOR float16 = 0x7e00).
+ NaNConvert7e00 NaNConvertMode = iota
+
+ // NaNConvertNone never modifies or converts NaN to other representations
+ // (float64 NaN stays float64, etc. even if it can use float16 without losing
+ // any bits).
+ NaNConvertNone
+
+ // NaNConvertPreserveSignal converts NaN to the smallest form that preserves
+ // value (quiet bit + payload) as described in RFC 7049bis Draft 12.
+ NaNConvertPreserveSignal
+
+ // NaNConvertQuiet always forces quiet bit = 1 and shortest form that preserves
+ // NaN payload.
+ NaNConvertQuiet
+
+ // NaNConvertReject returns UnsupportedValueError on attempts to encode a NaN value.
+ NaNConvertReject
+
+ maxNaNConvert
+)
+
+func (ncm NaNConvertMode) valid() bool {
+ return ncm >= 0 && ncm < maxNaNConvert
+}
+
+// InfConvertMode specifies how to encode Infinity and overrides ShortestFloatMode.
+// ShortestFloatMode is not used for encoding Infinity and NaN values.
+type InfConvertMode int
+
+const (
+ // InfConvertFloat16 always converts Inf to lossless IEEE binary16 (float16).
+ InfConvertFloat16 InfConvertMode = iota
+
+ // InfConvertNone never converts (used by CTAP2 Canonical CBOR).
+ InfConvertNone
+
+ // InfConvertReject returns UnsupportedValueError on attempts to encode an infinite value.
+ InfConvertReject
+
+ maxInfConvert
+)
+
+func (icm InfConvertMode) valid() bool {
+ return icm >= 0 && icm < maxInfConvert
+}
+
+// TimeMode specifies how to encode time.Time values in compliance with RFC 8949 (CBOR):
+// - Section 3.4.1: Standard Date/Time String
+// - Section 3.4.2: Epoch-Based Date/Time
+// For more info, see:
+// - https://www.rfc-editor.org/rfc/rfc8949.html
+// NOTE: User applications that prefer to encode time with fractional seconds to an integer
+// (instead of floating point or text string) can use a CBOR tag number not assigned by IANA:
+// 1. Define a user-defined type in Go with just a time.Time or int64 as its data.
+// 2. Implement the cbor.Marshaler and cbor.Unmarshaler interface for that user-defined type
+// to encode or decode the tagged data item with an enclosed integer content.
+type TimeMode int
+
+const (
+ // TimeUnix causes time.Time to encode to a CBOR time (tag 1) with an integer content
+ // representing seconds elapsed (with 1-second precision) since UNIX Epoch UTC.
+ // The TimeUnix option is location independent and has a clear precision guarantee.
+ TimeUnix TimeMode = iota
+
+ // TimeUnixMicro causes time.Time to encode to a CBOR time (tag 1) with a floating point content
+ // representing seconds elapsed (with up to 1-microsecond precision) since UNIX Epoch UTC.
+ // NOTE: The floating point content is encoded to the shortest floating-point encoding that preserves
+	// the 64-bit floating point value. I.e., the floating point encoding can be IEEE 754:
+ // binary64, binary32, or binary16 depending on the content's value.
+ TimeUnixMicro
+
+ // TimeUnixDynamic causes time.Time to encode to a CBOR time (tag 1) with either an integer content or
+ // a floating point content, depending on the content's value. This option is equivalent to dynamically
+ // choosing TimeUnix if time.Time doesn't have fractional seconds, and using TimeUnixMicro if time.Time
+ // has fractional seconds.
+ TimeUnixDynamic
+
+ // TimeRFC3339 causes time.Time to encode to a CBOR time (tag 0) with a text string content
+ // representing the time using 1-second precision in RFC3339 format. If the time.Time has a
+ // non-UTC timezone then a "localtime - UTC" numeric offset will be included as specified in RFC3339.
+ // NOTE: User applications can avoid including the RFC3339 numeric offset by:
+ // - providing a time.Time value set to UTC, or
+ // - using the TimeUnix, TimeUnixMicro, or TimeUnixDynamic option instead of TimeRFC3339.
+ TimeRFC3339
+
+ // TimeRFC3339Nano causes time.Time to encode to a CBOR time (tag 0) with a text string content
+ // representing the time using 1-nanosecond precision in RFC3339 format. If the time.Time has a
+ // non-UTC timezone then a "localtime - UTC" numeric offset will be included as specified in RFC3339.
+ // NOTE: User applications can avoid including the RFC3339 numeric offset by:
+ // - providing a time.Time value set to UTC, or
+ // - using the TimeUnix, TimeUnixMicro, or TimeUnixDynamic option instead of TimeRFC3339Nano.
+ TimeRFC3339Nano
+
+ maxTimeMode
+)
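+
+// Illustrative sketch of the modes above, using the example timestamp from
+// RFC 8949:
+//
+//	t := time.Date(2013, 3, 21, 20, 4, 0, 0, time.UTC)
+//
+//	em, _ := cbor.EncOptions{Time: cbor.TimeUnix}.EncMode()
+//	b, _ := em.Marshal(t) // encodes the integer 1363896240
+//
+//	em, _ = cbor.EncOptions{Time: cbor.TimeRFC3339}.EncMode()
+//	b, _ = em.Marshal(t) // encodes the text string "2013-03-21T20:04:00Z"
+//
+// Set TimeTag to EncTagRequired to wrap these in tag 1 or tag 0 respectively.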
+
+func (tm TimeMode) valid() bool {
+ return tm >= 0 && tm < maxTimeMode
+}
+
+// BigIntConvertMode specifies how to encode big.Int values.
+type BigIntConvertMode int
+
+const (
+ // BigIntConvertShortest makes big.Int encode to CBOR integer if value fits.
+ // E.g. if big.Int value can be converted to CBOR integer while preserving
+ // value, encoder will encode it to CBOR integer (major type 0 or 1).
+ BigIntConvertShortest BigIntConvertMode = iota
+
+ // BigIntConvertNone makes big.Int encode to CBOR bignum (tag 2 or 3) without
+ // converting it to another CBOR type.
+ BigIntConvertNone
+
+ // BigIntConvertReject returns an UnsupportedTypeError instead of marshaling a big.Int.
+ BigIntConvertReject
+
+ maxBigIntConvert
+)
+
+func (bim BigIntConvertMode) valid() bool {
+ return bim >= 0 && bim < maxBigIntConvert
+}
+
+// NilContainersMode specifies how to encode nil slices and maps.
+type NilContainersMode int
+
+const (
+ // NilContainerAsNull encodes nil slices and maps as CBOR null.
+ // This is the default.
+ NilContainerAsNull NilContainersMode = iota
+
+ // NilContainerAsEmpty encodes nil slices and maps as
+ // empty container (CBOR bytestring, array, or map).
+ NilContainerAsEmpty
+
+ maxNilContainersMode
+)
+
+func (m NilContainersMode) valid() bool {
+ return m >= 0 && m < maxNilContainersMode
+}
+
+// OmitEmptyMode specifies how to encode struct fields with the omitempty tag.
+// The default behavior omits a field if its value would encode as an empty CBOR value.
+type OmitEmptyMode int
+
+const (
+ // OmitEmptyCBORValue specifies that struct fields tagged with "omitempty"
+ // should be omitted from encoding if the field would be encoded as an empty
+ // CBOR value, such as CBOR false, 0, 0.0, nil, empty byte, empty string,
+ // empty array, or empty map.
+ OmitEmptyCBORValue OmitEmptyMode = iota
+
+ // OmitEmptyGoValue specifies that struct fields tagged with "omitempty"
+ // should be omitted from encoding if the field has an empty Go value,
+ // defined as false, 0, 0.0, a nil pointer, a nil interface value, and
+ // any empty array, slice, map, or string.
+ // This behavior is the same as the current (aka v1) encoding/json package
+ // included in Go.
+ OmitEmptyGoValue
+
+ maxOmitEmptyMode
+)
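+
+// Sketch of where the two modes differ (types are hypothetical). A struct
+// whose fields are all omitted encodes as an empty CBOR map, so
+// OmitEmptyCBORValue drops it, while OmitEmptyGoValue keeps it, matching
+// encoding/json, which never treats struct values as empty.
+//
+//	type Inner struct {
+//		A int `cbor:"a,omitempty"`
+//	}
+//	type Outer struct {
+//		In Inner `cbor:"in,omitempty"`
+//	}
+//	// OmitEmptyCBORValue: Outer{} encodes as {} (the "in" field is omitted).
+//	// OmitEmptyGoValue:   Outer{} encodes as {"in": {}}.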
+
+func (om OmitEmptyMode) valid() bool {
+ return om >= 0 && om < maxOmitEmptyMode
+}
+
+// FieldNameMode specifies the CBOR type to use when encoding struct field names.
+type FieldNameMode int
+
+const (
+ // FieldNameToTextString encodes struct fields to CBOR text string (major type 3).
+ FieldNameToTextString FieldNameMode = iota
+
+	// FieldNameToByteString encodes struct fields to CBOR byte string (major type 2).
+ FieldNameToByteString
+
+ maxFieldNameMode
+)
+
+func (fnm FieldNameMode) valid() bool {
+ return fnm >= 0 && fnm < maxFieldNameMode
+}
+
+// ByteSliceLaterFormatMode specifies which later format conversion hint (CBOR tag 21-23)
+// to include (if any) when encoding Go byte slice to CBOR byte string. The encoder will
+// always encode unmodified bytes from the byte slice and just wrap it within
+// CBOR tag 21, 22, or 23 if specified.
+// See "Expected Later Encoding for CBOR-to-JSON Converters" in RFC 8949 Section 3.4.5.2.
+type ByteSliceLaterFormatMode int
+
+const (
+ // ByteSliceLaterFormatNone encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2)
+ // without adding CBOR tag 21, 22, or 23.
+ ByteSliceLaterFormatNone ByteSliceLaterFormatMode = iota
+
+ // ByteSliceLaterFormatBase64URL encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2)
+ // inside CBOR tag 21 (expected later conversion to base64url encoding, see RFC 8949 Section 3.4.5.2).
+ ByteSliceLaterFormatBase64URL
+
+ // ByteSliceLaterFormatBase64 encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2)
+ // inside CBOR tag 22 (expected later conversion to base64 encoding, see RFC 8949 Section 3.4.5.2).
+ ByteSliceLaterFormatBase64
+
+ // ByteSliceLaterFormatBase16 encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2)
+ // inside CBOR tag 23 (expected later conversion to base16 encoding, see RFC 8949 Section 3.4.5.2).
+ ByteSliceLaterFormatBase16
+)
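+
+// Worked byte-level example (derived from the tag numbers above):
+//
+//	em, _ := cbor.EncOptions{ByteSliceLaterFormat: cbor.ByteSliceLaterFormatBase64URL}.EncMode()
+//	b, _ := em.Marshal([]byte{1, 2, 3})
+//	// b == []byte{0xd5, 0x43, 0x01, 0x02, 0x03}
+//	// 0xd5 is tag 21; 0x43 heads a 3-byte CBOR byte string.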
+
+func (bsefm ByteSliceLaterFormatMode) encodingTag() (uint64, error) {
+ switch bsefm {
+ case ByteSliceLaterFormatNone:
+ return 0, nil
+
+ case ByteSliceLaterFormatBase64URL:
+ return tagNumExpectedLaterEncodingBase64URL, nil
+
+ case ByteSliceLaterFormatBase64:
+ return tagNumExpectedLaterEncodingBase64, nil
+
+ case ByteSliceLaterFormatBase16:
+ return tagNumExpectedLaterEncodingBase16, nil
+ }
+ return 0, errors.New("cbor: invalid ByteSliceLaterFormat " + strconv.Itoa(int(bsefm)))
+}
+
+// ByteArrayMode specifies how to encode byte arrays.
+type ByteArrayMode int
+
+const (
+ // ByteArrayToByteSlice encodes byte arrays the same way that a byte slice with identical
+ // length and contents is encoded.
+ ByteArrayToByteSlice ByteArrayMode = iota
+
+ // ByteArrayToArray encodes byte arrays to the CBOR array type with one unsigned integer
+ // item for each byte in the array.
+ ByteArrayToArray
+
+ maxByteArrayMode
+)
+
+func (bam ByteArrayMode) valid() bool {
+ return bam >= 0 && bam < maxByteArrayMode
+}
+
+// BinaryMarshalerMode specifies how to encode types that implement encoding.BinaryMarshaler.
+type BinaryMarshalerMode int
+
+const (
+ // BinaryMarshalerByteString encodes the output of MarshalBinary to a CBOR byte string.
+ BinaryMarshalerByteString BinaryMarshalerMode = iota
+
+ // BinaryMarshalerNone does not recognize BinaryMarshaler implementations during encode.
+ BinaryMarshalerNone
+
+ maxBinaryMarshalerMode
+)
+
+func (bmm BinaryMarshalerMode) valid() bool {
+ return bmm >= 0 && bmm < maxBinaryMarshalerMode
+}
+
+// TextMarshalerMode specifies how to encode types that implement encoding.TextMarshaler.
+type TextMarshalerMode int
+
+const (
+ // TextMarshalerNone does not recognize TextMarshaler implementations during encode.
+ // This is the default behavior.
+ TextMarshalerNone TextMarshalerMode = iota
+
+ // TextMarshalerTextString encodes the output of MarshalText to a CBOR text string.
+ TextMarshalerTextString
+
+ maxTextMarshalerMode
+)
+
+func (tmm TextMarshalerMode) valid() bool {
+ return tmm >= 0 && tmm < maxTextMarshalerMode
+}
+
+// EncOptions specifies encoding options.
+type EncOptions struct {
+ // Sort specifies sorting order.
+ Sort SortMode
+
+ // ShortestFloat specifies the shortest floating-point encoding that preserves
+ // the value being encoded.
+ ShortestFloat ShortestFloatMode
+
+ // NaNConvert specifies how to encode NaN and it overrides ShortestFloatMode.
+ NaNConvert NaNConvertMode
+
+ // InfConvert specifies how to encode Inf and it overrides ShortestFloatMode.
+ InfConvert InfConvertMode
+
+ // BigIntConvert specifies how to encode big.Int values.
+ BigIntConvert BigIntConvertMode
+
+ // Time specifies how to encode time.Time.
+ Time TimeMode
+
+ // TimeTag allows time.Time to be encoded with a tag number.
+ // RFC3339 format gets tag number 0, and numeric epoch time tag number 1.
+ TimeTag EncTagMode
+
+ // IndefLength specifies whether to allow indefinite length CBOR items.
+ IndefLength IndefLengthMode
+
+ // NilContainers specifies how to encode nil slices and maps.
+ NilContainers NilContainersMode
+
+ // TagsMd specifies whether to allow CBOR tags (major type 6).
+ TagsMd TagsMode
+
+	// OmitEmpty specifies how to encode struct fields with the omitempty tag.
+ OmitEmpty OmitEmptyMode
+
+ // String specifies which CBOR type to use when encoding Go strings.
+ // - CBOR text string (major type 3) is default
+ // - CBOR byte string (major type 2)
+ String StringMode
+
+ // FieldName specifies the CBOR type to use when encoding struct field names.
+ FieldName FieldNameMode
+
+ // ByteSliceLaterFormat specifies which later format conversion hint (CBOR tag 21-23)
+ // to include (if any) when encoding Go byte slice to CBOR byte string. The encoder will
+ // always encode unmodified bytes from the byte slice and just wrap it within
+ // CBOR tag 21, 22, or 23 if specified.
+ // See "Expected Later Encoding for CBOR-to-JSON Converters" in RFC 8949 Section 3.4.5.2.
+ ByteSliceLaterFormat ByteSliceLaterFormatMode
+
+ // ByteArray specifies how to encode byte arrays.
+ ByteArray ByteArrayMode
+
+ // BinaryMarshaler specifies how to encode types that implement encoding.BinaryMarshaler.
+ BinaryMarshaler BinaryMarshalerMode
+
+ // TextMarshaler specifies how to encode types that implement encoding.TextMarshaler.
+ TextMarshaler TextMarshalerMode
+
+ // JSONMarshalerTranscoder sets the transcoding scheme used to marshal types that implement
+ // json.Marshaler but do not also implement cbor.Marshaler. If nil, encoding behavior is not
+ // influenced by whether or not a type implements json.Marshaler.
+ JSONMarshalerTranscoder Transcoder
+}
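+
+// These options compose with the preset constructors below; a common pattern
+// (illustrative) is to start from a preset and adjust a single field before
+// freezing it into an EncMode:
+//
+//	opts := cbor.CoreDetEncOptions()
+//	opts.Time = cbor.TimeUnix
+//	em, err := opts.EncMode()
+//	if err != nil {
+//		// handle error
+//	}
+//	// em is immutable and safe for concurrent use.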
+
+// CanonicalEncOptions returns EncOptions for "Canonical CBOR" encoding,
+// defined in RFC 7049 Section 3.9 with the following rules:
+//
+// 1. "Integers must be as small as possible."
+// 2. "The expression of lengths in major types 2 through 5 must be as short as possible."
+// 3. The keys in every map must be sorted in length-first sorting order.
+// See SortLengthFirst for details.
+// 4. "Indefinite-length items must be made into definite-length items."
+// 5. "If a protocol allows for IEEE floats, then additional canonicalization rules might
+// need to be added. One example rule might be to have all floats start as a 64-bit
+// float, then do a test conversion to a 32-bit float; if the result is the same numeric
+// value, use the shorter value and repeat the process with a test conversion to a
+// 16-bit float. (This rule selects 16-bit float for positive and negative Infinity
+// as well.) Also, there are many representations for NaN. If NaN is an allowed value,
+// it must always be represented as 0xf97e00."
+func CanonicalEncOptions() EncOptions {
+ return EncOptions{
+ Sort: SortCanonical,
+ ShortestFloat: ShortestFloat16,
+ NaNConvert: NaNConvert7e00,
+ InfConvert: InfConvertFloat16,
+ IndefLength: IndefLengthForbidden,
+ }
+}
+
+// CTAP2EncOptions returns EncOptions for "CTAP2 Canonical CBOR" encoding,
+// defined in CTAP specification, with the following rules:
+//
+// 1. "Integers must be encoded as small as possible."
+// 2. "The representations of any floating-point values are not changed."
+// 3. "The expression of lengths in major types 2 through 5 must be as short as possible."
+// 4. "Indefinite-length items must be made into definite-length items.""
+// 5. The keys in every map must be sorted in bytewise lexicographic order.
+// See SortBytewiseLexical for details.
+// 6. "Tags as defined in Section 2.4 in [RFC7049] MUST NOT be present."
+func CTAP2EncOptions() EncOptions {
+ return EncOptions{
+ Sort: SortCTAP2,
+ ShortestFloat: ShortestFloatNone,
+ NaNConvert: NaNConvertNone,
+ InfConvert: InfConvertNone,
+ IndefLength: IndefLengthForbidden,
+ TagsMd: TagsForbidden,
+ }
+}
+
+// CoreDetEncOptions returns EncOptions for "Core Deterministic" encoding,
+// defined in RFC 7049bis with the following rules:
+//
+// 1. "Preferred serialization MUST be used. In particular, this means that arguments
+// (see Section 3) for integers, lengths in major types 2 through 5, and tags MUST
+// be as short as possible"
+// "Floating point values also MUST use the shortest form that preserves the value"
+// 2. "Indefinite-length items MUST NOT appear."
+// 3. "The keys in every map MUST be sorted in the bytewise lexicographic order of
+// their deterministic encodings."
+func CoreDetEncOptions() EncOptions {
+ return EncOptions{
+ Sort: SortCoreDeterministic,
+ ShortestFloat: ShortestFloat16,
+ NaNConvert: NaNConvert7e00,
+ InfConvert: InfConvertFloat16,
+ IndefLength: IndefLengthForbidden,
+ }
+}
+
+// PreferredUnsortedEncOptions returns EncOptions for "Preferred Serialization" encoding,
+// defined in RFC 7049bis with the following rules:
+//
+// 1. "The preferred serialization always uses the shortest form of representing the argument
+// (Section 3);"
+// 2. "it also uses the shortest floating-point encoding that preserves the value being
+// encoded (see Section 5.5)."
+// "The preferred encoding for a floating-point value is the shortest floating-point encoding
+// that preserves its value, e.g., 0xf94580 for the number 5.5, and 0xfa45ad9c00 for the
+// number 5555.5, unless the CBOR-based protocol specifically excludes the use of the shorter
+// floating-point encodings. For NaN values, a shorter encoding is preferred if zero-padding
+// the shorter significand towards the right reconstitutes the original NaN value (for many
+// applications, the single NaN encoding 0xf97e00 will suffice)."
+// 3. "Definite length encoding is preferred whenever the length is known at the time the
+// serialization of the item starts."
+func PreferredUnsortedEncOptions() EncOptions {
+ return EncOptions{
+ Sort: SortNone,
+ ShortestFloat: ShortestFloat16,
+ NaNConvert: NaNConvert7e00,
+ InfConvert: InfConvertFloat16,
+ }
+}
+
+// EncMode returns EncMode with immutable options and no tags (safe for concurrency).
+func (opts EncOptions) EncMode() (EncMode, error) { //nolint:gocritic // ignore hugeParam
+ return opts.encMode()
+}
+
+// UserBufferEncMode returns UserBufferEncMode with immutable options and no tags (safe for concurrency).
+func (opts EncOptions) UserBufferEncMode() (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam
+ return opts.encMode()
+}
+
+// EncModeWithTags returns EncMode with options and tags that are both immutable (safe for concurrency).
+func (opts EncOptions) EncModeWithTags(tags TagSet) (EncMode, error) { //nolint:gocritic // ignore hugeParam
+ return opts.UserBufferEncModeWithTags(tags)
+}
+
+// UserBufferEncModeWithTags returns UserBufferEncMode with options and tags that are both immutable (safe for concurrency).
+func (opts EncOptions) UserBufferEncModeWithTags(tags TagSet) (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam
+ if opts.TagsMd == TagsForbidden {
+ return nil, errors.New("cbor: cannot create EncMode with TagSet when TagsMd is TagsForbidden")
+ }
+ if tags == nil {
+ return nil, errors.New("cbor: cannot create EncMode with nil value as TagSet")
+ }
+ em, err := opts.encMode()
+ if err != nil {
+ return nil, err
+ }
+ // Copy tags
+ ts := tagSet(make(map[reflect.Type]*tagItem))
+ syncTags := tags.(*syncTagSet)
+ syncTags.RLock()
+ for contentType, tag := range syncTags.t {
+ if tag.opts.EncTag != EncTagNone {
+ ts[contentType] = tag
+ }
+ }
+ syncTags.RUnlock()
+ if len(ts) > 0 {
+ em.tags = ts
+ }
+ return em, nil
+}
+
+// EncModeWithSharedTags returns EncMode with immutable options and mutable shared tags (safe for concurrency).
+func (opts EncOptions) EncModeWithSharedTags(tags TagSet) (EncMode, error) { //nolint:gocritic // ignore hugeParam
+ return opts.UserBufferEncModeWithSharedTags(tags)
+}
+
+// UserBufferEncModeWithSharedTags returns UserBufferEncMode with immutable options and mutable shared tags (safe for concurrency).
+func (opts EncOptions) UserBufferEncModeWithSharedTags(tags TagSet) (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam
+ if opts.TagsMd == TagsForbidden {
+ return nil, errors.New("cbor: cannot create EncMode with TagSet when TagsMd is TagsForbidden")
+ }
+ if tags == nil {
+ return nil, errors.New("cbor: cannot create EncMode with nil value as TagSet")
+ }
+ em, err := opts.encMode()
+ if err != nil {
+ return nil, err
+ }
+ em.tags = tags
+ return em, nil
+}
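+
+// Illustrative sketch of pairing an EncMode with registered tags (the CWT
+// type and the use of tag number 61 are hypothetical; NewTagSet and
+// TagOptions are defined elsewhere in this package):
+//
+//	tags := cbor.NewTagSet()
+//	if err := tags.Add(
+//		cbor.TagOptions{EncTag: cbor.EncTagRequired, DecTag: cbor.DecTagRequired},
+//		reflect.TypeOf(CWT{}), 61,
+//	); err != nil {
+//		// handle error
+//	}
+//	em, err := cbor.EncOptions{}.EncModeWithTags(tags)
+//	// Marshaling a CWT value through em now wraps it in CBOR tag 61.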
+
+func (opts EncOptions) encMode() (*encMode, error) { //nolint:gocritic // ignore hugeParam
+ if !opts.Sort.valid() {
+ return nil, errors.New("cbor: invalid SortMode " + strconv.Itoa(int(opts.Sort)))
+ }
+ if !opts.ShortestFloat.valid() {
+ return nil, errors.New("cbor: invalid ShortestFloatMode " + strconv.Itoa(int(opts.ShortestFloat)))
+ }
+ if !opts.NaNConvert.valid() {
+ return nil, errors.New("cbor: invalid NaNConvertMode " + strconv.Itoa(int(opts.NaNConvert)))
+ }
+ if !opts.InfConvert.valid() {
+ return nil, errors.New("cbor: invalid InfConvertMode " + strconv.Itoa(int(opts.InfConvert)))
+ }
+ if !opts.BigIntConvert.valid() {
+ return nil, errors.New("cbor: invalid BigIntConvertMode " + strconv.Itoa(int(opts.BigIntConvert)))
+ }
+ if !opts.Time.valid() {
+ return nil, errors.New("cbor: invalid TimeMode " + strconv.Itoa(int(opts.Time)))
+ }
+ if !opts.TimeTag.valid() {
+ return nil, errors.New("cbor: invalid TimeTag " + strconv.Itoa(int(opts.TimeTag)))
+ }
+ if !opts.IndefLength.valid() {
+ return nil, errors.New("cbor: invalid IndefLength " + strconv.Itoa(int(opts.IndefLength)))
+ }
+ if !opts.NilContainers.valid() {
+ return nil, errors.New("cbor: invalid NilContainers " + strconv.Itoa(int(opts.NilContainers)))
+ }
+ if !opts.TagsMd.valid() {
+ return nil, errors.New("cbor: invalid TagsMd " + strconv.Itoa(int(opts.TagsMd)))
+ }
+ if opts.TagsMd == TagsForbidden && opts.TimeTag == EncTagRequired {
+ return nil, errors.New("cbor: cannot set TagsMd to TagsForbidden when TimeTag is EncTagRequired")
+ }
+ if !opts.OmitEmpty.valid() {
+ return nil, errors.New("cbor: invalid OmitEmpty " + strconv.Itoa(int(opts.OmitEmpty)))
+ }
+ stringMajorType, err := opts.String.cborType()
+ if err != nil {
+ return nil, err
+ }
+ if !opts.FieldName.valid() {
+ return nil, errors.New("cbor: invalid FieldName " + strconv.Itoa(int(opts.FieldName)))
+ }
+ byteSliceLaterEncodingTag, err := opts.ByteSliceLaterFormat.encodingTag()
+ if err != nil {
+ return nil, err
+ }
+ if !opts.ByteArray.valid() {
+ return nil, errors.New("cbor: invalid ByteArray " + strconv.Itoa(int(opts.ByteArray)))
+ }
+ if !opts.BinaryMarshaler.valid() {
+ return nil, errors.New("cbor: invalid BinaryMarshaler " + strconv.Itoa(int(opts.BinaryMarshaler)))
+ }
+ if !opts.TextMarshaler.valid() {
+ return nil, errors.New("cbor: invalid TextMarshaler " + strconv.Itoa(int(opts.TextMarshaler)))
+ }
+ em := encMode{
+ sort: opts.Sort,
+ shortestFloat: opts.ShortestFloat,
+ nanConvert: opts.NaNConvert,
+ infConvert: opts.InfConvert,
+ bigIntConvert: opts.BigIntConvert,
+ time: opts.Time,
+ timeTag: opts.TimeTag,
+ indefLength: opts.IndefLength,
+ nilContainers: opts.NilContainers,
+ tagsMd: opts.TagsMd,
+ omitEmpty: opts.OmitEmpty,
+ stringType: opts.String,
+ stringMajorType: stringMajorType,
+ fieldName: opts.FieldName,
+ byteSliceLaterFormat: opts.ByteSliceLaterFormat,
+ byteSliceLaterEncodingTag: byteSliceLaterEncodingTag,
+ byteArray: opts.ByteArray,
+ binaryMarshaler: opts.BinaryMarshaler,
+ textMarshaler: opts.TextMarshaler,
+ jsonMarshalerTranscoder: opts.JSONMarshalerTranscoder,
+ }
+ return &em, nil
+}
+
+// EncMode is the main interface for CBOR encoding.
+type EncMode interface {
+ Marshal(v any) ([]byte, error)
+ NewEncoder(w io.Writer) *Encoder
+ EncOptions() EncOptions
+}
+
+// UserBufferEncMode is an interface for CBOR encoding, which extends EncMode by
+// adding MarshalToBuffer to support a user-specified buffer rather than encoding
+// into the built-in buffer pool.
+type UserBufferEncMode interface {
+ EncMode
+ MarshalToBuffer(v any, buf *bytes.Buffer) error
+
+	// This private method prevents users from implementing
+	// this interface, so future additions to it will
+	// not be breaking changes.
+ // See https://go.dev/blog/module-compatibility
+ unexport()
+}
+
+type encMode struct {
+ tags tagProvider
+ sort SortMode
+ shortestFloat ShortestFloatMode
+ nanConvert NaNConvertMode
+ infConvert InfConvertMode
+ bigIntConvert BigIntConvertMode
+ time TimeMode
+ timeTag EncTagMode
+ indefLength IndefLengthMode
+ nilContainers NilContainersMode
+ tagsMd TagsMode
+ omitEmpty OmitEmptyMode
+ stringType StringMode
+ stringMajorType cborType
+ fieldName FieldNameMode
+ byteSliceLaterFormat ByteSliceLaterFormatMode
+ byteSliceLaterEncodingTag uint64
+ byteArray ByteArrayMode
+ binaryMarshaler BinaryMarshalerMode
+ textMarshaler TextMarshalerMode
+ jsonMarshalerTranscoder Transcoder
+}
+
+var defaultEncMode, _ = EncOptions{}.encMode()
+
+// These four decoding modes are used by getMarshalerDecMode.
+// maxNestedLevels, maxArrayElements, and maxMapPairs are set to the
+// maximum allowed limits so that well-formedness checks do not reject
+// Marshaler output that a non-Marshaler value exceeding the default
+// limits could legitimately have produced.
+var (
+ marshalerForbidIndefLengthForbidTagsDecMode = decMode{
+ maxNestedLevels: maxMaxNestedLevels,
+ maxArrayElements: maxMaxArrayElements,
+ maxMapPairs: maxMaxMapPairs,
+ indefLength: IndefLengthForbidden,
+ tagsMd: TagsForbidden,
+ }
+
+ marshalerAllowIndefLengthForbidTagsDecMode = decMode{
+ maxNestedLevels: maxMaxNestedLevels,
+ maxArrayElements: maxMaxArrayElements,
+ maxMapPairs: maxMaxMapPairs,
+ indefLength: IndefLengthAllowed,
+ tagsMd: TagsForbidden,
+ }
+
+ marshalerForbidIndefLengthAllowTagsDecMode = decMode{
+ maxNestedLevels: maxMaxNestedLevels,
+ maxArrayElements: maxMaxArrayElements,
+ maxMapPairs: maxMaxMapPairs,
+ indefLength: IndefLengthForbidden,
+ tagsMd: TagsAllowed,
+ }
+
+ marshalerAllowIndefLengthAllowTagsDecMode = decMode{
+ maxNestedLevels: maxMaxNestedLevels,
+ maxArrayElements: maxMaxArrayElements,
+ maxMapPairs: maxMaxMapPairs,
+ indefLength: IndefLengthAllowed,
+ tagsMd: TagsAllowed,
+ }
+)
+
+// getMarshalerDecMode returns one of four existing decoding modes
+// which can be reused (safe for parallel use) for the purpose of
+// checking if data returned by Marshaler is well-formed.
+func getMarshalerDecMode(indefLength IndefLengthMode, tagsMd TagsMode) *decMode {
+ switch {
+ case indefLength == IndefLengthAllowed && tagsMd == TagsAllowed:
+ return &marshalerAllowIndefLengthAllowTagsDecMode
+
+ case indefLength == IndefLengthAllowed && tagsMd == TagsForbidden:
+ return &marshalerAllowIndefLengthForbidTagsDecMode
+
+ case indefLength == IndefLengthForbidden && tagsMd == TagsAllowed:
+ return &marshalerForbidIndefLengthAllowTagsDecMode
+
+ case indefLength == IndefLengthForbidden && tagsMd == TagsForbidden:
+ return &marshalerForbidIndefLengthForbidTagsDecMode
+
+ default:
+ // This should never happen, unless we add new options to
+ // IndefLengthMode or TagsMode without updating this function.
+ return &decMode{
+ maxNestedLevels: maxMaxNestedLevels,
+ maxArrayElements: maxMaxArrayElements,
+ maxMapPairs: maxMaxMapPairs,
+ indefLength: indefLength,
+ tagsMd: tagsMd,
+ }
+ }
+}
+
+// EncOptions returns the user-specified options used to create this EncMode.
+func (em *encMode) EncOptions() EncOptions {
+ return EncOptions{
+ Sort: em.sort,
+ ShortestFloat: em.shortestFloat,
+ NaNConvert: em.nanConvert,
+ InfConvert: em.infConvert,
+ BigIntConvert: em.bigIntConvert,
+ Time: em.time,
+ TimeTag: em.timeTag,
+ IndefLength: em.indefLength,
+ NilContainers: em.nilContainers,
+ TagsMd: em.tagsMd,
+ OmitEmpty: em.omitEmpty,
+ String: em.stringType,
+ FieldName: em.fieldName,
+ ByteSliceLaterFormat: em.byteSliceLaterFormat,
+ ByteArray: em.byteArray,
+ BinaryMarshaler: em.binaryMarshaler,
+ TextMarshaler: em.textMarshaler,
+ JSONMarshalerTranscoder: em.jsonMarshalerTranscoder,
+ }
+}
+
+func (em *encMode) unexport() {}
+
+func (em *encMode) encTagBytes(t reflect.Type) []byte {
+ if em.tags != nil {
+ if tagItem := em.tags.getTagItemFromType(t); tagItem != nil {
+ return tagItem.cborTagNum
+ }
+ }
+ return nil
+}
+
+// Marshal returns the CBOR encoding of v using em encoding mode.
+//
+// See the documentation for Marshal for details.
+func (em *encMode) Marshal(v any) ([]byte, error) {
+ e := getEncodeBuffer()
+
+ if err := encode(e, em, reflect.ValueOf(v)); err != nil {
+ putEncodeBuffer(e)
+ return nil, err
+ }
+
+ buf := make([]byte, e.Len())
+ copy(buf, e.Bytes())
+
+ putEncodeBuffer(e)
+ return buf, nil
+}
+
+// MarshalToBuffer encodes v into the provided buffer (instead of using the built-in buffer pool)
+// and uses em encoding mode.
+//
+// NOTE: Unlike Marshal, the buffer provided to MarshalToBuffer can contain
+// partially encoded data if an error is returned.
+//
+// See Marshal for more details.
+func (em *encMode) MarshalToBuffer(v any, buf *bytes.Buffer) error {
+ if buf == nil {
+		return errors.New("cbor: encoding buffer provided by user is nil")
+ }
+ return encode(buf, em, reflect.ValueOf(v))
+}
+
+// NewEncoder returns a new encoder that writes to w using em EncMode.
+func (em *encMode) NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{w: w, em: em}
+}
+
+// encodeBufferPool caches unused bytes.Buffer objects for later reuse.
+var encodeBufferPool = sync.Pool{
+ New: func() any {
+ e := new(bytes.Buffer)
+ e.Grow(32) // TODO: make this configurable
+ return e
+ },
+}
+
+func getEncodeBuffer() *bytes.Buffer {
+ return encodeBufferPool.Get().(*bytes.Buffer)
+}
+
+func putEncodeBuffer(e *bytes.Buffer) {
+ e.Reset()
+ encodeBufferPool.Put(e)
+}
+
+type encodeFunc func(e *bytes.Buffer, em *encMode, v reflect.Value) error
+type isEmptyFunc func(em *encMode, v reflect.Value) (empty bool, err error)
+type isZeroFunc func(v reflect.Value) (zero bool, err error)
+
+func encode(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ if !v.IsValid() {
+		// v is the zero reflect.Value (e.g. from marshaling a nil interface); encode as CBOR null.
+ e.Write(cborNil)
+ return nil
+ }
+ vt := v.Type()
+ f, _, _ := getEncodeFunc(vt)
+ if f == nil {
+ return &UnsupportedTypeError{vt}
+ }
+
+ return f(e, em, v)
+}
+
+func encodeBool(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ if b := em.encTagBytes(v.Type()); b != nil {
+ e.Write(b)
+ }
+ b := cborFalse
+ if v.Bool() {
+ b = cborTrue
+ }
+ e.Write(b)
+ return nil
+}
+
+func encodeInt(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ if b := em.encTagBytes(v.Type()); b != nil {
+ e.Write(b)
+ }
+ i := v.Int()
+ if i >= 0 {
+ encodeHead(e, byte(cborTypePositiveInt), uint64(i))
+ return nil
+ }
+ i = i*(-1) - 1
+ encodeHead(e, byte(cborTypeNegativeInt), uint64(i))
+ return nil
+}
+
+func encodeUint(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ if b := em.encTagBytes(v.Type()); b != nil {
+ e.Write(b)
+ }
+ encodeHead(e, byte(cborTypePositiveInt), v.Uint())
+ return nil
+}
+
+func encodeFloat(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ if b := em.encTagBytes(v.Type()); b != nil {
+ e.Write(b)
+ }
+ f64 := v.Float()
+ if math.IsNaN(f64) {
+ return encodeNaN(e, em, v)
+ }
+ if math.IsInf(f64, 0) {
+ return encodeInf(e, em, v)
+ }
+ fopt := em.shortestFloat
+ if v.Kind() == reflect.Float64 && (fopt == ShortestFloatNone || cannotFitFloat32(f64)) {
+ // Encode float64
+ // Don't use encodeFloat64() because it cannot be inlined.
+ const argumentSize = 8
+ const headSize = 1 + argumentSize
+ var scratch [headSize]byte
+ scratch[0] = byte(cborTypePrimitives) | byte(additionalInformationAsFloat64)
+ binary.BigEndian.PutUint64(scratch[1:], math.Float64bits(f64))
+ e.Write(scratch[:])
+ return nil
+ }
+
+ f32 := float32(f64)
+ if fopt == ShortestFloat16 {
+ var f16 float16.Float16
+ p := float16.PrecisionFromfloat32(f32)
+ if p == float16.PrecisionExact {
+ // Roundtrip float32->float16->float32 test isn't needed.
+ f16 = float16.Fromfloat32(f32)
+ } else if p == float16.PrecisionUnknown {
+ // Try roundtrip float32->float16->float32 to determine if float32 can fit into float16.
+ f16 = float16.Fromfloat32(f32)
+ if f16.Float32() == f32 {
+ p = float16.PrecisionExact
+ }
+ }
+ if p == float16.PrecisionExact {
+ // Encode float16
+ // Don't use encodeFloat16() because it cannot be inlined.
+ const argumentSize = 2
+ const headSize = 1 + argumentSize
+ var scratch [headSize]byte
+ scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat16
+ binary.BigEndian.PutUint16(scratch[1:], uint16(f16))
+ e.Write(scratch[:])
+ return nil
+ }
+ }
+
+ // Encode float32
+ // Don't use encodeFloat32() because it cannot be inlined.
+ const argumentSize = 4
+ const headSize = 1 + argumentSize
+ var scratch [headSize]byte
+ scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat32
+ binary.BigEndian.PutUint32(scratch[1:], math.Float32bits(f32))
+ e.Write(scratch[:])
+ return nil
+}
+
+func encodeInf(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ f64 := v.Float()
+ switch em.infConvert {
+ case InfConvertReject:
+ return &UnsupportedValueError{msg: "floating-point infinity"}
+
+ case InfConvertFloat16:
+ if f64 > 0 {
+ e.Write(cborPositiveInfinity)
+ } else {
+ e.Write(cborNegativeInfinity)
+ }
+ return nil
+ }
+ if v.Kind() == reflect.Float64 {
+ return encodeFloat64(e, f64)
+ }
+ return encodeFloat32(e, float32(f64))
+}
+
+func encodeNaN(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ switch em.nanConvert {
+ case NaNConvert7e00:
+ e.Write(cborNaN)
+ return nil
+
+ case NaNConvertNone:
+ if v.Kind() == reflect.Float64 {
+ return encodeFloat64(e, v.Float())
+ }
+ f32 := float32NaNFromReflectValue(v)
+ return encodeFloat32(e, f32)
+
+ case NaNConvertReject:
+ return &UnsupportedValueError{msg: "floating-point NaN"}
+
+ default: // NaNConvertPreserveSignal, NaNConvertQuiet
+ if v.Kind() == reflect.Float64 {
+ f64 := v.Float()
+ f64bits := math.Float64bits(f64)
+ if em.nanConvert == NaNConvertQuiet && f64bits&(1<<51) == 0 {
+ f64bits |= 1 << 51 // Set quiet bit = 1
+ f64 = math.Float64frombits(f64bits)
+ }
+ // The lower 29 bits are dropped when converting from float64 to float32.
+ if f64bits&0x1fffffff != 0 {
+ // Encode NaN as float64 because dropped coef bits from float64 to float32 are not all 0s.
+ return encodeFloat64(e, f64)
+ }
+ // Create float32 from float64 manually because float32(f64) always turns on NaN's quiet bits.
+ sign := uint32(f64bits>>32) & (1 << 31)
+ exp := uint32(0x7f800000)
+ coef := uint32((f64bits & 0xfffffffffffff) >> 29)
+ f32bits := sign | exp | coef
+ f32 := math.Float32frombits(f32bits)
+ // The lower 13 bits are dropped when converting from float32 to float16.
+ if f32bits&0x1fff != 0 {
+ // Encode NaN as float32 because dropped coef bits from float32 to float16 are not all 0s.
+ return encodeFloat32(e, f32)
+ }
+ // Encode NaN as float16
+ f16, _ := float16.FromNaN32ps(f32) // Ignore err because it only returns error when f32 is not a NaN.
+ return encodeFloat16(e, f16)
+ }
+
+ f32 := float32NaNFromReflectValue(v)
+ f32bits := math.Float32bits(f32)
+ if em.nanConvert == NaNConvertQuiet && f32bits&(1<<22) == 0 {
+ f32bits |= 1 << 22 // Set quiet bit = 1
+ f32 = math.Float32frombits(f32bits)
+ }
+ // The lower 13 bits are dropped coef bits when converting from float32 to float16.
+ if f32bits&0x1fff != 0 {
+ // Encode NaN as float32 because dropped coef bits from float32 to float16 are not all 0s.
+ return encodeFloat32(e, f32)
+ }
+ f16, _ := float16.FromNaN32ps(f32) // Ignore err because it only returns error when f32 is not a NaN.
+ return encodeFloat16(e, f16)
+ }
+}
+
+func encodeFloat16(e *bytes.Buffer, f16 float16.Float16) error {
+ const argumentSize = 2
+ const headSize = 1 + argumentSize
+ var scratch [headSize]byte
+ scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat16
+ binary.BigEndian.PutUint16(scratch[1:], uint16(f16))
+ e.Write(scratch[:])
+ return nil
+}
+
+func encodeFloat32(e *bytes.Buffer, f32 float32) error {
+ const argumentSize = 4
+ const headSize = 1 + argumentSize
+ var scratch [headSize]byte
+ scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat32
+ binary.BigEndian.PutUint32(scratch[1:], math.Float32bits(f32))
+ e.Write(scratch[:])
+ return nil
+}
+
+func encodeFloat64(e *bytes.Buffer, f64 float64) error {
+ const argumentSize = 8
+ const headSize = 1 + argumentSize
+ var scratch [headSize]byte
+ scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat64
+ binary.BigEndian.PutUint64(scratch[1:], math.Float64bits(f64))
+ e.Write(scratch[:])
+ return nil
+}
+
+func encodeByteString(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ vk := v.Kind()
+ if vk == reflect.Slice && v.IsNil() && em.nilContainers == NilContainerAsNull {
+ e.Write(cborNil)
+ return nil
+ }
+ if vk == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 && em.byteSliceLaterEncodingTag != 0 {
+ encodeHead(e, byte(cborTypeTag), em.byteSliceLaterEncodingTag)
+ }
+ if b := em.encTagBytes(v.Type()); b != nil {
+ e.Write(b)
+ }
+ slen := v.Len()
+ if slen == 0 {
+ return e.WriteByte(byte(cborTypeByteString))
+ }
+ encodeHead(e, byte(cborTypeByteString), uint64(slen))
+ if vk == reflect.Array {
+ for i := 0; i < slen; i++ {
+ e.WriteByte(byte(v.Index(i).Uint()))
+ }
+ return nil
+ }
+ e.Write(v.Bytes())
+ return nil
+}
+
+func encodeString(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ if b := em.encTagBytes(v.Type()); b != nil {
+ e.Write(b)
+ }
+ s := v.String()
+ encodeHead(e, byte(em.stringMajorType), uint64(len(s)))
+ e.WriteString(s)
+ return nil
+}
+
+type arrayEncodeFunc struct {
+ f encodeFunc
+}
+
+func (ae arrayEncodeFunc) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ if em.byteArray == ByteArrayToByteSlice && v.Type().Elem().Kind() == reflect.Uint8 {
+ return encodeByteString(e, em, v)
+ }
+ if v.Kind() == reflect.Slice && v.IsNil() && em.nilContainers == NilContainerAsNull {
+ e.Write(cborNil)
+ return nil
+ }
+ if b := em.encTagBytes(v.Type()); b != nil {
+ e.Write(b)
+ }
+ alen := v.Len()
+ if alen == 0 {
+ return e.WriteByte(byte(cborTypeArray))
+ }
+ encodeHead(e, byte(cborTypeArray), uint64(alen))
+ for i := 0; i < alen; i++ {
+ if err := ae.f(e, em, v.Index(i)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// encodeKeyValueFunc encodes key/value pairs in map (v).
+// If kvs is provided (having the same length as v), length of encoded key and value are stored in kvs.
+// kvs is used for canonical encoding of map.
+type encodeKeyValueFunc func(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error
+
+type mapEncodeFunc struct {
+ e encodeKeyValueFunc
+}
+
+func (me mapEncodeFunc) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ if v.IsNil() && em.nilContainers == NilContainerAsNull {
+ e.Write(cborNil)
+ return nil
+ }
+ if b := em.encTagBytes(v.Type()); b != nil {
+ e.Write(b)
+ }
+ mlen := v.Len()
+ if mlen == 0 {
+ return e.WriteByte(byte(cborTypeMap))
+ }
+
+ encodeHead(e, byte(cborTypeMap), uint64(mlen))
+ if em.sort == SortNone || em.sort == SortFastShuffle || mlen <= 1 {
+ return me.e(e, em, v, nil)
+ }
+
+ kvsp := getKeyValues(v.Len()) // for sorting keys
+ defer putKeyValues(kvsp)
+ kvs := *kvsp
+
+ kvBeginOffset := e.Len()
+ if err := me.e(e, em, v, kvs); err != nil {
+ return err
+ }
+ kvTotalLen := e.Len() - kvBeginOffset
+
+ // Use the capacity at the tail of the encode buffer as a staging area to rearrange the
+ // encoded pairs into sorted order.
+ e.Grow(kvTotalLen)
+ tmp := e.Bytes()[e.Len() : e.Len()+kvTotalLen] // Can use e.AvailableBuffer() in Go 1.21+.
+ dst := e.Bytes()[kvBeginOffset:]
+
+ if em.sort == SortBytewiseLexical {
+ sort.Sort(&bytewiseKeyValueSorter{kvs: kvs, data: dst})
+ } else {
+ sort.Sort(&lengthFirstKeyValueSorter{kvs: kvs, data: dst})
+ }
+
+ // This is where the encoded bytes are actually rearranged in the output buffer to reflect
+ // the desired order.
+ sortedOffset := 0
+ for _, kv := range kvs {
+ copy(tmp[sortedOffset:], dst[kv.offset:kv.nextOffset])
+ sortedOffset += kv.nextOffset - kv.offset
+ }
+ copy(dst, tmp[:kvTotalLen])
+
+	return nil
+}
+
+// keyValue is the position of an encoded pair in a buffer. All offsets are zero-based and relative
+// to the first byte of the first encoded pair.
+type keyValue struct {
+ offset int
+ valueOffset int
+ nextOffset int
+}
+
+type bytewiseKeyValueSorter struct {
+ kvs []keyValue
+ data []byte
+}
+
+func (x *bytewiseKeyValueSorter) Len() int {
+ return len(x.kvs)
+}
+
+func (x *bytewiseKeyValueSorter) Swap(i, j int) {
+ x.kvs[i], x.kvs[j] = x.kvs[j], x.kvs[i]
+}
+
+func (x *bytewiseKeyValueSorter) Less(i, j int) bool {
+ kvi, kvj := x.kvs[i], x.kvs[j]
+ return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) <= 0
+}
+
+type lengthFirstKeyValueSorter struct {
+ kvs []keyValue
+ data []byte
+}
+
+func (x *lengthFirstKeyValueSorter) Len() int {
+ return len(x.kvs)
+}
+
+func (x *lengthFirstKeyValueSorter) Swap(i, j int) {
+ x.kvs[i], x.kvs[j] = x.kvs[j], x.kvs[i]
+}
+
+func (x *lengthFirstKeyValueSorter) Less(i, j int) bool {
+ kvi, kvj := x.kvs[i], x.kvs[j]
+ if keyLengthDifference := (kvi.valueOffset - kvi.offset) - (kvj.valueOffset - kvj.offset); keyLengthDifference != 0 {
+ return keyLengthDifference < 0
+ }
+ return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) <= 0
+}
+
+var keyValuePool = sync.Pool{}
+
+func getKeyValues(length int) *[]keyValue {
+ v := keyValuePool.Get()
+ if v == nil {
+ y := make([]keyValue, length)
+ return &y
+ }
+ x := v.(*[]keyValue)
+ if cap(*x) >= length {
+ *x = (*x)[:length]
+ return x
+ }
+ // []keyValue from the pool does not have enough capacity.
+ // Return it back to the pool and create a new one.
+ keyValuePool.Put(x)
+ y := make([]keyValue, length)
+ return &y
+}
+
+func putKeyValues(x *[]keyValue) {
+ *x = (*x)[:0]
+ keyValuePool.Put(x)
+}
+
+func encodeStructToArray(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) {
+ structType, err := getEncodingStructType(v.Type())
+ if err != nil {
+ return err
+ }
+
+ if b := em.encTagBytes(v.Type()); b != nil {
+ e.Write(b)
+ }
+
+ flds := structType.fields
+
+ encodeHead(e, byte(cborTypeArray), uint64(len(flds)))
+ for i := 0; i < len(flds); i++ {
+ f := flds[i]
+
+ var fv reflect.Value
+ if len(f.idx) == 1 {
+ fv = v.Field(f.idx[0])
+ } else {
+ // Get embedded field value. No error is expected.
+ fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) {
+ // Write CBOR nil for null pointer to embedded struct
+ e.Write(cborNil)
+ return reflect.Value{}, nil
+ })
+ if !fv.IsValid() {
+ continue
+ }
+ }
+
+ if err := f.ef(e, em, fv); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func encodeStruct(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) {
+ structType, err := getEncodingStructType(v.Type())
+ if err != nil {
+ return err
+ }
+
+ flds := structType.getFields(em)
+
+ start := 0
+ if em.sort == SortFastShuffle && len(flds) > 0 {
+ start = rand.Intn(len(flds)) //nolint:gosec // Don't need a CSPRNG for deck cutting.
+ }
+
+ if b := em.encTagBytes(v.Type()); b != nil {
+ e.Write(b)
+ }
+
+ // Encode head with struct field count.
+ // Head is rewritten later if actual encoded field count is different from struct field count.
+ encodedHeadLen := encodeHead(e, byte(cborTypeMap), uint64(len(flds)))
+
+ kvbegin := e.Len()
+ kvcount := 0
+ for offset := 0; offset < len(flds); offset++ {
+ f := flds[(start+offset)%len(flds)]
+
+ var fv reflect.Value
+ if len(f.idx) == 1 {
+ fv = v.Field(f.idx[0])
+ } else {
+ // Get embedded field value. No error is expected.
+ fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) {
+ // Skip null pointer to embedded struct
+ return reflect.Value{}, nil
+ })
+ if !fv.IsValid() {
+ continue
+ }
+ }
+ if f.omitEmpty {
+ empty, err := f.ief(em, fv)
+ if err != nil {
+ return err
+ }
+ if empty {
+ continue
+ }
+ }
+ if f.omitZero {
+ zero, err := f.izf(fv)
+ if err != nil {
+ return err
+ }
+ if zero {
+ continue
+ }
+ }
+
+ if !f.keyAsInt && em.fieldName == FieldNameToByteString {
+ e.Write(f.cborNameByteString)
+ } else { // int or text string
+ e.Write(f.cborName)
+ }
+
+ if err := f.ef(e, em, fv); err != nil {
+ return err
+ }
+
+ kvcount++
+ }
+
+ if len(flds) == kvcount {
+ // Encoded element count in head is the same as actual element count.
+ return nil
+ }
+
+ // Overwrite the bytes that were reserved for the head before encoding the map entries.
+ var actualHeadLen int
+ {
+ headbuf := *bytes.NewBuffer(e.Bytes()[kvbegin-encodedHeadLen : kvbegin-encodedHeadLen : kvbegin])
+ actualHeadLen = encodeHead(&headbuf, byte(cborTypeMap), uint64(kvcount))
+ }
+
+ if actualHeadLen == encodedHeadLen {
+ // The bytes reserved for the encoded head were exactly the right size, so the
+ // encoded entries are already in their final positions.
+ return nil
+ }
+
+ // We reserved more bytes than needed for the encoded head, based on the number of fields
+ // encoded. The encoded entries are offset to the right by the number of excess reserved
+ // bytes. Shift the entries left to remove the gap.
+ excessReservedBytes := encodedHeadLen - actualHeadLen
+ dst := e.Bytes()[kvbegin-excessReservedBytes : e.Len()-excessReservedBytes]
+ src := e.Bytes()[kvbegin:e.Len()]
+ copy(dst, src)
+
+ // After shifting, the excess bytes are at the end of the output buffer and they are
+ // garbage.
+ e.Truncate(e.Len() - excessReservedBytes)
+ return nil
+}
+
+func encodeIntf(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ if v.IsNil() {
+ e.Write(cborNil)
+ return nil
+ }
+ return encode(e, em, v.Elem())
+}
+
+func encodeTime(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ t := v.Interface().(time.Time)
+ if t.IsZero() {
+ e.Write(cborNil) // Even if tag is required, encode as CBOR null.
+ return nil
+ }
+ if em.timeTag == EncTagRequired {
+ tagNumber := 1
+ if em.time == TimeRFC3339 || em.time == TimeRFC3339Nano {
+ tagNumber = 0
+ }
+ encodeHead(e, byte(cborTypeTag), uint64(tagNumber))
+ }
+ switch em.time {
+ case TimeUnix:
+ secs := t.Unix()
+ return encodeInt(e, em, reflect.ValueOf(secs))
+
+ case TimeUnixMicro:
+ t = t.UTC().Round(time.Microsecond)
+ f := float64(t.UnixNano()) / 1e9
+ return encodeFloat(e, em, reflect.ValueOf(f))
+
+ case TimeUnixDynamic:
+ t = t.UTC().Round(time.Microsecond)
+ secs, nsecs := t.Unix(), uint64(t.Nanosecond())
+ if nsecs == 0 {
+ return encodeInt(e, em, reflect.ValueOf(secs))
+ }
+ f := float64(secs) + float64(nsecs)/1e9
+ return encodeFloat(e, em, reflect.ValueOf(f))
+
+ case TimeRFC3339:
+ s := t.Format(time.RFC3339)
+ return encodeString(e, em, reflect.ValueOf(s))
+
+ default: // TimeRFC3339Nano
+ s := t.Format(time.RFC3339Nano)
+ return encodeString(e, em, reflect.ValueOf(s))
+ }
+}
+
+func encodeBigInt(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ if em.bigIntConvert == BigIntConvertReject {
+ return &UnsupportedTypeError{Type: typeBigInt}
+ }
+
+ vbi := v.Interface().(big.Int)
+ sign := vbi.Sign()
+ bi := new(big.Int).SetBytes(vbi.Bytes()) // bi is absolute value of v
+ if sign < 0 {
+ // For negative number, convert to CBOR encoded number (-v-1).
+ bi.Sub(bi, big.NewInt(1))
+ }
+
+ if em.bigIntConvert == BigIntConvertShortest {
+ if bi.IsUint64() {
+ if sign >= 0 {
+ // Encode as CBOR pos int (major type 0)
+ encodeHead(e, byte(cborTypePositiveInt), bi.Uint64())
+ return nil
+ }
+ // Encode as CBOR neg int (major type 1)
+ encodeHead(e, byte(cborTypeNegativeInt), bi.Uint64())
+ return nil
+ }
+ }
+
+ tagNum := 2
+ if sign < 0 {
+ tagNum = 3
+ }
+ // Write tag number
+ encodeHead(e, byte(cborTypeTag), uint64(tagNum))
+ // Write bignum byte string
+ b := bi.Bytes()
+ encodeHead(e, byte(cborTypeByteString), uint64(len(b)))
+ e.Write(b)
+ return nil
+}
+
+type binaryMarshalerEncoder struct {
+ alternateEncode encodeFunc
+ alternateIsEmpty isEmptyFunc
+}
+
+func (bme binaryMarshalerEncoder) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ if em.binaryMarshaler != BinaryMarshalerByteString {
+ return bme.alternateEncode(e, em, v)
+ }
+
+ vt := v.Type()
+ m, ok := v.Interface().(encoding.BinaryMarshaler)
+ if !ok {
+ pv := reflect.New(vt)
+ pv.Elem().Set(v)
+ m = pv.Interface().(encoding.BinaryMarshaler)
+ }
+ data, err := m.MarshalBinary()
+ if err != nil {
+ return err
+ }
+ if b := em.encTagBytes(vt); b != nil {
+ e.Write(b)
+ }
+ encodeHead(e, byte(cborTypeByteString), uint64(len(data)))
+ e.Write(data)
+ return nil
+}
+
+func (bme binaryMarshalerEncoder) isEmpty(em *encMode, v reflect.Value) (bool, error) {
+ if em.binaryMarshaler != BinaryMarshalerByteString {
+ return bme.alternateIsEmpty(em, v)
+ }
+
+ m, ok := v.Interface().(encoding.BinaryMarshaler)
+ if !ok {
+ pv := reflect.New(v.Type())
+ pv.Elem().Set(v)
+ m = pv.Interface().(encoding.BinaryMarshaler)
+ }
+ data, err := m.MarshalBinary()
+ if err != nil {
+ return false, err
+ }
+ return len(data) == 0, nil
+}
+
+type textMarshalerEncoder struct {
+ alternateEncode encodeFunc
+ alternateIsEmpty isEmptyFunc
+}
+
+func (tme textMarshalerEncoder) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ if em.textMarshaler == TextMarshalerNone {
+ return tme.alternateEncode(e, em, v)
+ }
+
+ vt := v.Type()
+ m, ok := v.Interface().(encoding.TextMarshaler)
+ if !ok {
+ pv := reflect.New(vt)
+ pv.Elem().Set(v)
+ m = pv.Interface().(encoding.TextMarshaler)
+ }
+ data, err := m.MarshalText()
+ if err != nil {
+ return fmt.Errorf("cbor: cannot marshal text for %s: %w", vt, err)
+ }
+ if b := em.encTagBytes(vt); b != nil {
+ e.Write(b)
+ }
+
+ encodeHead(e, byte(cborTypeTextString), uint64(len(data)))
+ e.Write(data)
+ return nil
+}
+
+func (tme textMarshalerEncoder) isEmpty(em *encMode, v reflect.Value) (bool, error) {
+ if em.textMarshaler == TextMarshalerNone {
+ return tme.alternateIsEmpty(em, v)
+ }
+
+ m, ok := v.Interface().(encoding.TextMarshaler)
+ if !ok {
+ pv := reflect.New(v.Type())
+ pv.Elem().Set(v)
+ m = pv.Interface().(encoding.TextMarshaler)
+ }
+ data, err := m.MarshalText()
+ if err != nil {
+ return false, fmt.Errorf("cbor: cannot marshal text for %s: %w", v.Type(), err)
+ }
+ return len(data) == 0, nil
+}
+
+type jsonMarshalerEncoder struct {
+ alternateEncode encodeFunc
+ alternateIsEmpty isEmptyFunc
+}
+
+func (jme jsonMarshalerEncoder) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ if em.jsonMarshalerTranscoder == nil {
+ return jme.alternateEncode(e, em, v)
+ }
+
+ vt := v.Type()
+ m, ok := v.Interface().(jsonMarshaler)
+ if !ok {
+ pv := reflect.New(vt)
+ pv.Elem().Set(v)
+ m = pv.Interface().(jsonMarshaler)
+ }
+
+ json, err := m.MarshalJSON()
+ if err != nil {
+ return err
+ }
+
+ offset := e.Len()
+
+ if b := em.encTagBytes(vt); b != nil {
+ e.Write(b)
+ }
+
+ if err := em.jsonMarshalerTranscoder.Transcode(e, bytes.NewReader(json)); err != nil {
+ return &TranscodeError{err: err, rtype: vt, sourceFormat: "json", targetFormat: "cbor"}
+ }
+
+ // Validate that the transcode function has written exactly one well-formed data item.
+ d := decoder{data: e.Bytes()[offset:], dm: getMarshalerDecMode(em.indefLength, em.tagsMd)}
+ if err := d.wellformed(false, true); err != nil {
+ e.Truncate(offset)
+ return &TranscodeError{err: err, rtype: vt, sourceFormat: "json", targetFormat: "cbor"}
+ }
+
+ return nil
+}
+
+func (jme jsonMarshalerEncoder) isEmpty(em *encMode, v reflect.Value) (bool, error) {
+ if em.jsonMarshalerTranscoder == nil {
+ return jme.alternateIsEmpty(em, v)
+ }
+
+ // As with types implementing cbor.Marshaler, transcoded json.Marshaler values always encode
+ // as exactly one complete CBOR data item.
+ return false, nil
+}
+
+func encodeMarshalerType(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ if em.tagsMd == TagsForbidden && v.Type() == typeRawTag {
+ return errors.New("cbor: cannot encode cbor.RawTag when TagsMd is TagsForbidden")
+ }
+ m, ok := v.Interface().(Marshaler)
+ if !ok {
+ pv := reflect.New(v.Type())
+ pv.Elem().Set(v)
+ m = pv.Interface().(Marshaler)
+ }
+ data, err := m.MarshalCBOR()
+ if err != nil {
+ return err
+ }
+
+ // Verify returned CBOR data item from MarshalCBOR() is well-formed and passes tag validity for builtin tags 0-3.
+ d := decoder{data: data, dm: getMarshalerDecMode(em.indefLength, em.tagsMd)}
+ err = d.wellformed(false, true)
+ if err != nil {
+ return &MarshalerError{typ: v.Type(), err: err}
+ }
+
+ e.Write(data)
+ return nil
+}
+
+func encodeTag(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ if em.tagsMd == TagsForbidden {
+ return errors.New("cbor: cannot encode cbor.Tag when TagsMd is TagsForbidden")
+ }
+
+ t := v.Interface().(Tag)
+
+ if t.Number == 0 && t.Content == nil {
+ // Marshal uninitialized cbor.Tag
+ e.Write(cborNil)
+ return nil
+ }
+
+ // Marshal tag number
+ encodeHead(e, byte(cborTypeTag), t.Number)
+
+ vem := *em // shallow copy
+
+ // For built-in tags, disable settings that may introduce tag validity errors when
+ // marshaling certain Content values.
+ switch t.Number {
+ case tagNumRFC3339Time:
+ vem.stringType = StringToTextString
+ vem.stringMajorType = cborTypeTextString
+ case tagNumUnsignedBignum, tagNumNegativeBignum:
+ vem.byteSliceLaterFormat = ByteSliceLaterFormatNone
+ vem.byteSliceLaterEncodingTag = 0
+ }
+
+ // Marshal tag content
+ return encode(e, &vem, reflect.ValueOf(t.Content))
+}
+
+// encodeHead writes the CBOR head of the specified type t and returns the number of bytes written.
+func encodeHead(e *bytes.Buffer, t byte, n uint64) int {
+ if n <= maxAdditionalInformationWithoutArgument {
+ const headSize = 1
+ e.WriteByte(t | byte(n))
+ return headSize
+ }
+
+ if n <= math.MaxUint8 {
+ const headSize = 2
+ scratch := [headSize]byte{
+ t | byte(additionalInformationWith1ByteArgument),
+ byte(n),
+ }
+ e.Write(scratch[:])
+ return headSize
+ }
+
+ if n <= math.MaxUint16 {
+ const headSize = 3
+ var scratch [headSize]byte
+ scratch[0] = t | byte(additionalInformationWith2ByteArgument)
+ binary.BigEndian.PutUint16(scratch[1:], uint16(n))
+ e.Write(scratch[:])
+ return headSize
+ }
+
+ if n <= math.MaxUint32 {
+ const headSize = 5
+ var scratch [headSize]byte
+ scratch[0] = t | byte(additionalInformationWith4ByteArgument)
+ binary.BigEndian.PutUint32(scratch[1:], uint32(n))
+ e.Write(scratch[:])
+ return headSize
+ }
+
+ const headSize = 9
+ var scratch [headSize]byte
+ scratch[0] = t | byte(additionalInformationWith8ByteArgument)
+ binary.BigEndian.PutUint64(scratch[1:], n)
+ e.Write(scratch[:])
+ return headSize
+}
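The head-size ladder above (1, 2, 3, 5, or 9 bytes depending on the argument) is observable through the package's public API, since marshaling a bare unsigned integer emits only a head. A minimal sketch, with the expected output bytes in comments:

```go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// Marshaling a bare uint emits only a CBOR head (major type 0),
	// so the output length shows which encodeHead branch was taken.
	for _, n := range []uint64{23, 24, 256, 65536, 4294967296} {
		b, err := cbor.Marshal(n)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%10d -> % x\n", n, b)
	}
	// 23         -> 17                          (1-byte head)
	// 24         -> 18 18                       (2-byte head)
	// 256        -> 19 01 00                    (3-byte head)
	// 65536      -> 1a 00 01 00 00              (5-byte head)
	// 4294967296 -> 1b 00 00 00 01 00 00 00 00  (9-byte head)
}
```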
+
+type jsonMarshaler interface{ MarshalJSON() ([]byte, error) }
+
+var (
+ typeMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem()
+ typeBinaryMarshaler = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()
+ typeTextMarshaler = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+ typeJSONMarshaler = reflect.TypeOf((*jsonMarshaler)(nil)).Elem()
+ typeRawMessage = reflect.TypeOf(RawMessage(nil))
+ typeByteString = reflect.TypeOf(ByteString(""))
+)
+
+func getEncodeFuncInternal(t reflect.Type) (ef encodeFunc, ief isEmptyFunc, izf isZeroFunc) {
+ k := t.Kind()
+ if k == reflect.Pointer {
+ return getEncodeIndirectValueFunc(t), isEmptyPtr, getIsZeroFunc(t)
+ }
+ switch t {
+ case typeSimpleValue:
+ return encodeMarshalerType, isEmptyUint, getIsZeroFunc(t)
+
+ case typeTag:
+ return encodeTag, alwaysNotEmpty, getIsZeroFunc(t)
+
+ case typeTime:
+ return encodeTime, alwaysNotEmpty, getIsZeroFunc(t)
+
+ case typeBigInt:
+ return encodeBigInt, alwaysNotEmpty, getIsZeroFunc(t)
+
+ case typeRawMessage:
+ return encodeMarshalerType, isEmptySlice, getIsZeroFunc(t)
+
+ case typeByteString:
+ return encodeMarshalerType, isEmptyString, getIsZeroFunc(t)
+ }
+ if reflect.PointerTo(t).Implements(typeMarshaler) {
+ return encodeMarshalerType, alwaysNotEmpty, getIsZeroFunc(t)
+ }
+ if reflect.PointerTo(t).Implements(typeBinaryMarshaler) {
+ defer func() {
+ // capture encoding method used for modes that disable BinaryMarshaler
+ bme := binaryMarshalerEncoder{
+ alternateEncode: ef,
+ alternateIsEmpty: ief,
+ }
+ ef = bme.encode
+ ief = bme.isEmpty
+ }()
+ }
+ if reflect.PointerTo(t).Implements(typeTextMarshaler) {
+ defer func() {
+ // capture encoding method used for modes that disable TextMarshaler
+ tme := textMarshalerEncoder{
+ alternateEncode: ef,
+ alternateIsEmpty: ief,
+ }
+ ef = tme.encode
+ ief = tme.isEmpty
+ }()
+ }
+ if reflect.PointerTo(t).Implements(typeJSONMarshaler) {
+ defer func() {
+ // capture encoding method used for modes that don't support transcoding
+ // from types that implement json.Marshaler.
+ jme := jsonMarshalerEncoder{
+ alternateEncode: ef,
+ alternateIsEmpty: ief,
+ }
+ ef = jme.encode
+ ief = jme.isEmpty
+ }()
+ }
+
+ switch k {
+ case reflect.Bool:
+ return encodeBool, isEmptyBool, getIsZeroFunc(t)
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return encodeInt, isEmptyInt, getIsZeroFunc(t)
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return encodeUint, isEmptyUint, getIsZeroFunc(t)
+
+ case reflect.Float32, reflect.Float64:
+ return encodeFloat, isEmptyFloat, getIsZeroFunc(t)
+
+ case reflect.String:
+ return encodeString, isEmptyString, getIsZeroFunc(t)
+
+ case reflect.Slice:
+ if t.Elem().Kind() == reflect.Uint8 {
+ return encodeByteString, isEmptySlice, getIsZeroFunc(t)
+ }
+ fallthrough
+
+ case reflect.Array:
+ f, _, _ := getEncodeFunc(t.Elem())
+ if f == nil {
+ return nil, nil, nil
+ }
+ return arrayEncodeFunc{f: f}.encode, isEmptySlice, getIsZeroFunc(t)
+
+ case reflect.Map:
+ f := getEncodeMapFunc(t)
+ if f == nil {
+ return nil, nil, nil
+ }
+ return f, isEmptyMap, getIsZeroFunc(t)
+
+ case reflect.Struct:
+ // Get struct's special field "_" tag options
+ if f, ok := t.FieldByName("_"); ok {
+ tag := f.Tag.Get("cbor")
+ if tag != "-" {
+ if hasToArrayOption(tag) {
+ return encodeStructToArray, isEmptyStruct, isZeroFieldStruct
+ }
+ }
+ }
+ return encodeStruct, isEmptyStruct, getIsZeroFunc(t)
+
+ case reflect.Interface:
+ return encodeIntf, isEmptyIntf, getIsZeroFunc(t)
+ }
+ return nil, nil, nil
+}
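In the dispatch above, a type's own cbor.Marshaler implementation wins over the kind-based encoders; the Binary/Text/JSON marshaler wrappers only capture the fallback via defer. A minimal sketch of that precedence, using a hypothetical Celsius type:

```go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

// Celsius implements cbor.Marshaler, so getEncodeFuncInternal selects
// encodeMarshalerType for it instead of the generic float encoder.
type Celsius float64

func (c Celsius) MarshalCBOR() ([]byte, error) {
	// Encode as a text string like "21.5C" instead of a float.
	return cbor.Marshal(fmt.Sprintf("%gC", float64(c)))
}

func main() {
	b, err := cbor.Marshal(Celsius(21.5))
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", b) // 65 32 31 2e 35 43  ("21.5C")
}
```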
+
+func getEncodeIndirectValueFunc(t reflect.Type) encodeFunc {
+ for t.Kind() == reflect.Pointer {
+ t = t.Elem()
+ }
+ f, _, _ := getEncodeFunc(t)
+ if f == nil {
+ return nil
+ }
+ return func(e *bytes.Buffer, em *encMode, v reflect.Value) error {
+ for v.Kind() == reflect.Pointer && !v.IsNil() {
+ v = v.Elem()
+ }
+ if v.Kind() == reflect.Pointer && v.IsNil() {
+ e.Write(cborNil)
+ return nil
+ }
+ return f(e, em, v)
+ }
+}
+
+func alwaysNotEmpty(_ *encMode, _ reflect.Value) (empty bool, err error) {
+ return false, nil
+}
+
+func isEmptyBool(_ *encMode, v reflect.Value) (bool, error) {
+ return !v.Bool(), nil
+}
+
+func isEmptyInt(_ *encMode, v reflect.Value) (bool, error) {
+ return v.Int() == 0, nil
+}
+
+func isEmptyUint(_ *encMode, v reflect.Value) (bool, error) {
+ return v.Uint() == 0, nil
+}
+
+func isEmptyFloat(_ *encMode, v reflect.Value) (bool, error) {
+ return v.Float() == 0.0, nil
+}
+
+func isEmptyString(_ *encMode, v reflect.Value) (bool, error) {
+ return v.Len() == 0, nil
+}
+
+func isEmptySlice(_ *encMode, v reflect.Value) (bool, error) {
+ return v.Len() == 0, nil
+}
+
+func isEmptyMap(_ *encMode, v reflect.Value) (bool, error) {
+ return v.Len() == 0, nil
+}
+
+func isEmptyPtr(_ *encMode, v reflect.Value) (bool, error) {
+ return v.IsNil(), nil
+}
+
+func isEmptyIntf(_ *encMode, v reflect.Value) (bool, error) {
+ return v.IsNil(), nil
+}
+
+func isEmptyStruct(em *encMode, v reflect.Value) (bool, error) {
+ structType, err := getEncodingStructType(v.Type())
+ if err != nil {
+ return false, err
+ }
+
+ if em.omitEmpty == OmitEmptyGoValue {
+ return false, nil
+ }
+
+ if structType.toArray {
+ return len(structType.fields) == 0, nil
+ }
+
+ if len(structType.fields) > len(structType.omitEmptyFieldsIdx) {
+ return false, nil
+ }
+
+ for _, i := range structType.omitEmptyFieldsIdx {
+ f := structType.fields[i]
+
+ // Get field value
+ var fv reflect.Value
+ if len(f.idx) == 1 {
+ fv = v.Field(f.idx[0])
+ } else {
+ // Get embedded field value. No error is expected.
+ fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) {
+ // Skip null pointer to embedded struct
+ return reflect.Value{}, nil
+ })
+ if !fv.IsValid() {
+ continue
+ }
+ }
+
+ empty, err := f.ief(em, fv)
+ if err != nil {
+ return false, err
+ }
+ if !empty {
+ return false, nil
+ }
+ }
+ return true, nil
+}
+
+func cannotFitFloat32(f64 float64) bool {
+ f32 := float32(f64)
+ return float64(f32) != f64
+}
+
+// float32NaNFromReflectValue extracts float32 NaN from reflect.Value while preserving NaN's quiet bit.
+func float32NaNFromReflectValue(v reflect.Value) float32 {
+ // Keith Randall's workaround for issue https://github.com/golang/go/issues/36400
+ p := reflect.New(v.Type())
+ p.Elem().Set(v)
+ f32 := p.Convert(reflect.TypeOf((*float32)(nil))).Elem().Interface().(float32)
+ return f32
+}
+
+type isZeroer interface {
+ IsZero() bool
+}
+
+var isZeroerType = reflect.TypeOf((*isZeroer)(nil)).Elem()
+
+// getIsZeroFunc returns a function for the given type that can be called to determine if a given value is zero.
+// Types that implement `IsZero() bool` are delegated to for non-nil values.
+// Types that do not implement `IsZero() bool` use the reflect.Value#IsZero() implementation.
+// The returned function matches the behavior of stdlib encoding/json in Go 1.24+.
+func getIsZeroFunc(t reflect.Type) isZeroFunc {
+ // Provide a function that uses a type's IsZero method if defined.
+ switch {
+ case t == nil:
+ return isZeroDefault
+ case t.Kind() == reflect.Interface && t.Implements(isZeroerType):
+ return isZeroInterfaceCustom
+ case t.Kind() == reflect.Pointer && t.Implements(isZeroerType):
+ return isZeroPointerCustom
+ case t.Implements(isZeroerType):
+ return isZeroCustom
+ case reflect.PointerTo(t).Implements(isZeroerType):
+ return isZeroAddrCustom
+ default:
+ return isZeroDefault
+ }
+}
+
+// isZeroInterfaceCustom returns true for nil or pointer-to-nil values,
+// and delegates to the custom IsZero() implementation otherwise.
+func isZeroInterfaceCustom(v reflect.Value) (bool, error) {
+ kind := v.Kind()
+
+ switch kind {
+ case reflect.Chan, reflect.Func, reflect.Map, reflect.Pointer, reflect.Interface, reflect.Slice:
+ if v.IsNil() {
+ return true, nil
+ }
+ }
+
+ switch kind {
+ case reflect.Interface, reflect.Pointer:
+ if elem := v.Elem(); elem.Kind() == reflect.Pointer && elem.IsNil() {
+ return true, nil
+ }
+ }
+
+ return v.Interface().(isZeroer).IsZero(), nil
+}
+
+// isZeroPointerCustom returns true for nil values,
+// and delegates to the custom IsZero() implementation otherwise.
+func isZeroPointerCustom(v reflect.Value) (bool, error) {
+ if v.IsNil() {
+ return true, nil
+ }
+ return v.Interface().(isZeroer).IsZero(), nil
+}
+
+// isZeroCustom delegates to the custom IsZero() implementation.
+func isZeroCustom(v reflect.Value) (bool, error) {
+ return v.Interface().(isZeroer).IsZero(), nil
+}
+
+// isZeroAddrCustom delegates to the custom IsZero() implementation of the addr of the value.
+func isZeroAddrCustom(v reflect.Value) (bool, error) {
+ if !v.CanAddr() {
+ // Temporarily box v so we can take the address.
+ v2 := reflect.New(v.Type()).Elem()
+ v2.Set(v)
+ v = v2
+ }
+ return v.Addr().Interface().(isZeroer).IsZero(), nil
+}
+
+// isZeroDefault calls reflect.Value#IsZero()
+func isZeroDefault(v reflect.Value) (bool, error) {
+ if !v.IsValid() {
+ // v is zero value
+ return true, nil
+ }
+ return v.IsZero(), nil
+}
+
+// isZeroFieldStruct is used to determine whether to omit toarray structs
+func isZeroFieldStruct(v reflect.Value) (bool, error) {
+ structType, err := getEncodingStructType(v.Type())
+ if err != nil {
+ return false, err
+ }
+ return len(structType.fields) == 0, nil
+}
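Together with the omitzero tag handling added in structfields.go further below, these IsZero helpers let fields opt out of encoding when their value is zero, delegating to a type's own IsZero() bool where one exists (e.g. time.Time). A minimal sketch with a hypothetical Event type:

```go
package main

import (
	"fmt"
	"time"

	"github.com/fxamacker/cbor/v2"
)

type Event struct {
	Name string    `cbor:"name"`
	At   time.Time `cbor:"at,omitzero"` // skipped while At.IsZero() reports true
}

func main() {
	b, err := cbor.Marshal(Event{Name: "boot"})
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", b) // a1 64 6e 61 6d 65 64 62 6f 6f 74: only {"name": "boot"}
}
```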
diff --git a/vendor/github.com/fxamacker/cbor/v2/encode_map.go b/vendor/github.com/fxamacker/cbor/v2/encode_map.go
new file mode 100644
index 000000000..2871bfdab
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/encode_map.go
@@ -0,0 +1,92 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+ "bytes"
+ "reflect"
+ "sync"
+)
+
+type mapKeyValueEncodeFunc struct {
+ kf, ef encodeFunc
+ kpool, vpool sync.Pool
+}
+
+func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error {
+ iterk := me.kpool.Get().(*reflect.Value)
+ defer func() {
+ iterk.SetZero()
+ me.kpool.Put(iterk)
+ }()
+ iterv := me.vpool.Get().(*reflect.Value)
+ defer func() {
+ iterv.SetZero()
+ me.vpool.Put(iterv)
+ }()
+
+ if kvs == nil {
+ for i, iter := 0, v.MapRange(); iter.Next(); i++ {
+ iterk.SetIterKey(iter)
+ iterv.SetIterValue(iter)
+
+ if err := me.kf(e, em, *iterk); err != nil {
+ return err
+ }
+ if err := me.ef(e, em, *iterv); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ initial := e.Len()
+ for i, iter := 0, v.MapRange(); iter.Next(); i++ {
+ iterk.SetIterKey(iter)
+ iterv.SetIterValue(iter)
+
+ offset := e.Len()
+ if err := me.kf(e, em, *iterk); err != nil {
+ return err
+ }
+ valueOffset := e.Len()
+ if err := me.ef(e, em, *iterv); err != nil {
+ return err
+ }
+ kvs[i] = keyValue{
+ offset: offset - initial,
+ valueOffset: valueOffset - initial,
+ nextOffset: e.Len() - initial,
+ }
+ }
+
+ return nil
+}
+
+func getEncodeMapFunc(t reflect.Type) encodeFunc {
+ kf, _, _ := getEncodeFunc(t.Key())
+ ef, _, _ := getEncodeFunc(t.Elem())
+ if kf == nil || ef == nil {
+ return nil
+ }
+ mkv := &mapKeyValueEncodeFunc{
+ kf: kf,
+ ef: ef,
+ kpool: sync.Pool{
+ New: func() any {
+ rk := reflect.New(t.Key()).Elem()
+ return &rk
+ },
+ },
+ vpool: sync.Pool{
+ New: func() any {
+ rv := reflect.New(t.Elem()).Elem()
+ return &rv
+ },
+ },
+ }
+ return mapEncodeFunc{
+ e: mkv.encodeKeyValues,
+ }.encode
+}
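The pools above exist because reflect.MapIter.Key()/Value() allocate a fresh reflect.Value per call, while SetIterKey/SetIterValue copy into a reusable, addressable one. A standalone sketch of that pattern, without the sync.Pool bookkeeping:

```go
package main

import (
	"fmt"
	"reflect"
)

func main() {
	m := map[string]int{"a": 1, "b": 2}
	v := reflect.ValueOf(m)

	k := reflect.New(v.Type().Key()).Elem()  // reusable key holder
	e := reflect.New(v.Type().Elem()).Elem() // reusable value holder

	for iter := v.MapRange(); iter.Next(); {
		k.SetIterKey(iter)   // copy key into the holder, no per-entry allocation
		e.SetIterValue(iter) // copy value into the holder
		fmt.Println(k.Interface(), e.Interface())
	}
}
```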
diff --git a/vendor/github.com/fxamacker/cbor/v2/omitzero_go124.go b/vendor/github.com/fxamacker/cbor/v2/omitzero_go124.go
new file mode 100644
index 000000000..c893a411d
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/omitzero_go124.go
@@ -0,0 +1,8 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+//go:build go1.24
+
+package cbor
+
+var jsonStdlibSupportsOmitzero = true
diff --git a/vendor/github.com/fxamacker/cbor/v2/omitzero_pre_go124.go b/vendor/github.com/fxamacker/cbor/v2/omitzero_pre_go124.go
new file mode 100644
index 000000000..db86a6321
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/omitzero_pre_go124.go
@@ -0,0 +1,8 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+//go:build !go1.24
+
+package cbor
+
+var jsonStdlibSupportsOmitzero = false
diff --git a/vendor/github.com/fxamacker/cbor/v2/simplevalue.go b/vendor/github.com/fxamacker/cbor/v2/simplevalue.go
new file mode 100644
index 000000000..30f72814f
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/simplevalue.go
@@ -0,0 +1,98 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+)
+
+// SimpleValue represents a CBOR simple value.
+// A CBOR simple value is:
+// - an extension point, like a CBOR tag.
+// - a subset of CBOR major type 7 that isn't floating-point.
+// - "identified by a number between 0 and 255, but distinct from that number itself".
+// For example, "a simple value 2 is not equivalent to an integer 2" as a CBOR map key.
+//
+// CBOR simple values identified by 20..23 are: "false", "true", "null", and "undefined".
+// Other CBOR simple values are currently unassigned/reserved by IANA.
+type SimpleValue uint8
+
+var (
+ typeSimpleValue = reflect.TypeOf(SimpleValue(0))
+)
+
+// MarshalCBOR encodes SimpleValue as CBOR simple value (major type 7).
+func (sv SimpleValue) MarshalCBOR() ([]byte, error) {
+ // RFC 8949 3.3. Floating-Point Numbers and Values with No Content says:
+ // "An encoder MUST NOT issue two-byte sequences that start with 0xf8
+ // (major type 7, additional information 24) and continue with a byte
+ // less than 0x20 (32 decimal). Such sequences are not well-formed.
+ // (This implies that an encoder cannot encode false, true, null, or
+ // undefined in two-byte sequences and that only the one-byte variants
+ // of these are well-formed; more generally speaking, each simple value
+ // only has a single representation variant)."
+
+ switch {
+ case sv <= maxSimpleValueInAdditionalInformation:
+ return []byte{byte(cborTypePrimitives) | byte(sv)}, nil
+
+ case sv >= minSimpleValueIn1ByteArgument:
+ return []byte{byte(cborTypePrimitives) | additionalInformationWith1ByteArgument, byte(sv)}, nil
+
+ default:
+ return nil, &UnsupportedValueError{msg: fmt.Sprintf("SimpleValue(%d)", sv)}
+ }
+}
+
+// UnmarshalCBOR decodes CBOR simple value (major type 7) to SimpleValue.
+//
+// Deprecated: No longer used by this codec; kept for compatibility
+// with user apps that directly call this function.
+func (sv *SimpleValue) UnmarshalCBOR(data []byte) error {
+ if sv == nil {
+ return errors.New("cbor.SimpleValue: UnmarshalCBOR on nil pointer")
+ }
+
+ d := decoder{data: data, dm: defaultDecMode}
+
+ // Check well-formedness of CBOR data item.
+ // SimpleValue.UnmarshalCBOR() is exported, so
+ // the codec needs to support same behavior for:
+ // - Unmarshal(data, *SimpleValue)
+ // - SimpleValue.UnmarshalCBOR(data)
+ err := d.wellformed(false, false)
+ if err != nil {
+ return err
+ }
+
+ return sv.unmarshalCBOR(data)
+}
+
+// unmarshalCBOR decodes CBOR simple value (major type 7) to SimpleValue.
+// This function assumes data is well-formed, and does not perform bounds checking.
+// This function is called by Unmarshal().
+func (sv *SimpleValue) unmarshalCBOR(data []byte) error {
+ if sv == nil {
+ return errors.New("cbor.SimpleValue: UnmarshalCBOR on nil pointer")
+ }
+
+ d := decoder{data: data, dm: defaultDecMode}
+
+ typ, ai, val := d.getHead()
+
+ if typ != cborTypePrimitives {
+ return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue"}
+ }
+ if ai > additionalInformationWith1ByteArgument {
+ return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue", errorMsg: "not simple values"}
+ }
+
+ // It is safe to cast val to uint8 here because
+ // - data is already verified to be well-formed CBOR simple value and
+ // - val is <= math.MaxUint8.
+ *sv = SimpleValue(val)
+ return nil
+}
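The two-byte restriction quoted above means simple values 24..31 have no valid encoding at all. A quick sketch of the three cases:

```go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	for _, sv := range []cbor.SimpleValue{16, 24, 32} {
		b, err := cbor.Marshal(sv)
		fmt.Printf("SimpleValue(%d) -> % x, err: %v\n", uint8(sv), b, err)
	}
	// SimpleValue(16) -> f0    (one-byte form)
	// SimpleValue(24) -> error (24..31 are not well-formed in any encoding)
	// SimpleValue(32) -> f8 20 (two-byte form)
}
```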
diff --git a/vendor/github.com/fxamacker/cbor/v2/stream.go b/vendor/github.com/fxamacker/cbor/v2/stream.go
new file mode 100644
index 000000000..7ac6d7d67
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/stream.go
@@ -0,0 +1,277 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "reflect"
+)
+
+// Decoder reads and decodes CBOR values from io.Reader.
+type Decoder struct {
+ r io.Reader
+ d decoder
+ buf []byte
+ off int // next read offset in buf
+ bytesRead int
+}
+
+// NewDecoder returns a new decoder that reads and decodes from r using
+// the default decoding options.
+func NewDecoder(r io.Reader) *Decoder {
+ return defaultDecMode.NewDecoder(r)
+}
+
+// Decode reads CBOR value and decodes it into the value pointed to by v.
+func (dec *Decoder) Decode(v any) error {
+ _, err := dec.readNext()
+ if err != nil {
+ // Return validation error or read error.
+ return err
+ }
+
+ dec.d.reset(dec.buf[dec.off:])
+ err = dec.d.value(v)
+
+ // Increment dec.off even if decoding err is not nil because
+ // dec.d.off points to the next CBOR data item if current
+ // CBOR data item is valid but failed to be decoded into v.
+ // This allows next CBOR data item to be decoded in next
+ // call to this function.
+ dec.off += dec.d.off
+ dec.bytesRead += dec.d.off
+
+ return err
+}
+
+// Skip skips to the next CBOR data item (if there is any),
+// otherwise it returns an error such as io.EOF or io.ErrUnexpectedEOF.
+func (dec *Decoder) Skip() error {
+ n, err := dec.readNext()
+ if err != nil {
+ // Return validation error or read error.
+ return err
+ }
+
+ dec.off += n
+ dec.bytesRead += n
+ return nil
+}
+
+// NumBytesRead returns the number of bytes read.
+func (dec *Decoder) NumBytesRead() int {
+ return dec.bytesRead
+}
+
+// Buffered returns a reader for the data remaining in the Decoder's buffer.
+// The returned reader is valid until the next call to Decode or Skip.
+func (dec *Decoder) Buffered() io.Reader {
+ return bytes.NewReader(dec.buf[dec.off:])
+}
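A minimal usage sketch of the streaming decoder over two concatenated data items:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// Two concatenated CBOR data items: 1 and "hi".
	stream := bytes.NewReader([]byte{0x01, 0x62, 0x68, 0x69})

	dec := cbor.NewDecoder(stream)
	for {
		var v any
		if err := dec.Decode(&v); err != nil {
			if err == io.EOF {
				break
			}
			panic(err)
		}
		fmt.Printf("%v (%d bytes read so far)\n", v, dec.NumBytesRead())
	}
}
```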
+
+// readNext() reads the next CBOR data item from the Reader into the buffer.
+// It returns the size of the next CBOR data item.
+// It also returns any validation error or read error encountered.
+func (dec *Decoder) readNext() (int, error) {
+ var readErr error
+ var validErr error
+
+ for {
+ // Process any unread data in dec.buf.
+ if dec.off < len(dec.buf) {
+ dec.d.reset(dec.buf[dec.off:])
+ off := dec.off // Save offset before data validation
+ validErr = dec.d.wellformed(true, false)
+ dec.off = off // Restore offset
+
+ if validErr == nil {
+ return dec.d.off, nil
+ }
+
+ if validErr != io.ErrUnexpectedEOF {
+ return 0, validErr
+ }
+
+ // Process last read error on io.ErrUnexpectedEOF.
+ if readErr != nil {
+ if readErr == io.EOF {
+ // current CBOR data item is incomplete.
+ return 0, io.ErrUnexpectedEOF
+ }
+ return 0, readErr
+ }
+ }
+
+ // More data is needed and there was no read error.
+ var n int
+ for n == 0 {
+ n, readErr = dec.read()
+ if n == 0 && readErr != nil {
+ // No more data can be read and read error is encountered.
+ // At this point, validErr is either nil or io.ErrUnexpectedEOF.
+ if readErr == io.EOF {
+ if validErr == io.ErrUnexpectedEOF {
+ // current CBOR data item is incomplete.
+ return 0, io.ErrUnexpectedEOF
+ }
+ }
+ return 0, readErr
+ }
+ }
+
+ // At this point, dec.buf contains new data from last read (n > 0).
+ }
+}
+
+// read() reads data from the Reader into the buffer.
+// It returns the number of bytes read and any read error encountered.
+// Postconditions:
+// - dec.buf contains previously unread data and new data.
+// - dec.off is 0.
+func (dec *Decoder) read() (int, error) {
+ // Grow buf if needed.
+ const minRead = 512
+ if cap(dec.buf)-len(dec.buf)+dec.off < minRead {
+ oldUnreadBuf := dec.buf[dec.off:]
+ dec.buf = make([]byte, len(dec.buf)-dec.off, 2*cap(dec.buf)+minRead)
+ dec.overwriteBuf(oldUnreadBuf)
+ }
+
+ // Copy unread data over read data and reset off to 0.
+ if dec.off > 0 {
+ dec.overwriteBuf(dec.buf[dec.off:])
+ }
+
+ // Read from reader and reslice buf.
+ n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
+ dec.buf = dec.buf[0 : len(dec.buf)+n]
+ return n, err
+}
+
+func (dec *Decoder) overwriteBuf(newBuf []byte) {
+ n := copy(dec.buf, newBuf)
+ dec.buf = dec.buf[:n]
+ dec.off = 0
+}
+
+// Encoder writes CBOR values to io.Writer.
+type Encoder struct {
+ w io.Writer
+ em *encMode
+ indefTypes []cborType
+}
+
+// NewEncoder returns a new encoder that writes to w using the default encoding options.
+func NewEncoder(w io.Writer) *Encoder {
+ return defaultEncMode.NewEncoder(w)
+}
+
+// Encode writes the CBOR encoding of v.
+func (enc *Encoder) Encode(v any) error {
+ if len(enc.indefTypes) > 0 && v != nil {
+ indefType := enc.indefTypes[len(enc.indefTypes)-1]
+ if indefType == cborTypeTextString {
+ k := reflect.TypeOf(v).Kind()
+ if k != reflect.String {
+ return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length text string")
+ }
+ } else if indefType == cborTypeByteString {
+ t := reflect.TypeOf(v)
+ k := t.Kind()
+ if (k != reflect.Array && k != reflect.Slice) || t.Elem().Kind() != reflect.Uint8 {
+ return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length byte string")
+ }
+ }
+ }
+
+ buf := getEncodeBuffer()
+
+ err := encode(buf, enc.em, reflect.ValueOf(v))
+ if err == nil {
+ _, err = enc.w.Write(buf.Bytes())
+ }
+
+ putEncodeBuffer(buf)
+ return err
+}
+
+// StartIndefiniteByteString starts byte string encoding of indefinite length.
+// Subsequent calls of (*Encoder).Encode() encode definite-length byte strings
+// ("chunks") as one contiguous string until EndIndefinite is called.
+func (enc *Encoder) StartIndefiniteByteString() error {
+ return enc.startIndefinite(cborTypeByteString)
+}
+
+// StartIndefiniteTextString starts text string encoding of indefinite length.
+// Subsequent calls of (*Encoder).Encode() encode definite-length text strings
+// ("chunks") as one contiguous string until EndIndefinite is called.
+func (enc *Encoder) StartIndefiniteTextString() error {
+ return enc.startIndefinite(cborTypeTextString)
+}
+
+// StartIndefiniteArray starts array encoding of indefinite length.
+// Subsequent calls of (*Encoder).Encode() encode elements of the array
+// until EndIndefinite is called.
+func (enc *Encoder) StartIndefiniteArray() error {
+ return enc.startIndefinite(cborTypeArray)
+}
+
+// StartIndefiniteMap starts map encoding of indefinite length.
+// Subsequent calls of (*Encoder).Encode() encode elements of the map
+// until EndIndefinite is called.
+func (enc *Encoder) StartIndefiniteMap() error {
+ return enc.startIndefinite(cborTypeMap)
+}
+
+// EndIndefinite closes the most recently opened indefinite-length value.
+func (enc *Encoder) EndIndefinite() error {
+ if len(enc.indefTypes) == 0 {
+ return errors.New("cbor: cannot encode \"break\" code outside indefinite length values")
+ }
+ _, err := enc.w.Write([]byte{cborBreakFlag})
+ if err == nil {
+ enc.indefTypes = enc.indefTypes[:len(enc.indefTypes)-1]
+ }
+ return err
+}
+
+var cborIndefHeader = map[cborType][]byte{
+ cborTypeByteString: {cborByteStringWithIndefiniteLengthHead},
+ cborTypeTextString: {cborTextStringWithIndefiniteLengthHead},
+ cborTypeArray: {cborArrayWithIndefiniteLengthHead},
+ cborTypeMap: {cborMapWithIndefiniteLengthHead},
+}
+
+func (enc *Encoder) startIndefinite(typ cborType) error {
+ if enc.em.indefLength == IndefLengthForbidden {
+ return &IndefiniteLengthError{typ}
+ }
+ _, err := enc.w.Write(cborIndefHeader[typ])
+ if err == nil {
+ enc.indefTypes = append(enc.indefTypes, typ)
+ }
+ return err
+}
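A short sketch of the indefinite-length API: each Encode call emits one definite-length chunk, and EndIndefinite writes the 0xff break code:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	var buf bytes.Buffer
	enc := cbor.NewEncoder(&buf)

	// One indefinite-length text string assembled from two chunks.
	if err := enc.StartIndefiniteTextString(); err != nil {
		panic(err)
	}
	for _, chunk := range []string{"str", "eaming"} {
		if err := enc.Encode(chunk); err != nil {
			panic(err)
		}
	}
	if err := enc.EndIndefinite(); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", buf.Bytes()) // 7f 63 73 74 72 66 65 61 6d 69 6e 67 ff
}
```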
+
+// RawMessage is a raw encoded CBOR value.
+type RawMessage []byte
+
+// MarshalCBOR returns m, or CBOR nil if m is nil or empty.
+func (m RawMessage) MarshalCBOR() ([]byte, error) {
+ if len(m) == 0 {
+ return cborNil, nil
+ }
+ return m, nil
+}
+
+// UnmarshalCBOR creates a copy of data and saves it to *m.
+func (m *RawMessage) UnmarshalCBOR(data []byte) error {
+ if m == nil {
+ return errors.New("cbor.RawMessage: UnmarshalCBOR on nil pointer")
+ }
+ *m = append((*m)[0:0], data...)
+ return nil
+}
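RawMessage defers decoding of a nested item, mirroring json.RawMessage. A minimal sketch with a hypothetical Envelope type:

```go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	type Envelope struct {
		Kind    string          `cbor:"kind"`
		Payload cbor.RawMessage `cbor:"payload"` // decoded later, on demand
	}

	b, err := cbor.Marshal(Envelope{Kind: "num", Payload: cbor.RawMessage{0x18, 0x2a}}) // 42
	if err != nil {
		panic(err)
	}

	var e Envelope
	if err := cbor.Unmarshal(b, &e); err != nil {
		panic(err)
	}
	var n int
	if err := cbor.Unmarshal(e.Payload, &n); err != nil {
		panic(err)
	}
	fmt.Println(e.Kind, n) // num 42
}
```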
diff --git a/vendor/github.com/fxamacker/cbor/v2/structfields.go b/vendor/github.com/fxamacker/cbor/v2/structfields.go
new file mode 100644
index 000000000..cf0a922cd
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/structfields.go
@@ -0,0 +1,268 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+ "reflect"
+ "sort"
+ "strings"
+)
+
+type field struct {
+ name string
+ nameAsInt int64 // used by decoder to match field name with CBOR int
+ cborName []byte
+ cborNameByteString []byte // major type 2 name encoding iff cborName has major type 3
+ idx []int
+ typ reflect.Type
+ ef encodeFunc
+ ief isEmptyFunc
+ izf isZeroFunc
+ typInfo *typeInfo // used by decoder to reuse type info
+ tagged bool // used to choose dominant field (at the same level tagged fields dominate untagged fields)
+ omitEmpty bool // used to skip empty field
+ omitZero bool // used to skip zero field
+ keyAsInt bool // used to encode/decode field name as int
+}
+
+type fields []*field
+
+// indexFieldSorter sorts fields by field idx at each level, breaking ties with idx depth.
+type indexFieldSorter struct {
+ fields fields
+}
+
+func (x *indexFieldSorter) Len() int {
+ return len(x.fields)
+}
+
+func (x *indexFieldSorter) Swap(i, j int) {
+ x.fields[i], x.fields[j] = x.fields[j], x.fields[i]
+}
+
+func (x *indexFieldSorter) Less(i, j int) bool {
+ iIdx, jIdx := x.fields[i].idx, x.fields[j].idx
+ for k := 0; k < len(iIdx) && k < len(jIdx); k++ {
+ if iIdx[k] != jIdx[k] {
+ return iIdx[k] < jIdx[k]
+ }
+ }
+ return len(iIdx) <= len(jIdx)
+}
+
+// nameLevelAndTagFieldSorter sorts fields by field name, idx depth, and presence of tag.
+type nameLevelAndTagFieldSorter struct {
+ fields fields
+}
+
+func (x *nameLevelAndTagFieldSorter) Len() int {
+ return len(x.fields)
+}
+
+func (x *nameLevelAndTagFieldSorter) Swap(i, j int) {
+ x.fields[i], x.fields[j] = x.fields[j], x.fields[i]
+}
+
+func (x *nameLevelAndTagFieldSorter) Less(i, j int) bool {
+ fi, fj := x.fields[i], x.fields[j]
+ if fi.name != fj.name {
+ return fi.name < fj.name
+ }
+ if len(fi.idx) != len(fj.idx) {
+ return len(fi.idx) < len(fj.idx)
+ }
+ if fi.tagged != fj.tagged {
+ return fi.tagged
+ }
+ return i < j // Field i and j have the same name, depth, and tagged status. Nothing else matters.
+}
+
+// getFields returns visible fields of struct type t following visibility rules for JSON encoding.
+func getFields(t reflect.Type) (flds fields, structOptions string) {
+ // Get special field "_" tag options
+ if f, ok := t.FieldByName("_"); ok {
+ tag := f.Tag.Get("cbor")
+ if tag != "-" {
+ structOptions = tag
+ }
+ }
+
+ // nTypes contains next level anonymous fields' types and indexes
+ // (there can be multiple fields of the same type at the same level)
+ flds, nTypes := appendFields(t, nil, nil, nil)
+
+ if len(nTypes) > 0 {
+
+ var cTypes map[reflect.Type][][]int // current level anonymous fields' types and indexes
+ vTypes := map[reflect.Type]bool{t: true} // visited field types at less nested levels
+
+ for len(nTypes) > 0 {
+ cTypes, nTypes = nTypes, nil
+
+ for t, idx := range cTypes {
+ // If there are multiple anonymous fields of the same struct type at the same level, all are ignored.
+ if len(idx) > 1 {
+ continue
+ }
+
+ // Anonymous field of the same type at deeper nested level is ignored.
+ if vTypes[t] {
+ continue
+ }
+ vTypes[t] = true
+
+ flds, nTypes = appendFields(t, idx[0], flds, nTypes)
+ }
+ }
+ }
+
+ sort.Sort(&nameLevelAndTagFieldSorter{flds})
+
+ // Keep visible fields.
+ j := 0 // index of next unique field
+ for i := 0; i < len(flds); {
+ name := flds[i].name
+ if i == len(flds)-1 || // last field
+ name != flds[i+1].name || // field i has unique field name
+ len(flds[i].idx) < len(flds[i+1].idx) || // field i is at a less nested level than field i+1
+ (flds[i].tagged && !flds[i+1].tagged) { // field i is tagged while field i+1 is not
+ flds[j] = flds[i]
+ j++
+ }
+
+ // Skip fields with the same field name.
+ for i++; i < len(flds) && name == flds[i].name; i++ { //nolint:revive
+ }
+ }
+ if j != len(flds) {
+ flds = flds[:j]
+ }
+
+ // Sort fields by field index
+ sort.Sort(&indexFieldSorter{flds})
+
+ return flds, structOptions
+}
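The tag precedence above (a cbor tag is read first, with json tags as fallback) combines with the keyasint and omitempty options. A minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

type Record struct {
	// The cbor tag wins over the json tag; keyasint encodes the key as int 1.
	ID   uint64 `cbor:"1,keyasint" json:"id"`
	Note string `cbor:"note,omitempty"`
}

func main() {
	b, err := cbor.Marshal(Record{ID: 7})
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", b) // a1 01 07: 1-entry map {1: 7}, "note" omitted
}
```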
+
+// appendFields appends type t's exportable fields to flds and anonymous struct fields to nTypes.
+func appendFields(
+ t reflect.Type,
+ idx []int,
+ flds fields,
+ nTypes map[reflect.Type][][]int,
+) (
+ _flds fields,
+ _nTypes map[reflect.Type][][]int,
+) {
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+
+ ft := f.Type
+ for ft.Kind() == reflect.Pointer {
+ ft = ft.Elem()
+ }
+
+ if !isFieldExportable(f, ft.Kind()) {
+ continue
+ }
+
+ cborTag := true
+ tag := f.Tag.Get("cbor")
+ if tag == "" {
+ tag = f.Tag.Get("json")
+ cborTag = false
+ }
+ if tag == "-" {
+ continue
+ }
+
+ tagged := tag != ""
+
+ // Parse field tag options
+ var tagFieldName string
+ var omitempty, omitzero, keyasint bool
+ for j := 0; tag != ""; j++ {
+ var token string
+ idx := strings.IndexByte(tag, ',')
+ if idx == -1 {
+ token, tag = tag, ""
+ } else {
+ token, tag = tag[:idx], tag[idx+1:]
+ }
+ if j == 0 {
+ tagFieldName = token
+ } else {
+ switch token {
+ case "omitempty":
+ omitempty = true
+ case "omitzero":
+ if cborTag || jsonStdlibSupportsOmitzero {
+ omitzero = true
+ }
+ case "keyasint":
+ keyasint = true
+ }
+ }
+ }
+
+ fieldName := tagFieldName
+ if tagFieldName == "" {
+ fieldName = f.Name
+ }
+
+ fIdx := make([]int, len(idx)+1)
+ copy(fIdx, idx)
+ fIdx[len(fIdx)-1] = i
+
+ if !f.Anonymous || ft.Kind() != reflect.Struct || tagFieldName != "" {
+ flds = append(flds, &field{
+ name: fieldName,
+ idx: fIdx,
+ typ: f.Type,
+ omitEmpty: omitempty,
+ omitZero: omitzero,
+ keyAsInt: keyasint,
+ tagged: tagged})
+ } else {
+ if nTypes == nil {
+ nTypes = make(map[reflect.Type][][]int)
+ }
+ nTypes[ft] = append(nTypes[ft], fIdx)
+ }
+ }
+
+ return flds, nTypes
+}
+
+// isFieldExportable returns true if f is an exportable (regular or anonymous) field or
+// a nonexportable anonymous field of struct type.
+// A nonexportable anonymous field of struct type can contain exportable fields.
+func isFieldExportable(f reflect.StructField, fk reflect.Kind) bool { //nolint:gocritic // ignore hugeParam
+ return f.IsExported() || (f.Anonymous && fk == reflect.Struct)
+}
+
+type embeddedFieldNullPtrFunc func(reflect.Value) (reflect.Value, error)
+
+// getFieldValue returns the field value of struct v by index. When a nil pointer
+// to an anonymous (embedded) struct field is encountered, f is called with the last traversed field value.
+func getFieldValue(v reflect.Value, idx []int, f embeddedFieldNullPtrFunc) (fv reflect.Value, err error) {
+ fv = v
+ for i, n := range idx {
+ fv = fv.Field(n)
+
+ if i < len(idx)-1 {
+ if fv.Kind() == reflect.Pointer && fv.Type().Elem().Kind() == reflect.Struct {
+ if fv.IsNil() {
+ // Null pointer to embedded struct field
+ fv, err = f(fv)
+ if err != nil || !fv.IsValid() {
+ return fv, err
+ }
+ }
+ fv = fv.Elem()
+ }
+ }
+ }
+ return fv, nil
+}
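When f returns an invalid reflect.Value for a nil embedded pointer (as isEmptyStruct does above), callers skip the promoted fields, mirroring encoding/json. A sketch assuming the same skip behavior during struct encoding:

```go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

type Inner struct {
	X int `cbor:"x"`
}

type Outer struct {
	*Inner     // nil: promoted field x is skipped rather than dereferenced
	Y      int `cbor:"y"`
}

func main() {
	b, err := cbor.Marshal(Outer{Y: 1})
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", b) // a1 61 79 01: only {"y": 1}
}
```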
diff --git a/vendor/github.com/fxamacker/cbor/v2/tag.go b/vendor/github.com/fxamacker/cbor/v2/tag.go
new file mode 100644
index 000000000..bd8b773f5
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/tag.go
@@ -0,0 +1,329 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sync"
+)
+
+// Tag represents a tagged data item (CBOR major type 6), comprising a tag number and the unmarshaled tag content.
+// NOTE: The same encoding and decoding options that apply to untagged CBOR data items also apply to tag content
+// during encoding and decoding.
+type Tag struct {
+ Number uint64
+ Content any
+}
+
+// RawTag represents a tagged data item (CBOR major type 6), comprising a tag number and the raw tag content.
+// The raw tag content (enclosed data item) is a CBOR-encoded data item.
+// RawTag can be used to delay decoding of a CBOR data item or to precompute the encoding of one.
+type RawTag struct {
+ Number uint64
+ Content RawMessage
+}
+
+// UnmarshalCBOR sets *t with the tag number and the raw tag content copied from data.
+//
+// Deprecated: No longer used by this codec; kept for compatibility
+// with user apps that directly call this function.
+func (t *RawTag) UnmarshalCBOR(data []byte) error {
+ if t == nil {
+ return errors.New("cbor.RawTag: UnmarshalCBOR on nil pointer")
+ }
+
+ d := decoder{data: data, dm: defaultDecMode}
+
+ // Check if data is a well-formed CBOR data item.
+ // RawTag.UnmarshalCBOR() is exported, so
+ // the codec needs to support same behavior for:
+ // - Unmarshal(data, *RawTag)
+ // - RawTag.UnmarshalCBOR(data)
+ err := d.wellformed(false, false)
+ if err != nil {
+ return err
+ }
+
+ return t.unmarshalCBOR(data)
+}
+
+// unmarshalCBOR sets *t with the tag number and the raw tag content copied from data.
+// This function assumes data is well-formed, and does not perform bounds checking.
+// This function is called by Unmarshal().
+func (t *RawTag) unmarshalCBOR(data []byte) error {
+ if t == nil {
+ return errors.New("cbor.RawTag: UnmarshalCBOR on nil pointer")
+ }
+
+ // Decoding CBOR null and undefined to cbor.RawTag is a no-op.
+ if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) {
+ return nil
+ }
+
+ d := decoder{data: data, dm: defaultDecMode}
+
+ // Unmarshal tag number.
+ typ, _, num := d.getHead()
+ if typ != cborTypeTag {
+ return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeRawTag.String()}
+ }
+ t.Number = num
+
+ // Unmarshal tag content.
+ c := d.data[d.off:]
+ t.Content = make([]byte, len(c))
+ copy(t.Content, c)
+ return nil
+}
+
+// MarshalCBOR returns CBOR encoding of t.
+func (t RawTag) MarshalCBOR() ([]byte, error) {
+ if t.Number == 0 && len(t.Content) == 0 {
+ // Marshal uninitialized cbor.RawTag
+ b := make([]byte, len(cborNil))
+ copy(b, cborNil)
+ return b, nil
+ }
+
+ e := getEncodeBuffer()
+
+ encodeHead(e, byte(cborTypeTag), t.Number)
+
+ content := t.Content
+ if len(content) == 0 {
+ content = cborNil
+ }
+
+ buf := make([]byte, len(e.Bytes())+len(content))
+ n := copy(buf, e.Bytes())
+ copy(buf[n:], content)
+
+ putEncodeBuffer(e)
+ return buf, nil
+}
+
+// DecTagMode specifies how decoder handles tag number.
+type DecTagMode int
+
+const (
+ // DecTagIgnored makes decoder ignore tag number (skips if present).
+ DecTagIgnored DecTagMode = iota
+
+ // DecTagOptional makes decoder verify tag number if it's present.
+ DecTagOptional
+
+ // DecTagRequired makes decoder verify tag number and tag number must be present.
+ DecTagRequired
+
+ maxDecTagMode
+)
+
+func (dtm DecTagMode) valid() bool {
+ return dtm >= 0 && dtm < maxDecTagMode
+}
+
+// EncTagMode specifies how encoder handles tag number.
+type EncTagMode int
+
+const (
+ // EncTagNone makes encoder not encode tag number.
+ EncTagNone EncTagMode = iota
+
+ // EncTagRequired makes encoder encode tag number.
+ EncTagRequired
+
+ maxEncTagMode
+)
+
+func (etm EncTagMode) valid() bool {
+ return etm >= 0 && etm < maxEncTagMode
+}
+
+// TagOptions specifies how encoder and decoder handle tag number.
+type TagOptions struct {
+ DecTag DecTagMode
+ EncTag EncTagMode
+}
+
+// TagSet is an interface to add and remove tag info. It is used by EncMode and DecMode
+// to provide CBOR tag support.
+type TagSet interface {
+ // Add adds given tag number(s), content type, and tag options to TagSet.
+ Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error
+
+ // Remove removes given tag content type from TagSet.
+ Remove(contentType reflect.Type)
+
+ tagProvider
+}
+
+type tagProvider interface {
+ getTagItemFromType(t reflect.Type) *tagItem
+ getTypeFromTagNum(num []uint64) reflect.Type
+}
+
+type tagItem struct {
+ num []uint64
+ cborTagNum []byte
+ contentType reflect.Type
+ opts TagOptions
+}
+
+func (t *tagItem) equalTagNum(num []uint64) bool {
+ // Fast path to compare 1 tag number
+ if len(t.num) == 1 && len(num) == 1 && t.num[0] == num[0] {
+ return true
+ }
+
+ if len(t.num) != len(num) {
+ return false
+ }
+
+ for i := 0; i < len(t.num); i++ {
+ if t.num[i] != num[i] {
+ return false
+ }
+ }
+
+ return true
+}
+
+type (
+ tagSet map[reflect.Type]*tagItem
+
+ syncTagSet struct {
+ sync.RWMutex
+ t tagSet
+ }
+)
+
+func (t tagSet) getTagItemFromType(typ reflect.Type) *tagItem {
+ return t[typ]
+}
+
+func (t tagSet) getTypeFromTagNum(num []uint64) reflect.Type {
+ for typ, tag := range t {
+ if tag.equalTagNum(num) {
+ return typ
+ }
+ }
+ return nil
+}
+
+// NewTagSet returns TagSet (safe for concurrency).
+func NewTagSet() TagSet {
+ return &syncTagSet{t: make(map[reflect.Type]*tagItem)}
+}
+
+// Add adds given tag number(s), content type, and tag options to TagSet.
+func (t *syncTagSet) Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error {
+ if contentType == nil {
+ return errors.New("cbor: cannot add nil content type to TagSet")
+ }
+ for contentType.Kind() == reflect.Pointer {
+ contentType = contentType.Elem()
+ }
+ tag, err := newTagItem(opts, contentType, num, nestedNum...)
+ if err != nil {
+ return err
+ }
+ t.Lock()
+ defer t.Unlock()
+ for typ, ti := range t.t {
+ if typ == contentType {
+ return errors.New("cbor: content type " + contentType.String() + " already exists in TagSet")
+ }
+ if ti.equalTagNum(tag.num) {
+ return fmt.Errorf("cbor: tag number %v already exists in TagSet", tag.num)
+ }
+ }
+ t.t[contentType] = tag
+ return nil
+}
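A usage sketch tying a TagSet to an encoding mode (EncModeWithTags is part of this package's public API); tag number 42 is an arbitrary choice, since the low-numbered and self-described tags are rejected by newTagItem below:

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/fxamacker/cbor/v2"
)

type CID string // hypothetical application-defined content identifier

func main() {
	tags := cbor.NewTagSet()
	if err := tags.Add(
		cbor.TagOptions{EncTag: cbor.EncTagRequired, DecTag: cbor.DecTagRequired},
		reflect.TypeOf(CID("")), 42,
	); err != nil {
		panic(err)
	}

	em, err := cbor.EncOptions{}.EncModeWithTags(tags)
	if err != nil {
		panic(err)
	}
	b, err := em.Marshal(CID("abc"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", b) // d8 2a 63 61 62 63: tag(42) "abc"
}
```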
+
+// Remove removes given tag content type from TagSet.
+func (t *syncTagSet) Remove(contentType reflect.Type) {
+ for contentType.Kind() == reflect.Pointer {
+ contentType = contentType.Elem()
+ }
+ t.Lock()
+ delete(t.t, contentType)
+ t.Unlock()
+}
+
+func (t *syncTagSet) getTagItemFromType(typ reflect.Type) *tagItem {
+ t.RLock()
+ ti := t.t[typ]
+ t.RUnlock()
+ return ti
+}
+
+func (t *syncTagSet) getTypeFromTagNum(num []uint64) reflect.Type {
+ t.RLock()
+ rt := t.t.getTypeFromTagNum(num)
+ t.RUnlock()
+ return rt
+}
+
+func newTagItem(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) (*tagItem, error) {
+ if opts.DecTag == DecTagIgnored && opts.EncTag == EncTagNone {
+ return nil, errors.New("cbor: cannot add tag with DecTagIgnored and EncTagNone options to TagSet")
+ }
+ if contentType.PkgPath() == "" || contentType.Kind() == reflect.Interface {
+ return nil, errors.New("cbor: can only add named types to TagSet, got " + contentType.String())
+ }
+ if contentType == typeTime {
+ return nil, errors.New("cbor: cannot add time.Time to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead")
+ }
+ if contentType == typeBigInt {
+ return nil, errors.New("cbor: cannot add big.Int to TagSet, it's built-in and supported automatically")
+ }
+ if contentType == typeTag {
+ return nil, errors.New("cbor: cannot add cbor.Tag to TagSet")
+ }
+ if contentType == typeRawTag {
+ return nil, errors.New("cbor: cannot add cbor.RawTag to TagSet")
+ }
+ if num == 0 || num == 1 {
+ return nil, errors.New("cbor: cannot add tag number 0 or 1 to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead")
+ }
+ if num == 2 || num == 3 {
+ return nil, errors.New("cbor: cannot add tag number 2 or 3 to TagSet, it's built-in and supported automatically")
+ }
+ if num == tagNumSelfDescribedCBOR {
+ return nil, errors.New("cbor: cannot add tag number 55799 to TagSet, it's built-in and ignored automatically")
+ }
+
+ te := tagItem{num: []uint64{num}, opts: opts, contentType: contentType}
+ te.num = append(te.num, nestedNum...)
+
+ // Cache encoded tag numbers
+ e := getEncodeBuffer()
+ for _, n := range te.num {
+ encodeHead(e, byte(cborTypeTag), n)
+ }
+ te.cborTagNum = make([]byte, e.Len())
+ copy(te.cborTagNum, e.Bytes())
+ putEncodeBuffer(e)
+
+ return &te, nil
+}
+
+var (
+ typeTag = reflect.TypeOf(Tag{})
+ typeRawTag = reflect.TypeOf(RawTag{})
+)
+
+// WrongTagError describes mismatch between CBOR tag and registered tag.
+type WrongTagError struct {
+ RegisteredType reflect.Type
+ RegisteredTagNum []uint64
+ TagNum []uint64
+}
+
+func (e *WrongTagError) Error() string {
+ return fmt.Sprintf("cbor: wrong tag number for %s, got %v, expected %v", e.RegisteredType.String(), e.TagNum, e.RegisteredTagNum)
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/valid.go b/vendor/github.com/fxamacker/cbor/v2/valid.go
new file mode 100644
index 000000000..b40793b95
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/valid.go
@@ -0,0 +1,394 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+ "math"
+ "strconv"
+
+ "github.com/x448/float16"
+)
+
+// SyntaxError is a description of a CBOR syntax error.
+type SyntaxError struct {
+ msg string
+}
+
+func (e *SyntaxError) Error() string { return e.msg }
+
+// SemanticError is a description of a CBOR semantic error.
+type SemanticError struct {
+ msg string
+}
+
+func (e *SemanticError) Error() string { return e.msg }
+
+// MaxNestedLevelError indicates that the max nested level of any combination of CBOR arrays/maps/tags was exceeded.
+type MaxNestedLevelError struct {
+ maxNestedLevels int
+}
+
+func (e *MaxNestedLevelError) Error() string {
+ return "cbor: exceeded max nested level " + strconv.Itoa(e.maxNestedLevels)
+}
+
+// MaxArrayElementsError indicates that the max number of elements for CBOR arrays was exceeded.
+type MaxArrayElementsError struct {
+ maxArrayElements int
+}
+
+func (e *MaxArrayElementsError) Error() string {
+ return "cbor: exceeded max number of elements " + strconv.Itoa(e.maxArrayElements) + " for CBOR array"
+}
+
+// MaxMapPairsError indicates that the max number of key-value pairs for CBOR maps was exceeded.
+type MaxMapPairsError struct {
+ maxMapPairs int
+}
+
+func (e *MaxMapPairsError) Error() string {
+ return "cbor: exceeded max number of key-value pairs " + strconv.Itoa(e.maxMapPairs) + " for CBOR map"
+}
+
+// IndefiniteLengthError indicates that a disallowed indefinite-length item was found.
+type IndefiniteLengthError struct {
+ t cborType
+}
+
+func (e *IndefiniteLengthError) Error() string {
+ return "cbor: indefinite-length " + e.t.String() + " isn't allowed"
+}
+
+// TagsMdError indicates that a disallowed CBOR tag was found.
+type TagsMdError struct {
+}
+
+func (e *TagsMdError) Error() string {
+ return "cbor: CBOR tag isn't allowed"
+}
+
+// ExtraneousDataError indicates that extraneous data follows a well-formed CBOR data item.
+type ExtraneousDataError struct {
+ numOfBytes int // number of bytes of extraneous data
+ index int // location of extraneous data
+}
+
+func (e *ExtraneousDataError) Error() string {
+ return "cbor: " + strconv.Itoa(e.numOfBytes) + " bytes of extraneous data starting at index " + strconv.Itoa(e.index)
+}
+
+// wellformed checks whether the CBOR data item is well-formed.
+// allowExtraData indicates if extraneous data is allowed after the CBOR data item.
+// - use allowExtraData = true when using Decoder.Decode()
+// - use allowExtraData = false when using Unmarshal()
+func (d *decoder) wellformed(allowExtraData bool, checkBuiltinTags bool) error {
+ if len(d.data) == d.off {
+ return io.EOF
+ }
+ _, err := d.wellformedInternal(0, checkBuiltinTags)
+ if err == nil {
+ if !allowExtraData && d.off != len(d.data) {
+ err = &ExtraneousDataError{len(d.data) - d.off, d.off}
+ }
+ }
+ return err
+}
+
+// wellformedInternal checks data's well-formedness and returns max depth and error.
+func (d *decoder) wellformedInternal(depth int, checkBuiltinTags bool) (int, error) { //nolint:gocyclo
+ t, _, val, indefiniteLength, err := d.wellformedHeadWithIndefiniteLengthFlag()
+ if err != nil {
+ return 0, err
+ }
+
+ switch t {
+ case cborTypeByteString, cborTypeTextString:
+ if indefiniteLength {
+ if d.dm.indefLength == IndefLengthForbidden {
+ return 0, &IndefiniteLengthError{t}
+ }
+ return d.wellformedIndefiniteString(t, depth, checkBuiltinTags)
+ }
+ valInt := int(val)
+ if valInt < 0 {
+ // Detect integer overflow
+ return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, causing integer overflow")
+ }
+ if len(d.data)-d.off < valInt { // valInt+off may overflow integer
+ return 0, io.ErrUnexpectedEOF
+ }
+ d.off += valInt
+
+ case cborTypeArray, cborTypeMap:
+ depth++
+ if depth > d.dm.maxNestedLevels {
+ return 0, &MaxNestedLevelError{d.dm.maxNestedLevels}
+ }
+
+ if indefiniteLength {
+ if d.dm.indefLength == IndefLengthForbidden {
+ return 0, &IndefiniteLengthError{t}
+ }
+ return d.wellformedIndefiniteArrayOrMap(t, depth, checkBuiltinTags)
+ }
+
+ valInt := int(val)
+ if valInt < 0 {
+ // Detect integer overflow
+ return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, causing integer overflow")
+ }
+
+ if t == cborTypeArray {
+ if valInt > d.dm.maxArrayElements {
+ return 0, &MaxArrayElementsError{d.dm.maxArrayElements}
+ }
+ } else {
+ if valInt > d.dm.maxMapPairs {
+ return 0, &MaxMapPairsError{d.dm.maxMapPairs}
+ }
+ }
+
+ count := 1
+ if t == cborTypeMap {
+ count = 2
+ }
+ maxDepth := depth
+ for j := 0; j < count; j++ {
+ for i := 0; i < valInt; i++ {
+ var dpt int
+ if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil {
+ return 0, err
+ }
+ if dpt > maxDepth {
+ maxDepth = dpt // Save max depth
+ }
+ }
+ }
+ depth = maxDepth
+
+ case cborTypeTag:
+ if d.dm.tagsMd == TagsForbidden {
+ return 0, &TagsMdError{}
+ }
+
+ tagNum := val
+
+ // Scan nested tag numbers to avoid recursion.
+ for {
+ if len(d.data) == d.off { // Tag number must be followed by tag content.
+ return 0, io.ErrUnexpectedEOF
+ }
+ if checkBuiltinTags {
+ err = validBuiltinTag(tagNum, d.data[d.off])
+ if err != nil {
+ return 0, err
+ }
+ }
+ if d.dm.bignumTag == BignumTagForbidden && (tagNum == 2 || tagNum == 3) {
+ return 0, &UnacceptableDataItemError{
+ CBORType: cborTypeTag.String(),
+ Message: "bignum",
+ }
+ }
+ if getType(d.data[d.off]) != cborTypeTag {
+ break
+ }
+ if _, _, tagNum, err = d.wellformedHead(); err != nil {
+ return 0, err
+ }
+ depth++
+ if depth > d.dm.maxNestedLevels {
+ return 0, &MaxNestedLevelError{d.dm.maxNestedLevels}
+ }
+ }
+ // Check tag content.
+ return d.wellformedInternal(depth, checkBuiltinTags)
+ }
+
+ return depth, nil
+}
+
+// wellformedIndefiniteString checks indefinite length byte/text string's well-formedness and returns max depth and error.
+func (d *decoder) wellformedIndefiniteString(t cborType, depth int, checkBuiltinTags bool) (int, error) {
+ var err error
+ for {
+ if len(d.data) == d.off {
+ return 0, io.ErrUnexpectedEOF
+ }
+ if isBreakFlag(d.data[d.off]) {
+ d.off++
+ break
+ }
+ // Peek ahead to get next type and indefinite length status.
+ nt, ai := parseInitialByte(d.data[d.off])
+ if t != nt {
+ return 0, &SyntaxError{"cbor: wrong element type " + nt.String() + " for indefinite-length " + t.String()}
+ }
+ if additionalInformation(ai).isIndefiniteLength() {
+ return 0, &SyntaxError{"cbor: indefinite-length " + t.String() + " chunk is not definite-length"}
+ }
+ if depth, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil {
+ return 0, err
+ }
+ }
+ return depth, nil
+}
+
+// wellformedIndefiniteArrayOrMap checks indefinite length array/map's well-formedness and returns max depth and error.
+func (d *decoder) wellformedIndefiniteArrayOrMap(t cborType, depth int, checkBuiltinTags bool) (int, error) {
+ var err error
+ maxDepth := depth
+ i := 0
+ for {
+ if len(d.data) == d.off {
+ return 0, io.ErrUnexpectedEOF
+ }
+ if isBreakFlag(d.data[d.off]) {
+ d.off++
+ break
+ }
+ var dpt int
+ if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil {
+ return 0, err
+ }
+ if dpt > maxDepth {
+ maxDepth = dpt
+ }
+ i++
+ if t == cborTypeArray {
+ if i > d.dm.maxArrayElements {
+ return 0, &MaxArrayElementsError{d.dm.maxArrayElements}
+ }
+ } else {
+ if i%2 == 0 && i/2 > d.dm.maxMapPairs {
+ return 0, &MaxMapPairsError{d.dm.maxMapPairs}
+ }
+ }
+ }
+ if t == cborTypeMap && i%2 == 1 {
+ return 0, &SyntaxError{"cbor: unexpected \"break\" code"}
+ }
+ return maxDepth, nil
+}
+
+func (d *decoder) wellformedHeadWithIndefiniteLengthFlag() (
+ t cborType,
+ ai byte,
+ val uint64,
+ indefiniteLength bool,
+ err error,
+) {
+ t, ai, val, err = d.wellformedHead()
+ if err != nil {
+ return
+ }
+ indefiniteLength = additionalInformation(ai).isIndefiniteLength()
+ return
+}
+
+func (d *decoder) wellformedHead() (t cborType, ai byte, val uint64, err error) {
+ dataLen := len(d.data) - d.off
+ if dataLen == 0 {
+ return 0, 0, 0, io.ErrUnexpectedEOF
+ }
+
+ t, ai = parseInitialByte(d.data[d.off])
+ val = uint64(ai)
+ d.off++
+ dataLen--
+
+ if ai <= maxAdditionalInformationWithoutArgument {
+ return t, ai, val, nil
+ }
+
+ if ai == additionalInformationWith1ByteArgument {
+ const argumentSize = 1
+ if dataLen < argumentSize {
+ return 0, 0, 0, io.ErrUnexpectedEOF
+ }
+ val = uint64(d.data[d.off])
+ d.off++
+ if t == cborTypePrimitives && val < 32 {
+ return 0, 0, 0, &SyntaxError{"cbor: invalid simple value " + strconv.Itoa(int(val)) + " for type " + t.String()}
+ }
+ return t, ai, val, nil
+ }
+
+ if ai == additionalInformationWith2ByteArgument {
+ const argumentSize = 2
+ if dataLen < argumentSize {
+ return 0, 0, 0, io.ErrUnexpectedEOF
+ }
+ val = uint64(binary.BigEndian.Uint16(d.data[d.off : d.off+argumentSize]))
+ d.off += argumentSize
+ if t == cborTypePrimitives {
+ if err := d.acceptableFloat(float64(float16.Frombits(uint16(val)).Float32())); err != nil {
+ return 0, 0, 0, err
+ }
+ }
+ return t, ai, val, nil
+ }
+
+ if ai == additionalInformationWith4ByteArgument {
+ const argumentSize = 4
+ if dataLen < argumentSize {
+ return 0, 0, 0, io.ErrUnexpectedEOF
+ }
+ val = uint64(binary.BigEndian.Uint32(d.data[d.off : d.off+argumentSize]))
+ d.off += argumentSize
+ if t == cborTypePrimitives {
+ if err := d.acceptableFloat(float64(math.Float32frombits(uint32(val)))); err != nil {
+ return 0, 0, 0, err
+ }
+ }
+ return t, ai, val, nil
+ }
+
+ if ai == additionalInformationWith8ByteArgument {
+ const argumentSize = 8
+ if dataLen < argumentSize {
+ return 0, 0, 0, io.ErrUnexpectedEOF
+ }
+ val = binary.BigEndian.Uint64(d.data[d.off : d.off+argumentSize])
+ d.off += argumentSize
+ if t == cborTypePrimitives {
+ if err := d.acceptableFloat(math.Float64frombits(val)); err != nil {
+ return 0, 0, 0, err
+ }
+ }
+ return t, ai, val, nil
+ }
+
+ if additionalInformation(ai).isIndefiniteLength() {
+ switch t {
+ case cborTypePositiveInt, cborTypeNegativeInt, cborTypeTag:
+ return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()}
+ case cborTypePrimitives: // 0xff (break code) should not appear outside wellformedIndefinite().
+ return 0, 0, 0, &SyntaxError{"cbor: unexpected \"break\" code"}
+ }
+ return t, ai, val, nil
+ }
+
+ // ai == 28, 29, 30
+ return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()}
+}
+
+func (d *decoder) acceptableFloat(f float64) error {
+ switch {
+ case d.dm.nanDec == NaNDecodeForbidden && math.IsNaN(f):
+ return &UnacceptableDataItemError{
+ CBORType: cborTypePrimitives.String(),
+ Message: "floating-point NaN",
+ }
+ case d.dm.infDec == InfDecodeForbidden && math.IsInf(f, 0):
+ return &UnacceptableDataItemError{
+ CBORType: cborTypePrimitives.String(),
+ Message: "floating-point infinity",
+ }
+ }
+ return nil
+}
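acceptableFloat is what enforces the NaN/Inf decode options during the well-formedness pass. A sketch, using the NaNDec option and the NaNDecodeForbidden constant referenced in the code above:

```go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	dm, err := cbor.DecOptions{NaNDec: cbor.NaNDecodeForbidden}.DecMode()
	if err != nil {
		panic(err)
	}

	nan := []byte{0xf9, 0x7e, 0x00} // float16 NaN
	var f float64
	err = dm.Unmarshal(nan, &f)
	fmt.Println(err) // rejected by acceptableFloat during the well-formedness pass
}
```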
diff --git a/vendor/github.com/go-openapi/jsonpointer/.golangci.yml b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml
new file mode 100644
index 000000000..22f8d21cc
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml
@@ -0,0 +1,61 @@
+linters-settings:
+ govet:
+ check-shadowing: true
+ golint:
+ min-confidence: 0
+ gocyclo:
+ min-complexity: 45
+ maligned:
+ suggest-new: true
+ dupl:
+ threshold: 200
+ goconst:
+ min-len: 2
+ min-occurrences: 3
+
+linters:
+ enable-all: true
+ disable:
+ - maligned
+ - unparam
+ - lll
+ - gochecknoinits
+ - gochecknoglobals
+ - funlen
+ - godox
+ - gocognit
+ - whitespace
+ - wsl
+ - wrapcheck
+ - testpackage
+ - nlreturn
+ - gomnd
+ - exhaustivestruct
+ - goerr113
+ - errorlint
+ - nestif
+ - godot
+ - gofumpt
+ - paralleltest
+ - tparallel
+ - thelper
+ - ifshort
+ - exhaustruct
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
+ - nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
+ - interfacer
+ - scopelint
+ - varcheck
+ - structcheck
+ - golint
+ - nosnakecase
diff --git a/vendor/github.com/go-openapi/jsonpointer/README.md b/vendor/github.com/go-openapi/jsonpointer/README.md
index 813788aff..0108f1d57 100644
--- a/vendor/github.com/go-openapi/jsonpointer/README.md
+++ b/vendor/github.com/go-openapi/jsonpointer/README.md
@@ -1,6 +1,10 @@
-# gojsonpointer [](https://travis-ci.org/go-openapi/jsonpointer) [](https://codecov.io/gh/go-openapi/jsonpointer) [](https://slackin.goswagger.io)
+# gojsonpointer [](https://github.com/go-openapi/jsonpointer/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/jsonpointer)
+
+[](https://slackin.goswagger.io)
+[](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE)
+[](https://pkg.go.dev/github.com/go-openapi/jsonpointer)
+[](https://goreportcard.com/report/github.com/go-openapi/jsonpointer)
-[](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) [](http://godoc.org/github.com/go-openapi/jsonpointer)
An implementation of JSON Pointer - Go language
## Status
diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go
index 7df9853de..d970c7cf4 100644
--- a/vendor/github.com/go-openapi/jsonpointer/pointer.go
+++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go
@@ -26,6 +26,7 @@
package jsonpointer
import (
+ "encoding/json"
"errors"
"fmt"
"reflect"
@@ -40,6 +41,7 @@ const (
pointerSeparator = `/`
invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator
+ notFound = `Can't find the pointer in the document`
)
var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem()
@@ -48,13 +50,13 @@ var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem()
// JSONPointable is an interface for structs to implement when they need to customize the
// json pointer process
type JSONPointable interface {
- JSONLookup(string) (interface{}, error)
+ JSONLookup(string) (any, error)
}
// JSONSetable is an interface for structs to implement when they need to customize the
// json pointer process
type JSONSetable interface {
- JSONSet(string, interface{}) error
+ JSONSet(string, any) error
}
// New creates a new json pointer for the given string
@@ -81,9 +83,7 @@ func (p *Pointer) parse(jsonPointerString string) error {
err = errors.New(invalidStart)
} else {
referenceTokens := strings.Split(jsonPointerString, pointerSeparator)
- for _, referenceToken := range referenceTokens[1:] {
- p.referenceTokens = append(p.referenceTokens, referenceToken)
- }
+ p.referenceTokens = append(p.referenceTokens, referenceTokens[1:]...)
}
}
@@ -91,38 +91,58 @@ func (p *Pointer) parse(jsonPointerString string) error {
}
// Get uses the pointer to retrieve a value from a JSON document
-func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) {
+func (p *Pointer) Get(document any) (any, reflect.Kind, error) {
return p.get(document, swag.DefaultJSONNameProvider)
}
// Set uses the pointer to set a value from a JSON document
-func (p *Pointer) Set(document interface{}, value interface{}) (interface{}, error) {
+func (p *Pointer) Set(document any, value any) (any, error) {
return document, p.set(document, value, swag.DefaultJSONNameProvider)
}
// GetForToken gets a value for a json pointer token 1 level deep
-func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) {
+func GetForToken(document any, decodedToken string) (any, reflect.Kind, error) {
return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider)
}
// SetForToken gets a value for a json pointer token 1 level deep
-func SetForToken(document interface{}, decodedToken string, value interface{}) (interface{}, error) {
+func SetForToken(document any, decodedToken string, value any) (any, error) {
return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider)
}
-func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) {
+func isNil(input any) bool {
+ if input == nil {
+ return true
+ }
+
+ kind := reflect.TypeOf(input).Kind()
+ switch kind { //nolint:exhaustive
+ case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan:
+ return reflect.ValueOf(input).IsNil()
+ default:
+ return false
+ }
+}
+
+func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvider) (any, reflect.Kind, error) {
rValue := reflect.Indirect(reflect.ValueOf(node))
kind := rValue.Kind()
+ if isNil(node) {
+ return nil, kind, fmt.Errorf("nil value has no field %q", decodedToken)
+ }
- if rValue.Type().Implements(jsonPointableType) {
- r, err := node.(JSONPointable).JSONLookup(decodedToken)
+ switch typed := node.(type) {
+ case JSONPointable:
+ r, err := typed.JSONLookup(decodedToken)
if err != nil {
return nil, kind, err
}
return r, kind, nil
+ case *any: // case of a pointer to interface, that is not resolved by reflect.Indirect
+ return getSingleImpl(*typed, decodedToken, nameProvider)
}
- switch kind {
+ switch kind { //nolint:exhaustive
case reflect.Struct:
nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
if !ok {
@@ -159,7 +179,7 @@ func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.Nam
}
-func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *swag.NameProvider) error {
+func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameProvider) error {
rValue := reflect.Indirect(reflect.ValueOf(node))
if ns, ok := node.(JSONSetable); ok { // pointer impl
@@ -170,7 +190,7 @@ func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *sw
return node.(JSONSetable).JSONSet(decodedToken, data)
}
- switch rValue.Kind() {
+ switch rValue.Kind() { //nolint:exhaustive
case reflect.Struct:
nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
if !ok {
@@ -210,7 +230,7 @@ func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *sw
}
-func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) {
+func (p *Pointer) get(node any, nameProvider *swag.NameProvider) (any, reflect.Kind, error) {
if nameProvider == nil {
nameProvider = swag.DefaultJSONNameProvider
@@ -231,8 +251,7 @@ func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interf
if err != nil {
return nil, knd, err
}
- node, kind = r, knd
-
+ node = r
}
rValue := reflect.ValueOf(node)
@@ -241,11 +260,11 @@ func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interf
return node, kind, nil
}
-func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) error {
+func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error {
knd := reflect.ValueOf(node).Kind()
if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array {
- return fmt.Errorf("only structs, pointers, maps and slices are supported for setting values")
+ return errors.New("only structs, pointers, maps and slices are supported for setting values")
}
if nameProvider == nil {
@@ -284,7 +303,7 @@ func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) e
continue
}
- switch kind {
+ switch kind { //nolint:exhaustive
case reflect.Struct:
nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
if !ok {
@@ -363,6 +382,128 @@ func (p *Pointer) String() string {
return pointerString
}
+func (p *Pointer) Offset(document string) (int64, error) {
+ dec := json.NewDecoder(strings.NewReader(document))
+ var offset int64
+ for _, ttk := range p.DecodedTokens() {
+ tk, err := dec.Token()
+ if err != nil {
+ return 0, err
+ }
+ switch tk := tk.(type) {
+ case json.Delim:
+ switch tk {
+ case '{':
+ offset, err = offsetSingleObject(dec, ttk)
+ if err != nil {
+ return 0, err
+ }
+ case '[':
+ offset, err = offsetSingleArray(dec, ttk)
+ if err != nil {
+ return 0, err
+ }
+ default:
+ return 0, fmt.Errorf("invalid token %#v", tk)
+ }
+ default:
+ return 0, fmt.Errorf("invalid token %#v", tk)
+ }
+ }
+ return offset, nil
+}
+
+func offsetSingleObject(dec *json.Decoder, decodedToken string) (int64, error) {
+ for dec.More() {
+ offset := dec.InputOffset()
+ tk, err := dec.Token()
+ if err != nil {
+ return 0, err
+ }
+ switch tk := tk.(type) {
+ case json.Delim:
+ switch tk {
+ case '{':
+ if err = drainSingle(dec); err != nil {
+ return 0, err
+ }
+ case '[':
+ if err = drainSingle(dec); err != nil {
+ return 0, err
+ }
+ }
+ case string:
+ if tk == decodedToken {
+ return offset, nil
+ }
+ default:
+ return 0, fmt.Errorf("invalid token %#v", tk)
+ }
+ }
+ return 0, fmt.Errorf("token reference %q not found", decodedToken)
+}
+
+func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) {
+ idx, err := strconv.Atoi(decodedToken)
+ if err != nil {
+ return 0, fmt.Errorf("token reference %q is not a number: %v", decodedToken, err)
+ }
+ var i int
+ for i = 0; i < idx && dec.More(); i++ {
+ tk, err := dec.Token()
+ if err != nil {
+ return 0, err
+ }
+
+ if delim, isDelim := tk.(json.Delim); isDelim {
+ switch delim {
+ case '{':
+ if err = drainSingle(dec); err != nil {
+ return 0, err
+ }
+ case '[':
+ if err = drainSingle(dec); err != nil {
+ return 0, err
+ }
+ }
+ }
+ }
+
+ if !dec.More() {
+ return 0, fmt.Errorf("token reference %q not found", decodedToken)
+ }
+ return dec.InputOffset(), nil
+}
+
+// drainSingle drains a single level of object or array.
+// The caller must guarantee that the opening delim (i.e. '{' or '[') has already been consumed.
+func drainSingle(dec *json.Decoder) error {
+ for dec.More() {
+ tk, err := dec.Token()
+ if err != nil {
+ return err
+ }
+ if delim, isDelim := tk.(json.Delim); isDelim {
+ switch delim {
+ case '{':
+ if err = drainSingle(dec); err != nil {
+ return err
+ }
+ case '[':
+ if err = drainSingle(dec); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ // Consumes the ending delim
+ if _, err := dec.Token(); err != nil {
+ return err
+ }
+ return nil
+}
+
// Specific JSON pointer encoding here
// ~0 => ~
// ~1 => /
@@ -377,14 +518,14 @@ const (
// Unescape unescapes a json pointer reference token string to the original representation
func Unescape(token string) string {
- step1 := strings.Replace(token, encRefTok1, decRefTok1, -1)
- step2 := strings.Replace(step1, encRefTok0, decRefTok0, -1)
+ step1 := strings.ReplaceAll(token, encRefTok1, decRefTok1)
+ step2 := strings.ReplaceAll(step1, encRefTok0, decRefTok0)
return step2
}
// Escape escapes a pointer reference token string
func Escape(token string) string {
- step1 := strings.Replace(token, decRefTok0, encRefTok0, -1)
- step2 := strings.Replace(step1, decRefTok1, encRefTok1, -1)
+ step1 := strings.ReplaceAll(token, decRefTok0, encRefTok0)
+ step2 := strings.ReplaceAll(step1, decRefTok1, encRefTok1)
return step2
}
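For orientation, here is how the reworked jsonpointer API fits together: `Get`/`Set` now take `any`, and the new `Offset` method resolves a pointer to a byte offset in the raw JSON text via `encoding/json`'s streaming decoder. A minimal sketch (the document literal is illustrative, not from this repository):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/jsonpointer"
)

func main() {
	// Resolve a pointer against a decoded, untyped document.
	doc := map[string]any{
		"servers": []any{
			map[string]any{"url": "https://example.test"}, // hypothetical data
		},
	}
	p, err := jsonpointer.New("/servers/0/url")
	if err != nil {
		panic(err)
	}
	v, kind, err := p.Get(doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(v, kind) // https://example.test string

	// Offset works on the raw JSON text instead: it returns the byte
	// offset where the referenced token starts.
	raw := `{"servers":[{"url":"https://example.test"}]}`
	off, err := p.Offset(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println(off)
}
```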
diff --git a/vendor/github.com/go-openapi/swag/.gitignore b/vendor/github.com/go-openapi/swag/.gitignore
index d69b53acc..c4b1b64f0 100644
--- a/vendor/github.com/go-openapi/swag/.gitignore
+++ b/vendor/github.com/go-openapi/swag/.gitignore
@@ -2,3 +2,4 @@ secrets.yml
vendor
Godeps
.idea
+*.out
diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml
index bf503e400..80e2be004 100644
--- a/vendor/github.com/go-openapi/swag/.golangci.yml
+++ b/vendor/github.com/go-openapi/swag/.golangci.yml
@@ -4,14 +4,14 @@ linters-settings:
golint:
min-confidence: 0
gocyclo:
- min-complexity: 25
+ min-complexity: 45
maligned:
suggest-new: true
dupl:
- threshold: 100
+ threshold: 200
goconst:
min-len: 3
- min-occurrences: 2
+ min-occurrences: 3
linters:
enable-all: true
@@ -20,35 +20,41 @@ linters:
- lll
- gochecknoinits
- gochecknoglobals
- - nlreturn
- - testpackage
+ - funlen
+ - godox
+ - gocognit
+ - whitespace
+ - wsl
- wrapcheck
+ - testpackage
+ - nlreturn
- gomnd
- - exhaustive
- exhaustivestruct
- goerr113
- - wsl
- - whitespace
- - gofumpt
- - godot
+ - errorlint
- nestif
- - godox
- - funlen
- - gci
- - gocognit
+ - godot
+ - gofumpt
- paralleltest
+ - tparallel
- thelper
- ifshort
- - gomoddirectives
- - cyclop
- - forcetypeassert
- - ireturn
- - tagliatelle
- - varnamelen
- - goimports
- - tenv
- - golint
- exhaustruct
- - nilnil
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
- nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
+ - interfacer
+ - scopelint
+ - varcheck
+ - structcheck
+ - golint
- nosnakecase
diff --git a/vendor/github.com/go-openapi/swag/BENCHMARK.md b/vendor/github.com/go-openapi/swag/BENCHMARK.md
new file mode 100644
index 000000000..e7f28ed6b
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/BENCHMARK.md
@@ -0,0 +1,52 @@
+# Benchmarks
+
+## Name mangling utilities
+
+```bash
+go test -bench XXX -run XXX -benchtime 30s
+```
+
+### Benchmarks at b3e7a5386f996177e4808f11acb2aa93a0f660df
+
+```
+goos: linux
+goarch: amd64
+pkg: github.com/go-openapi/swag
+cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz
+BenchmarkToXXXName/ToGoName-4 862623 44101 ns/op 10450 B/op 732 allocs/op
+BenchmarkToXXXName/ToVarName-4 853656 40728 ns/op 10468 B/op 734 allocs/op
+BenchmarkToXXXName/ToFileName-4 1268312 27813 ns/op 9785 B/op 617 allocs/op
+BenchmarkToXXXName/ToCommandName-4 1276322 27903 ns/op 9785 B/op 617 allocs/op
+BenchmarkToXXXName/ToHumanNameLower-4 895334 40354 ns/op 10472 B/op 731 allocs/op
+BenchmarkToXXXName/ToHumanNameTitle-4 882441 40678 ns/op 10566 B/op 749 allocs/op
+```
+
+### Benchmarks after PR #79
+
+Roughly a 10x performance improvement and about 100x fewer memory allocations.
+
+```
+goos: linux
+goarch: amd64
+pkg: github.com/go-openapi/swag
+cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz
+BenchmarkToXXXName/ToGoName-4 9595830 3991 ns/op 42 B/op 5 allocs/op
+BenchmarkToXXXName/ToVarName-4 9194276 3984 ns/op 62 B/op 7 allocs/op
+BenchmarkToXXXName/ToFileName-4 17002711 2123 ns/op 147 B/op 7 allocs/op
+BenchmarkToXXXName/ToCommandName-4 16772926 2111 ns/op 147 B/op 7 allocs/op
+BenchmarkToXXXName/ToHumanNameLower-4 9788331 3749 ns/op 92 B/op 6 allocs/op
+BenchmarkToXXXName/ToHumanNameTitle-4 9188260 3941 ns/op 104 B/op 6 allocs/op
+```
+
+```
+goos: linux
+goarch: amd64
+pkg: github.com/go-openapi/swag
+cpu: AMD Ryzen 7 5800X 8-Core Processor
+BenchmarkToXXXName/ToGoName-16 18527378 1972 ns/op 42 B/op 5 allocs/op
+BenchmarkToXXXName/ToVarName-16 15552692 2093 ns/op 62 B/op 7 allocs/op
+BenchmarkToXXXName/ToFileName-16 32161176 1117 ns/op 147 B/op 7 allocs/op
+BenchmarkToXXXName/ToCommandName-16 32256634 1137 ns/op 147 B/op 7 allocs/op
+BenchmarkToXXXName/ToHumanNameLower-16 18599661 1946 ns/op 92 B/op 6 allocs/op
+BenchmarkToXXXName/ToHumanNameTitle-16 17581353 2054 ns/op 105 B/op 6 allocs/op
+```
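The table above could be reproduced with a table-driven benchmark along these lines (a sketch; the upstream benchmark code may differ, and the input string is an arbitrary choice):

```go
package swag_test

import (
	"testing"

	"github.com/go-openapi/swag"
)

// Table-driven benchmark over the name-mangling helpers measured above.
func BenchmarkToXXXName(b *testing.B) {
	helpers := []struct {
		name string
		fn   func(string) string
	}{
		{"ToGoName", swag.ToGoName},
		{"ToVarName", swag.ToVarName},
		{"ToFileName", swag.ToFileName},
		{"ToCommandName", swag.ToCommandName},
		{"ToHumanNameLower", swag.ToHumanNameLower},
		{"ToHumanNameTitle", swag.ToHumanNameTitle},
	}

	const input = "sample_text_with_initialisms_like_http_and_id" // arbitrary input

	for _, h := range helpers {
		b.Run(h.name, func(b *testing.B) {
			b.ReportAllocs()
			for i := 0; i < b.N; i++ {
				_ = h.fn(input)
			}
		})
	}
}
```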
diff --git a/vendor/github.com/go-openapi/swag/README.md b/vendor/github.com/go-openapi/swag/README.md
index 217f6fa50..a72922299 100644
--- a/vendor/github.com/go-openapi/swag/README.md
+++ b/vendor/github.com/go-openapi/swag/README.md
@@ -1,7 +1,8 @@
-# Swag [](https://travis-ci.org/go-openapi/swag) [](https://codecov.io/gh/go-openapi/swag) [](https://slackin.goswagger.io)
+# Swag [](https://github.com/go-openapi/swag/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/swag)
+[](https://slackin.goswagger.io)
[](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE)
-[](http://godoc.org/github.com/go-openapi/swag)
+[](https://pkg.go.dev/github.com/go-openapi/swag)
[](https://goreportcard.com/report/github.com/go-openapi/swag)
Contains a bunch of helper functions for go-openapi and go-swagger projects.
@@ -18,4 +19,5 @@ You may also use it standalone for your projects.
This repo has only few dependencies outside of the standard library:
-* YAML utilities depend on gopkg.in/yaml.v2
+* YAML utilities depend on `gopkg.in/yaml.v3`
+* `github.com/mailru/easyjson v0.7.7`
diff --git a/vendor/github.com/go-openapi/swag/initialism_index.go b/vendor/github.com/go-openapi/swag/initialism_index.go
new file mode 100644
index 000000000..20a359bb6
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/initialism_index.go
@@ -0,0 +1,202 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swag
+
+import (
+ "sort"
+ "strings"
+ "sync"
+)
+
+var (
+ // commonInitialisms are common acronyms that are kept as whole uppercased words.
+ commonInitialisms *indexOfInitialisms
+
+ // initialisms is a slice of sorted initialisms
+ initialisms []string
+
+ // a copy of initialisms pre-baked as []rune
+ initialismsRunes [][]rune
+ initialismsUpperCased [][]rune
+
+ isInitialism func(string) bool
+
+ maxAllocMatches int
+)
+
+func init() {
+ // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769
+ configuredInitialisms := map[string]bool{
+ "ACL": true,
+ "API": true,
+ "ASCII": true,
+ "CPU": true,
+ "CSS": true,
+ "DNS": true,
+ "EOF": true,
+ "GUID": true,
+ "HTML": true,
+ "HTTPS": true,
+ "HTTP": true,
+ "ID": true,
+ "IP": true,
+ "IPv4": true,
+ "IPv6": true,
+ "JSON": true,
+ "LHS": true,
+ "OAI": true,
+ "QPS": true,
+ "RAM": true,
+ "RHS": true,
+ "RPC": true,
+ "SLA": true,
+ "SMTP": true,
+ "SQL": true,
+ "SSH": true,
+ "TCP": true,
+ "TLS": true,
+ "TTL": true,
+ "UDP": true,
+ "UI": true,
+ "UID": true,
+ "UUID": true,
+ "URI": true,
+ "URL": true,
+ "UTF8": true,
+ "VM": true,
+ "XML": true,
+ "XMPP": true,
+ "XSRF": true,
+ "XSS": true,
+ }
+
+ // a thread-safe index of initialisms
+ commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms)
+ initialisms = commonInitialisms.sorted()
+ initialismsRunes = asRunes(initialisms)
+ initialismsUpperCased = asUpperCased(initialisms)
+ maxAllocMatches = maxAllocHeuristic(initialismsRunes)
+
+ // a test function
+ isInitialism = commonInitialisms.isInitialism
+}
+
+func asRunes(in []string) [][]rune {
+ out := make([][]rune, len(in))
+ for i, initialism := range in {
+ out[i] = []rune(initialism)
+ }
+
+ return out
+}
+
+func asUpperCased(in []string) [][]rune {
+ out := make([][]rune, len(in))
+
+ for i, initialism := range in {
+ out[i] = []rune(upper(trim(initialism)))
+ }
+
+ return out
+}
+
+func maxAllocHeuristic(in [][]rune) int {
+ heuristic := make(map[rune]int)
+ for _, initialism := range in {
+ heuristic[initialism[0]]++
+ }
+
+ var maxAlloc int
+ for _, val := range heuristic {
+ if val > maxAlloc {
+ maxAlloc = val
+ }
+ }
+
+ return maxAlloc
+}
+
+// AddInitialisms adds additional initialisms
+func AddInitialisms(words ...string) {
+ for _, word := range words {
+ // commonInitialisms[upper(word)] = true
+ commonInitialisms.add(upper(word))
+ }
+ // sort again
+ initialisms = commonInitialisms.sorted()
+ initialismsRunes = asRunes(initialisms)
+ initialismsUpperCased = asUpperCased(initialisms)
+}
+
+// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms.
+// Since go1.9, this may be implemented with sync.Map.
+type indexOfInitialisms struct {
+ sortMutex *sync.Mutex
+ index *sync.Map
+}
+
+func newIndexOfInitialisms() *indexOfInitialisms {
+ return &indexOfInitialisms{
+ sortMutex: new(sync.Mutex),
+ index: new(sync.Map),
+ }
+}
+
+func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms {
+ m.sortMutex.Lock()
+ defer m.sortMutex.Unlock()
+ for k, v := range initial {
+ m.index.Store(k, v)
+ }
+ return m
+}
+
+func (m *indexOfInitialisms) isInitialism(key string) bool {
+ _, ok := m.index.Load(key)
+ return ok
+}
+
+func (m *indexOfInitialisms) add(key string) *indexOfInitialisms {
+ m.index.Store(key, true)
+ return m
+}
+
+func (m *indexOfInitialisms) sorted() (result []string) {
+ m.sortMutex.Lock()
+ defer m.sortMutex.Unlock()
+ m.index.Range(func(key, _ interface{}) bool {
+ k := key.(string)
+ result = append(result, k)
+ return true
+ })
+ sort.Sort(sort.Reverse(byInitialism(result)))
+ return
+}
+
+type byInitialism []string
+
+func (s byInitialism) Len() int {
+ return len(s)
+}
+func (s byInitialism) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+func (s byInitialism) Less(i, j int) bool {
+ if len(s[i]) != len(s[j]) {
+ return len(s[i]) < len(s[j])
+ }
+
+ return strings.Compare(s[i], s[j]) > 0
+}
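The index sorts initialisms longest-first (see `byInitialism.Less`) so longer acronyms win during matching, and `AddInitialisms` lets callers extend the set at runtime. A small usage sketch; `GRPC` is an arbitrary example, not one of the built-ins:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	// Without registration, "grpc" is treated as an ordinary word
	// (only "id" matches the built-in ID initialism).
	fmt.Println(swag.ToGoName("grpc_server_id")) // GrpcServerID

	// Register a custom initialism; subsequent conversions keep it uppercased.
	swag.AddInitialisms("GRPC")
	fmt.Println(swag.ToGoName("grpc_server_id")) // GRPCServerID
}
```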
diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go
index 00038c377..783442fdd 100644
--- a/vendor/github.com/go-openapi/swag/loading.go
+++ b/vendor/github.com/go-openapi/swag/loading.go
@@ -21,6 +21,7 @@ import (
"net/http"
"net/url"
"os"
+ "path"
"path/filepath"
"runtime"
"strings"
@@ -40,43 +41,97 @@ var LoadHTTPBasicAuthPassword = ""
var LoadHTTPCustomHeaders = map[string]string{}
// LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in
-func LoadFromFileOrHTTP(path string) ([]byte, error) {
- return LoadStrategy(path, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path)
+func LoadFromFileOrHTTP(pth string) ([]byte, error) {
+ return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(pth)
}
// LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in
// timeout arg allows for per request overriding of the request timeout
-func LoadFromFileOrHTTPWithTimeout(path string, timeout time.Duration) ([]byte, error) {
- return LoadStrategy(path, os.ReadFile, loadHTTPBytes(timeout))(path)
+func LoadFromFileOrHTTPWithTimeout(pth string, timeout time.Duration) ([]byte, error) {
+ return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(timeout))(pth)
}
-// LoadStrategy returns a loader function for a given path or uri
-func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) {
- if strings.HasPrefix(path, "http") {
+// LoadStrategy returns a loader function for a given path or URI.
+//
+// The load strategy returns the remote load for any path starting with `http`.
+// So this works for any URI with a scheme `http` or `https`.
+//
+// The fallback strategy is to call the local loader.
+//
+// The local loader takes a local file system path (absolute or relative) as argument,
+// or alternatively a `file://...` URI, **without host** (see also below for windows).
+//
+// There are a few liberalities, initially intended to be tolerant regarding the URI syntax,
+// especially on windows.
+//
+// Before the local loader is called, the given path is transformed:
+// - percent-encoded characters are unescaped
+// - simple paths (e.g. `./folder/file`) are passed as-is
+// - on windows, occurrences of `/` are replaced by `\`, so providing a relative path such as `folder/file` works too.
+//
+// For paths provided as URIs with the "file" scheme, please note that:
+// - `file://` is simply stripped.
+// This means that the host part of the URI is not parsed at all.
+// For example, `file:///folder/file` becomes `/folder/file`,
+// but `file://localhost/folder/file` becomes `localhost/folder/file` on unix systems.
+// Similarly, `file://./folder/file` yields `./folder/file`.
+// - on windows, `file://...` can take a host so as to specify a UNC share location.
+//
+// Reminder about windows-specifics:
+// - `file://host/folder/file` becomes a UNC path like `\\host\folder\file` (no port specification is supported)
+// - `file:///c:/folder/file` becomes `C:\folder\file`
+// - `file://c:/folder/file` is tolerated (without leading `/`) and becomes `c:\folder\file`
+func LoadStrategy(pth string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) {
+ if strings.HasPrefix(pth, "http") {
return remote
}
- return func(pth string) ([]byte, error) {
- upth, err := pathUnescape(pth)
+
+ return func(p string) ([]byte, error) {
+ upth, err := url.PathUnescape(p)
if err != nil {
return nil, err
}
- if strings.HasPrefix(pth, `file://`) {
- if runtime.GOOS == "windows" {
- // support for canonical file URIs on windows.
- // Zero tolerance here for dodgy URIs.
- u, _ := url.Parse(upth)
- if u.Host != "" {
- // assume UNC name (volume share)
- // file://host/share/folder\... ==> \\host\share\path\folder
- // NOTE: UNC port not yet supported
- upth = strings.Join([]string{`\`, u.Host, u.Path}, `\`)
- } else {
- // file:///c:/folder/... ==> just remove the leading slash
- upth = strings.TrimPrefix(upth, `file:///`)
- }
- } else {
- upth = strings.TrimPrefix(upth, `file://`)
+ if !strings.HasPrefix(p, `file://`) {
+ // regular file path provided: just normalize slashes
+ return local(filepath.FromSlash(upth))
+ }
+
+ if runtime.GOOS != "windows" {
+ // crude processing: this leaves full URIs that carry a host with a (mostly) unexpected result
+ upth = strings.TrimPrefix(upth, `file://`)
+
+ return local(filepath.FromSlash(upth))
+ }
+
+ // windows-only pre-processing of file://... URIs
+
+ // support for canonical file URIs on windows.
+ u, err := url.Parse(filepath.ToSlash(upth))
+ if err != nil {
+ return nil, err
+ }
+
+ if u.Host != "" {
+ // assume UNC name (volume share)
+ // NOTE: UNC port not yet supported
+
+ // when the "host" segment is a drive letter:
+ // file://C:/folder/... => C:\folder
+ upth = path.Clean(strings.Join([]string{u.Host, u.Path}, `/`))
+ if !strings.HasSuffix(u.Host, ":") && u.Host[0] != '.' {
+ // tolerance: if we have a leading dot, this can't be a host
+ // file://host/share/folder\... ==> \\host\share\path\folder
+ upth = "//" + upth
+ }
+ } else {
+ // no host, let's figure out if this is a drive letter
+ upth = strings.TrimPrefix(upth, `file://`)
+ first, _, _ := strings.Cut(strings.TrimPrefix(u.Path, "/"), "/")
+ if strings.HasSuffix(first, ":") {
+ // drive letter in the first segment:
+ // file:///c:/folder/... ==> strip the leading slash
+ upth = strings.TrimPrefix(upth, `/`)
}
}
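In short, `LoadStrategy` picks the remote loader for `http(s)` URIs and otherwise normalizes the path (percent-unescaping, `file://` stripping, windows drive/UNC handling) before calling the local loader. A sketch of both entry points; the paths are hypothetical:

```go
package main

import (
	"fmt"
	"os"

	"github.com/go-openapi/swag"
)

func main() {
	// Anything that does not start with "http" is routed to the local
	// loader (os.ReadFile) after path normalization.
	data, err := swag.LoadFromFileOrHTTP("./openapi.yaml") // hypothetical file
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
	} else {
		fmt.Printf("loaded %d bytes\n", len(data))
	}

	// The strategy can also be assembled manually with custom loaders;
	// an "http(s)" prefix selects the remote loader.
	load := swag.LoadStrategy(
		"https://example.test/spec.json",
		os.ReadFile,
		func(pth string) ([]byte, error) {
			return nil, fmt.Errorf("remote fetch of %s disabled in this sketch", pth)
		},
	)
	_, err = load("https://example.test/spec.json")
	fmt.Println(err) // the remote loader was selected, so this prints the error above
}
```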
diff --git a/vendor/github.com/go-openapi/swag/name_lexem.go b/vendor/github.com/go-openapi/swag/name_lexem.go
index aa7f6a9bb..8bb64ac32 100644
--- a/vendor/github.com/go-openapi/swag/name_lexem.go
+++ b/vendor/github.com/go-openapi/swag/name_lexem.go
@@ -14,74 +14,80 @@
package swag
-import "unicode"
+import (
+ "unicode"
+ "unicode/utf8"
+)
type (
- nameLexem interface {
- GetUnsafeGoName() string
- GetOriginal() string
- IsInitialism() bool
- }
+ lexemKind uint8
- initialismNameLexem struct {
+ nameLexem struct {
original string
matchedInitialism string
+ kind lexemKind
}
+)
- casualNameLexem struct {
- original string
- }
+const (
+ lexemKindCasualName lexemKind = iota
+ lexemKindInitialismName
)
-func newInitialismNameLexem(original, matchedInitialism string) *initialismNameLexem {
- return &initialismNameLexem{
+func newInitialismNameLexem(original, matchedInitialism string) nameLexem {
+ return nameLexem{
+ kind: lexemKindInitialismName,
original: original,
matchedInitialism: matchedInitialism,
}
}
-func newCasualNameLexem(original string) *casualNameLexem {
- return &casualNameLexem{
+func newCasualNameLexem(original string) nameLexem {
+ return nameLexem{
+ kind: lexemKindCasualName,
original: original,
}
}
-func (l *initialismNameLexem) GetUnsafeGoName() string {
- return l.matchedInitialism
-}
+func (l nameLexem) GetUnsafeGoName() string {
+ if l.kind == lexemKindInitialismName {
+ return l.matchedInitialism
+ }
+
+ var (
+ first rune
+ rest string
+ )
-func (l *casualNameLexem) GetUnsafeGoName() string {
- var first rune
- var rest string
for i, orig := range l.original {
if i == 0 {
first = orig
continue
}
+
if i > 0 {
rest = l.original[i:]
break
}
}
+
if len(l.original) > 1 {
- return string(unicode.ToUpper(first)) + lower(rest)
+ b := poolOfBuffers.BorrowBuffer(utf8.UTFMax + len(rest))
+ defer func() {
+ poolOfBuffers.RedeemBuffer(b)
+ }()
+ b.WriteRune(unicode.ToUpper(first))
+ b.WriteString(lower(rest))
+ return b.String()
}
return l.original
}
-func (l *initialismNameLexem) GetOriginal() string {
+func (l nameLexem) GetOriginal() string {
return l.original
}
-func (l *casualNameLexem) GetOriginal() string {
- return l.original
-}
-
-func (l *initialismNameLexem) IsInitialism() bool {
- return true
-}
-
-func (l *casualNameLexem) IsInitialism() bool {
- return false
+func (l nameLexem) IsInitialism() bool {
+ return l.kind == lexemKindInitialismName
}
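The refactor above replaces two pointer-receiver implementations of an interface with one small value type carrying a `lexemKind` tag, which saves a heap allocation per lexem. The same pattern in miniature (names here are illustrative, not the package's own):

```go
package main

import "fmt"

// Tagged-struct replacement for a two-implementation interface:
// one value type, one kind discriminator, no per-value pointer.
type lexemKind uint8

const (
	kindCasual lexemKind = iota
	kindInitialism
)

type lexem struct {
	original string
	kind     lexemKind
}

func (l lexem) IsInitialism() bool { return l.kind == kindInitialism }

func main() {
	ls := []lexem{
		{original: "http", kind: kindInitialism},
		{original: "server", kind: kindCasual},
	}
	for _, l := range ls {
		fmt.Println(l.original, l.IsInitialism())
	}
}
```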
diff --git a/vendor/github.com/go-openapi/swag/post_go18.go b/vendor/github.com/go-openapi/swag/post_go18.go
deleted file mode 100644
index f5228b82c..000000000
--- a/vendor/github.com/go-openapi/swag/post_go18.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build go1.8
-// +build go1.8
-
-package swag
-
-import "net/url"
-
-func pathUnescape(path string) (string, error) {
- return url.PathUnescape(path)
-}
diff --git a/vendor/github.com/go-openapi/swag/post_go19.go b/vendor/github.com/go-openapi/swag/post_go19.go
deleted file mode 100644
index 7c7da9c08..000000000
--- a/vendor/github.com/go-openapi/swag/post_go19.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build go1.9
-// +build go1.9
-
-package swag
-
-import (
- "sort"
- "sync"
-)
-
-// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms.
-// Since go1.9, this may be implemented with sync.Map.
-type indexOfInitialisms struct {
- sortMutex *sync.Mutex
- index *sync.Map
-}
-
-func newIndexOfInitialisms() *indexOfInitialisms {
- return &indexOfInitialisms{
- sortMutex: new(sync.Mutex),
- index: new(sync.Map),
- }
-}
-
-func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms {
- m.sortMutex.Lock()
- defer m.sortMutex.Unlock()
- for k, v := range initial {
- m.index.Store(k, v)
- }
- return m
-}
-
-func (m *indexOfInitialisms) isInitialism(key string) bool {
- _, ok := m.index.Load(key)
- return ok
-}
-
-func (m *indexOfInitialisms) add(key string) *indexOfInitialisms {
- m.index.Store(key, true)
- return m
-}
-
-func (m *indexOfInitialisms) sorted() (result []string) {
- m.sortMutex.Lock()
- defer m.sortMutex.Unlock()
- m.index.Range(func(key, value interface{}) bool {
- k := key.(string)
- result = append(result, k)
- return true
- })
- sort.Sort(sort.Reverse(byInitialism(result)))
- return
-}
diff --git a/vendor/github.com/go-openapi/swag/pre_go18.go b/vendor/github.com/go-openapi/swag/pre_go18.go
deleted file mode 100644
index 2757d9b95..000000000
--- a/vendor/github.com/go-openapi/swag/pre_go18.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !go1.8
-// +build !go1.8
-
-package swag
-
-import "net/url"
-
-func pathUnescape(path string) (string, error) {
- return url.QueryUnescape(path)
-}
diff --git a/vendor/github.com/go-openapi/swag/pre_go19.go b/vendor/github.com/go-openapi/swag/pre_go19.go
deleted file mode 100644
index 0565db377..000000000
--- a/vendor/github.com/go-openapi/swag/pre_go19.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !go1.9
-// +build !go1.9
-
-package swag
-
-import (
- "sort"
- "sync"
-)
-
-// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms.
-// Before go1.9, this may be implemented with a mutex on the map.
-type indexOfInitialisms struct {
- getMutex *sync.Mutex
- index map[string]bool
-}
-
-func newIndexOfInitialisms() *indexOfInitialisms {
- return &indexOfInitialisms{
- getMutex: new(sync.Mutex),
- index: make(map[string]bool, 50),
- }
-}
-
-func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms {
- m.getMutex.Lock()
- defer m.getMutex.Unlock()
- for k, v := range initial {
- m.index[k] = v
- }
- return m
-}
-
-func (m *indexOfInitialisms) isInitialism(key string) bool {
- m.getMutex.Lock()
- defer m.getMutex.Unlock()
- _, ok := m.index[key]
- return ok
-}
-
-func (m *indexOfInitialisms) add(key string) *indexOfInitialisms {
- m.getMutex.Lock()
- defer m.getMutex.Unlock()
- m.index[key] = true
- return m
-}
-
-func (m *indexOfInitialisms) sorted() (result []string) {
- m.getMutex.Lock()
- defer m.getMutex.Unlock()
- for k := range m.index {
- result = append(result, k)
- }
- sort.Sort(sort.Reverse(byInitialism(result)))
- return
-}
diff --git a/vendor/github.com/go-openapi/swag/split.go b/vendor/github.com/go-openapi/swag/split.go
index a1825fb7d..274727a86 100644
--- a/vendor/github.com/go-openapi/swag/split.go
+++ b/vendor/github.com/go-openapi/swag/split.go
@@ -15,124 +15,269 @@
package swag
import (
+ "bytes"
+ "sync"
"unicode"
+ "unicode/utf8"
)
-var nameReplaceTable = map[rune]string{
- '@': "At ",
- '&': "And ",
- '|': "Pipe ",
- '$': "Dollar ",
- '!': "Bang ",
- '-': "",
- '_': "",
-}
-
type (
splitter struct {
- postSplitInitialismCheck bool
initialisms []string
+ initialismsRunes [][]rune
+ initialismsUpperCased [][]rune // initialisms cached in their trimmed, upper-cased version
+ postSplitInitialismCheck bool
+ }
+
+ splitterOption func(*splitter)
+
+ initialismMatch struct {
+ body []rune
+ start, end int
+ complete bool
+ }
+ initialismMatches []initialismMatch
+)
+
+type (
+ // memory pools of temporary objects.
+ //
+ // These are used to recycle temporarily allocated objects
+ // and relieve the GC from undue pressure.
+
+ matchesPool struct {
+ *sync.Pool
}
- splitterOption func(*splitter) *splitter
+ buffersPool struct {
+ *sync.Pool
+ }
+
+ lexemsPool struct {
+ *sync.Pool
+ }
+
+ splittersPool struct {
+ *sync.Pool
+ }
)
-// split calls the splitter; splitter provides more control and post options
+var (
+ // poolOfMatches holds temporary slices for recycling during the initialism match process
+ poolOfMatches = matchesPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := make(initialismMatches, 0, maxAllocMatches)
+
+ return &s
+ },
+ },
+ }
+
+ poolOfBuffers = buffersPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ return new(bytes.Buffer)
+ },
+ },
+ }
+
+ poolOfLexems = lexemsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := make([]nameLexem, 0, maxAllocMatches)
+
+ return &s
+ },
+ },
+ }
+
+ poolOfSplitters = splittersPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := newSplitter()
+
+ return &s
+ },
+ },
+ }
+)
+
+// nameReplaceTable finds a word representation for special characters.
+func nameReplaceTable(r rune) (string, bool) {
+ switch r {
+ case '@':
+ return "At ", true
+ case '&':
+ return "And ", true
+ case '|':
+ return "Pipe ", true
+ case '$':
+ return "Dollar ", true
+ case '!':
+ return "Bang ", true
+ case '-':
+ return "", true
+ case '_':
+ return "", true
+ default:
+ return "", false
+ }
+}
+
+// split calls the splitter.
+//
+// Use newSplitter for more control and options
func split(str string) []string {
- lexems := newSplitter().split(str)
- result := make([]string, 0, len(lexems))
+ s := poolOfSplitters.BorrowSplitter()
+ lexems := s.split(str)
+ result := make([]string, 0, len(*lexems))
- for _, lexem := range lexems {
+ for _, lexem := range *lexems {
result = append(result, lexem.GetOriginal())
}
+ poolOfLexems.RedeemLexems(lexems)
+ poolOfSplitters.RedeemSplitter(s)
return result
}
-func (s *splitter) split(str string) []nameLexem {
- return s.toNameLexems(str)
-}
-
-func newSplitter(options ...splitterOption) *splitter {
- splitter := &splitter{
+func newSplitter(options ...splitterOption) splitter {
+ s := splitter{
postSplitInitialismCheck: false,
initialisms: initialisms,
+ initialismsRunes: initialismsRunes,
+ initialismsUpperCased: initialismsUpperCased,
}
for _, option := range options {
- splitter = option(splitter)
+ option(&s)
}
- return splitter
+ return s
}
// withPostSplitInitialismCheck allows to catch initialisms after main split process
-func withPostSplitInitialismCheck(s *splitter) *splitter {
+func withPostSplitInitialismCheck(s *splitter) {
s.postSplitInitialismCheck = true
+}
+
+func (p matchesPool) BorrowMatches() *initialismMatches {
+ s := p.Get().(*initialismMatches)
+ *s = (*s)[:0] // reset slice, keep allocated capacity
+
return s
}
-type (
- initialismMatch struct {
- start, end int
- body []rune
- complete bool
+func (p buffersPool) BorrowBuffer(size int) *bytes.Buffer {
+ s := p.Get().(*bytes.Buffer)
+ s.Reset()
+
+ if s.Cap() < size {
+ s.Grow(size)
}
- initialismMatches []*initialismMatch
-)
-func (s *splitter) toNameLexems(name string) []nameLexem {
+ return s
+}
+
+func (p lexemsPool) BorrowLexems() *[]nameLexem {
+ s := p.Get().(*[]nameLexem)
+ *s = (*s)[:0] // reset slice, keep allocated capacity
+
+ return s
+}
+
+func (p splittersPool) BorrowSplitter(options ...splitterOption) *splitter {
+ s := p.Get().(*splitter)
+ s.postSplitInitialismCheck = false // reset options
+ for _, apply := range options {
+ apply(s)
+ }
+
+ return s
+}
+
+func (p matchesPool) RedeemMatches(s *initialismMatches) {
+ p.Put(s)
+}
+
+func (p buffersPool) RedeemBuffer(s *bytes.Buffer) {
+ p.Put(s)
+}
+
+func (p lexemsPool) RedeemLexems(s *[]nameLexem) {
+ p.Put(s)
+}
+
+func (p splittersPool) RedeemSplitter(s *splitter) {
+ p.Put(s)
+}
+
+func (m initialismMatch) isZero() bool {
+ return m.start == 0 && m.end == 0
+}
+
+func (s splitter) split(name string) *[]nameLexem {
nameRunes := []rune(name)
matches := s.gatherInitialismMatches(nameRunes)
+ if matches == nil {
+ return poolOfLexems.BorrowLexems()
+ }
+
return s.mapMatchesToNameLexems(nameRunes, matches)
}
-func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches {
- matches := make(initialismMatches, 0)
+func (s splitter) gatherInitialismMatches(nameRunes []rune) *initialismMatches {
+ var matches *initialismMatches
for currentRunePosition, currentRune := range nameRunes {
- newMatches := make(initialismMatches, 0, len(matches))
+ // recycle these allocations as we loop over runes:
+ // with such recycling, only 2 slices should be allocated per call
+ // instead of O(n).
+ newMatches := poolOfMatches.BorrowMatches()
// check current initialism matches
- for _, match := range matches {
- if keepCompleteMatch := match.complete; keepCompleteMatch {
- newMatches = append(newMatches, match)
- continue
- }
+ if matches != nil { // skip first iteration
+ for _, match := range *matches {
+ if keepCompleteMatch := match.complete; keepCompleteMatch {
+ *newMatches = append(*newMatches, match)
+ continue
+ }
- // drop failed match
- currentMatchRune := match.body[currentRunePosition-match.start]
- if !s.initialismRuneEqual(currentMatchRune, currentRune) {
- continue
- }
+ // drop failed match
+ currentMatchRune := match.body[currentRunePosition-match.start]
+ if currentMatchRune != currentRune {
+ continue
+ }
- // try to complete ongoing match
- if currentRunePosition-match.start == len(match.body)-1 {
- // we are close; the next step is to check the symbol ahead
- // if it is a small letter, then it is not the end of match
- // but beginning of the next word
-
- if currentRunePosition < len(nameRunes)-1 {
- nextRune := nameRunes[currentRunePosition+1]
- if newWord := unicode.IsLower(nextRune); newWord {
- // oh ok, it was the start of a new word
- continue
+ // try to complete ongoing match
+ if currentRunePosition-match.start == len(match.body)-1 {
+ // we are close; the next step is to check the symbol ahead
+ // if it is a small letter, then it is not the end of match
+ // but beginning of the next word
+
+ if currentRunePosition < len(nameRunes)-1 {
+ nextRune := nameRunes[currentRunePosition+1]
+ if newWord := unicode.IsLower(nextRune); newWord {
+ // oh ok, it was the start of a new word
+ continue
+ }
}
+
+ match.complete = true
+ match.end = currentRunePosition
}
- match.complete = true
- match.end = currentRunePosition
+ *newMatches = append(*newMatches, match)
}
-
- newMatches = append(newMatches, match)
}
// check for new initialism matches
- for _, initialism := range s.initialisms {
- initialismRunes := []rune(initialism)
- if s.initialismRuneEqual(initialismRunes[0], currentRune) {
- newMatches = append(newMatches, &initialismMatch{
+ for i := range s.initialisms {
+ initialismRunes := s.initialismsRunes[i]
+ if initialismRunes[0] == currentRune {
+ *newMatches = append(*newMatches, initialismMatch{
start: currentRunePosition,
body: initialismRunes,
complete: false,
@@ -140,24 +285,28 @@ func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches {
}
}
+ if matches != nil {
+ poolOfMatches.RedeemMatches(matches)
+ }
matches = newMatches
}
+ // up to the caller to redeem this last slice
return matches
}
-func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMatches) []nameLexem {
- nameLexems := make([]nameLexem, 0)
+func (s splitter) mapMatchesToNameLexems(nameRunes []rune, matches *initialismMatches) *[]nameLexem {
+ nameLexems := poolOfLexems.BorrowLexems()
- var lastAcceptedMatch *initialismMatch
- for _, match := range matches {
+ var lastAcceptedMatch initialismMatch
+ for _, match := range *matches {
if !match.complete {
continue
}
- if firstMatch := lastAcceptedMatch == nil; firstMatch {
- nameLexems = append(nameLexems, s.breakCasualString(nameRunes[:match.start])...)
- nameLexems = append(nameLexems, s.breakInitialism(string(match.body)))
+ if firstMatch := lastAcceptedMatch.isZero(); firstMatch {
+ s.appendBrokenDownCasualString(nameLexems, nameRunes[:match.start])
+ *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body)))
lastAcceptedMatch = match
@@ -169,63 +318,66 @@ func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMa
}
middle := nameRunes[lastAcceptedMatch.end+1 : match.start]
- nameLexems = append(nameLexems, s.breakCasualString(middle)...)
- nameLexems = append(nameLexems, s.breakInitialism(string(match.body)))
+ s.appendBrokenDownCasualString(nameLexems, middle)
+ *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body)))
lastAcceptedMatch = match
}
// we have not found any accepted matches
- if lastAcceptedMatch == nil {
- return s.breakCasualString(nameRunes)
- }
-
- if lastAcceptedMatch.end+1 != len(nameRunes) {
+ if lastAcceptedMatch.isZero() {
+ *nameLexems = (*nameLexems)[:0]
+ s.appendBrokenDownCasualString(nameLexems, nameRunes)
+ } else if lastAcceptedMatch.end+1 != len(nameRunes) {
rest := nameRunes[lastAcceptedMatch.end+1:]
- nameLexems = append(nameLexems, s.breakCasualString(rest)...)
+ s.appendBrokenDownCasualString(nameLexems, rest)
}
- return nameLexems
-}
+ poolOfMatches.RedeemMatches(matches)
-func (s *splitter) initialismRuneEqual(a, b rune) bool {
- return a == b
+ return nameLexems
}
-func (s *splitter) breakInitialism(original string) nameLexem {
+func (s splitter) breakInitialism(original string) nameLexem {
return newInitialismNameLexem(original, original)
}
-func (s *splitter) breakCasualString(str []rune) []nameLexem {
- segments := make([]nameLexem, 0)
- currentSegment := ""
+func (s splitter) appendBrokenDownCasualString(segments *[]nameLexem, str []rune) {
+ currentSegment := poolOfBuffers.BorrowBuffer(len(str)) // unlike strings.Builder, bytes.Buffer initial storage can be reused
+ defer func() {
+ poolOfBuffers.RedeemBuffer(currentSegment)
+ }()
addCasualNameLexem := func(original string) {
- segments = append(segments, newCasualNameLexem(original))
+ *segments = append(*segments, newCasualNameLexem(original))
}
addInitialismNameLexem := func(original, match string) {
- segments = append(segments, newInitialismNameLexem(original, match))
+ *segments = append(*segments, newInitialismNameLexem(original, match))
}
- addNameLexem := func(original string) {
- if s.postSplitInitialismCheck {
- for _, initialism := range s.initialisms {
- if upper(initialism) == upper(original) {
- addInitialismNameLexem(original, initialism)
+ var addNameLexem func(string)
+ if s.postSplitInitialismCheck {
+ addNameLexem = func(original string) {
+ for i := range s.initialisms {
+ if isEqualFoldIgnoreSpace(s.initialismsUpperCased[i], original) {
+ addInitialismNameLexem(original, s.initialisms[i])
+
return
}
}
- }
- addCasualNameLexem(original)
+ addCasualNameLexem(original)
+ }
+ } else {
+ addNameLexem = addCasualNameLexem
}
- for _, rn := range string(str) {
- if replace, found := nameReplaceTable[rn]; found {
- if currentSegment != "" {
- addNameLexem(currentSegment)
- currentSegment = ""
+ for _, rn := range str {
+ if replace, found := nameReplaceTable(rn); found {
+ if currentSegment.Len() > 0 {
+ addNameLexem(currentSegment.String())
+ currentSegment.Reset()
}
if replace != "" {
@@ -236,27 +388,121 @@ func (s *splitter) breakCasualString(str []rune) []nameLexem {
}
if !unicode.In(rn, unicode.L, unicode.M, unicode.N, unicode.Pc) {
- if currentSegment != "" {
- addNameLexem(currentSegment)
- currentSegment = ""
+ if currentSegment.Len() > 0 {
+ addNameLexem(currentSegment.String())
+ currentSegment.Reset()
}
continue
}
if unicode.IsUpper(rn) {
- if currentSegment != "" {
- addNameLexem(currentSegment)
+ if currentSegment.Len() > 0 {
+ addNameLexem(currentSegment.String())
}
- currentSegment = ""
+ currentSegment.Reset()
}
- currentSegment += string(rn)
+ currentSegment.WriteRune(rn)
+ }
+
+ if currentSegment.Len() > 0 {
+ addNameLexem(currentSegment.String())
}
+}
+
+// isEqualFoldIgnoreSpace is the same as strings.EqualFold, but
+// it ignores leading and trailing blank spaces in the compared
+// string.
+//
+// base is assumed to be composed of upper-cased runes, and be already
+// trimmed.
+//
+// This code is heavily inspired by strings.EqualFold.
+func isEqualFoldIgnoreSpace(base []rune, str string) bool {
+ var i, baseIndex int
+ // equivalent to b := []byte(str), but without data copy
+ b := hackStringBytes(str)
+
+ for i < len(b) {
+ if c := b[i]; c < utf8.RuneSelf {
+ // fast path for ASCII
+ if c != ' ' && c != '\t' {
+ break
+ }
+ i++
+
+ continue
+ }
+
+ // unicode case
+ r, size := utf8.DecodeRune(b[i:])
+ if !unicode.IsSpace(r) {
+ break
+ }
+ i += size
+ }
+
+ if i >= len(b) {
+ return len(base) == 0
+ }
+
+ for _, baseRune := range base {
+ if i >= len(b) {
+ break
+ }
+
+ if c := b[i]; c < utf8.RuneSelf {
+ // single byte rune case (ASCII)
+ if baseRune >= utf8.RuneSelf {
+ return false
+ }
+
+ baseChar := byte(baseRune)
+ if c != baseChar &&
+ !('a' <= c && c <= 'z' && c-'a'+'A' == baseChar) {
+ return false
+ }
+
+ baseIndex++
+ i++
+
+ continue
+ }
+
+ // unicode case
+ r, size := utf8.DecodeRune(b[i:])
+ if unicode.ToUpper(r) != baseRune {
+ return false
+ }
+ baseIndex++
+ i += size
+ }
+
+ if baseIndex != len(base) {
+ return false
+ }
+
+ // all passed: now we should only have blanks
+ for i < len(b) {
+ if c := b[i]; c < utf8.RuneSelf {
+ // fast path for ASCII
+ if c != ' ' && c != '\t' {
+ return false
+ }
+ i++
+
+ continue
+ }
+
+ // unicode case
+ r, size := utf8.DecodeRune(b[i:])
+ if !unicode.IsSpace(r) {
+ return false
+ }
- if currentSegment != "" {
- addNameLexem(currentSegment)
+ i += size
}
- return segments
+ return true
}
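The split path now recycles its temporary slices, buffers, and splitters through `sync.Pool` with a consistent borrow/reset/redeem idiom. A minimal standalone version of that idiom, assuming a `bytes.Buffer` payload as in `poolOfBuffers`:

```go
package main

import (
	"bytes"
	"fmt"
	"sync"
)

// The pool stores pointers; Borrow resets state but keeps capacity,
// Redeem returns the object for reuse.
var bufPool = sync.Pool{
	New: func() any { return new(bytes.Buffer) },
}

func borrowBuffer(size int) *bytes.Buffer {
	b := bufPool.Get().(*bytes.Buffer)
	b.Reset() // drop contents, keep allocated storage
	if b.Cap() < size {
		b.Grow(size)
	}
	return b
}

func redeemBuffer(b *bytes.Buffer) { bufPool.Put(b) }

func main() {
	b := borrowBuffer(64)
	defer redeemBuffer(b)
	b.WriteString("pooled")
	fmt.Println(b.String())
}
```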
diff --git a/vendor/github.com/go-openapi/swag/string_bytes.go b/vendor/github.com/go-openapi/swag/string_bytes.go
new file mode 100644
index 000000000..90745d5ca
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/string_bytes.go
@@ -0,0 +1,8 @@
+package swag
+
+import "unsafe"
+
+// hackStringBytes returns the (unsafe) underlying bytes slice of a string.
+func hackStringBytes(str string) []byte {
+ return unsafe.Slice(unsafe.StringData(str), len(str))
+}
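`hackStringBytes` is a zero-copy view of a string's bytes: the returned slice aliases the string's backing storage, so by contract it must never be written to. A small demonstration of the aliasing (the helper is re-declared here to keep the example self-contained; it needs Go 1.20+):

```go
package main

import (
	"fmt"
	"unsafe"
)

// Zero-copy view of a string's bytes, as in hackStringBytes above.
// Read-only by contract: mutating the result is undefined behavior.
func stringBytes(s string) []byte {
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	s := "read-only"
	b := stringBytes(s)
	fmt.Println(len(b), cap(b), string(b[:4])) // 9 9 read
}
```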
diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go
index f78ab684a..5051401c4 100644
--- a/vendor/github.com/go-openapi/swag/util.go
+++ b/vendor/github.com/go-openapi/swag/util.go
@@ -18,76 +18,25 @@ import (
"reflect"
"strings"
"unicode"
+ "unicode/utf8"
)
-// commonInitialisms are common acronyms that are kept as whole uppercased words.
-var commonInitialisms *indexOfInitialisms
-
-// initialisms is a slice of sorted initialisms
-var initialisms []string
-
-var isInitialism func(string) bool
-
// GoNamePrefixFunc sets an optional rule to prefix go names
// which do not start with a letter.
//
+// The prefix function is assumed to return a string that starts with an upper case letter.
+//
// e.g. to help convert "123" into "{prefix}123"
//
// The default is to prefix with "X"
var GoNamePrefixFunc func(string) string
-func init() {
- // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769
- var configuredInitialisms = map[string]bool{
- "ACL": true,
- "API": true,
- "ASCII": true,
- "CPU": true,
- "CSS": true,
- "DNS": true,
- "EOF": true,
- "GUID": true,
- "HTML": true,
- "HTTPS": true,
- "HTTP": true,
- "ID": true,
- "IP": true,
- "IPv4": true,
- "IPv6": true,
- "JSON": true,
- "LHS": true,
- "OAI": true,
- "QPS": true,
- "RAM": true,
- "RHS": true,
- "RPC": true,
- "SLA": true,
- "SMTP": true,
- "SQL": true,
- "SSH": true,
- "TCP": true,
- "TLS": true,
- "TTL": true,
- "UDP": true,
- "UI": true,
- "UID": true,
- "UUID": true,
- "URI": true,
- "URL": true,
- "UTF8": true,
- "VM": true,
- "XML": true,
- "XMPP": true,
- "XSRF": true,
- "XSS": true,
+func prefixFunc(name, in string) string {
+ if GoNamePrefixFunc == nil {
+ return "X" + in
}
- // a thread-safe index of initialisms
- commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms)
- initialisms = commonInitialisms.sorted()
-
- // a test function
- isInitialism = commonInitialisms.isInitialism
+ return GoNamePrefixFunc(name) + in
}
const (
@@ -156,25 +105,9 @@ func SplitByFormat(data, format string) []string {
return result
}
-type byInitialism []string
-
-func (s byInitialism) Len() int {
- return len(s)
-}
-func (s byInitialism) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-func (s byInitialism) Less(i, j int) bool {
- if len(s[i]) != len(s[j]) {
- return len(s[i]) < len(s[j])
- }
-
- return strings.Compare(s[i], s[j]) > 0
-}
-
// Removes leading whitespaces
func trim(str string) string {
- return strings.Trim(str, " ")
+ return strings.TrimSpace(str)
}
// Shortcut to strings.ToUpper()
@@ -188,15 +121,20 @@ func lower(str string) string {
}
// Camelize an uppercased word
-func Camelize(word string) (camelized string) {
+func Camelize(word string) string {
+ camelized := poolOfBuffers.BorrowBuffer(len(word))
+ defer func() {
+ poolOfBuffers.RedeemBuffer(camelized)
+ }()
+
for pos, ru := range []rune(word) {
if pos > 0 {
- camelized += string(unicode.ToLower(ru))
+ camelized.WriteRune(unicode.ToLower(ru))
} else {
- camelized += string(unicode.ToUpper(ru))
+ camelized.WriteRune(unicode.ToUpper(ru))
}
}
- return
+ return camelized.String()
}
// ToFileName lowercases and underscores a go type name
@@ -224,33 +162,40 @@ func ToCommandName(name string) string {
// ToHumanNameLower represents a code name as a human series of words
func ToHumanNameLower(name string) string {
- in := newSplitter(withPostSplitInitialismCheck).split(name)
- out := make([]string, 0, len(in))
+ s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
+ in := s.split(name)
+ poolOfSplitters.RedeemSplitter(s)
+ out := make([]string, 0, len(*in))
- for _, w := range in {
+ for _, w := range *in {
if !w.IsInitialism() {
out = append(out, lower(w.GetOriginal()))
} else {
- out = append(out, w.GetOriginal())
+ out = append(out, trim(w.GetOriginal()))
}
}
+ poolOfLexems.RedeemLexems(in)
return strings.Join(out, " ")
}
// ToHumanNameTitle represents a code name as a human series of words with the first letters titleized
func ToHumanNameTitle(name string) string {
- in := newSplitter(withPostSplitInitialismCheck).split(name)
+ s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
+ in := s.split(name)
+ poolOfSplitters.RedeemSplitter(s)
- out := make([]string, 0, len(in))
- for _, w := range in {
- original := w.GetOriginal()
+ out := make([]string, 0, len(*in))
+ for _, w := range *in {
+ original := trim(w.GetOriginal())
if !w.IsInitialism() {
out = append(out, Camelize(original))
} else {
out = append(out, original)
}
}
+ poolOfLexems.RedeemLexems(in)
+
return strings.Join(out, " ")
}
@@ -264,7 +209,7 @@ func ToJSONName(name string) string {
out = append(out, lower(w))
continue
}
- out = append(out, Camelize(w))
+ out = append(out, Camelize(trim(w)))
}
return strings.Join(out, "")
}
@@ -283,35 +228,70 @@ func ToVarName(name string) string {
// ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes
func ToGoName(name string) string {
- lexems := newSplitter(withPostSplitInitialismCheck).split(name)
+ s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
+ lexems := s.split(name)
+ poolOfSplitters.RedeemSplitter(s)
+ defer func() {
+ poolOfLexems.RedeemLexems(lexems)
+ }()
+ lexemes := *lexems
+
+ if len(lexemes) == 0 {
+ return ""
+ }
+
+ result := poolOfBuffers.BorrowBuffer(len(name))
+ defer func() {
+ poolOfBuffers.RedeemBuffer(result)
+ }()
- result := ""
- for _, lexem := range lexems {
+ // check if not starting with a letter, upper case
+ firstPart := lexemes[0].GetUnsafeGoName()
+ if lexemes[0].IsInitialism() {
+ firstPart = upper(firstPart)
+ }
+
+ if c := firstPart[0]; c < utf8.RuneSelf {
+ // ASCII
+ switch {
+ case 'A' <= c && c <= 'Z':
+ result.WriteString(firstPart)
+ case 'a' <= c && c <= 'z':
+ result.WriteByte(c - 'a' + 'A')
+ result.WriteString(firstPart[1:])
+ default:
+ result.WriteString(prefixFunc(name, firstPart))
+ // NOTE: we no longer check whether prefixFunc returns a string that starts with uppercase:
+ // this is assumed to always be the case
+ }
+ } else {
+ // unicode
+ firstRune, _ := utf8.DecodeRuneInString(firstPart)
+ switch {
+ case !unicode.IsLetter(firstRune):
+ result.WriteString(prefixFunc(name, firstPart))
+ case !unicode.IsUpper(firstRune):
+ result.WriteString(prefixFunc(name, firstPart))
+ /*
+ result.WriteRune(unicode.ToUpper(firstRune))
+ result.WriteString(firstPart[offset:])
+ */
+ default:
+ result.WriteString(firstPart)
+ }
+ }
+
+ for _, lexem := range lexemes[1:] {
goName := lexem.GetUnsafeGoName()
// to support old behavior
if lexem.IsInitialism() {
goName = upper(goName)
}
- result += goName
+ result.WriteString(goName)
}
- if len(result) > 0 {
- // Only prefix with X when the first character isn't an ascii letter
- first := []rune(result)[0]
- if !unicode.IsLetter(first) || (first > unicode.MaxASCII && !unicode.IsUpper(first)) {
- if GoNamePrefixFunc == nil {
- return "X" + result
- }
- result = GoNamePrefixFunc(name) + result
- }
- first = []rune(result)[0]
- if unicode.IsLetter(first) && !unicode.IsUpper(first) {
- result = string(append([]rune{unicode.ToUpper(first)}, []rune(result)[1:]...))
- }
- }
-
- return result
+ return result.String()
}
// ContainsStrings searches a slice of strings for a case-sensitive match
@@ -341,13 +321,22 @@ type zeroable interface {
// IsZero returns true when the value passed into the function is a zero value.
// This allows for safer checking of interface values.
func IsZero(data interface{}) bool {
+ v := reflect.ValueOf(data)
+ // check for nil data
+ switch v.Kind() { //nolint:exhaustive
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ if v.IsNil() {
+ return true
+ }
+ }
+
// check for things that have an IsZero method instead
if vv, ok := data.(zeroable); ok {
return vv.IsZero()
}
+
// continue with slightly more complex reflection
- v := reflect.ValueOf(data)
- switch v.Kind() {
+ switch v.Kind() { //nolint:exhaustive
case reflect.String:
return v.Len() == 0
case reflect.Bool:
@@ -358,24 +347,13 @@ func IsZero(data interface{}) bool {
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
- case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
- return v.IsNil()
case reflect.Struct, reflect.Array:
return reflect.DeepEqual(data, reflect.Zero(v.Type()).Interface())
case reflect.Invalid:
return true
+ default:
+ return false
}
- return false
-}
-
-// AddInitialisms add additional initialisms
-func AddInitialisms(words ...string) {
- for _, word := range words {
- // commonInitialisms[upper(word)] = true
- commonInitialisms.add(upper(word))
- }
- // sort again
- initialisms = commonInitialisms.sorted()
}
// CommandLineOptionsGroup represents a group of user-defined command line options
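Taken together, the util.go changes keep the name manglers' public behavior while moving them onto pooled buffers, and `IsZero` now checks nil-able kinds before consulting a type's own `IsZero` method. A sketch of the expected behavior under the rules encoded above (the expected outputs in comments follow from the code, not from test fixtures):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	fmt.Println(swag.ToGoName("json_api_id"))        // JSONAPIID: all three parts are initialisms
	fmt.Println(swag.ToGoName("123_name"))           // X123Name: default "X" prefix via prefixFunc
	fmt.Println(swag.ToHumanNameLower("HTTPServer")) // "HTTP server": initialisms keep their case

	// IsZero short-circuits on nil-able kinds before trying the zeroable interface.
	var m map[string]string
	fmt.Println(swag.IsZero(m), swag.IsZero(42), swag.IsZero("")) // true false true
}
```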
diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go
index f09ee609f..f59e02593 100644
--- a/vendor/github.com/go-openapi/swag/yaml.go
+++ b/vendor/github.com/go-openapi/swag/yaml.go
@@ -16,8 +16,11 @@ package swag
import (
"encoding/json"
+ "errors"
"fmt"
"path/filepath"
+ "reflect"
+ "sort"
"strconv"
"github.com/mailru/easyjson/jlexer"
@@ -48,7 +51,7 @@ func BytesToYAMLDoc(data []byte) (interface{}, error) {
return nil, err
}
if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode {
- return nil, fmt.Errorf("only YAML documents that are objects are supported")
+ return nil, errors.New("only YAML documents that are objects are supported")
}
return &document, nil
}
@@ -147,7 +150,7 @@ func yamlScalar(node *yaml.Node) (interface{}, error) {
case yamlTimestamp:
return node.Value, nil
case yamlNull:
- return nil, nil
+ return nil, nil //nolint:nilnil
default:
return nil, fmt.Errorf("YAML tag %q is not supported", node.LongTag())
}
@@ -245,7 +248,27 @@ func (s JSONMapSlice) MarshalYAML() (interface{}, error) {
return yaml.Marshal(&n)
}
+func isNil(input interface{}) bool {
+ if input == nil {
+ return true
+ }
+ kind := reflect.TypeOf(input).Kind()
+ switch kind { //nolint:exhaustive
+ case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan:
+ return reflect.ValueOf(input).IsNil()
+ default:
+ return false
+ }
+}
+
func json2yaml(item interface{}) (*yaml.Node, error) {
+ if isNil(item) {
+ return &yaml.Node{
+ Kind: yaml.ScalarNode,
+ Value: "null",
+ }, nil
+ }
+
switch val := item.(type) {
case JSONMapSlice:
var n yaml.Node
@@ -265,7 +288,14 @@ func json2yaml(item interface{}) (*yaml.Node, error) {
case map[string]interface{}:
var n yaml.Node
n.Kind = yaml.MappingNode
- for k, v := range val {
+ keys := make([]string, 0, len(val))
+ for k := range val {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ v := val[k]
childNode, err := json2yaml(v)
if err != nil {
return nil, err
@@ -318,8 +348,9 @@ func json2yaml(item interface{}) (*yaml.Node, error) {
Tag: yamlBoolScalar,
Value: strconv.FormatBool(val),
}, nil
+ default:
+ return nil, fmt.Errorf("unhandled type: %T", val)
}
- return nil, nil
}
// JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice
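The json2yaml changes above have two user-visible effects: nil inputs (including nil pointers, maps, slices, and channels, per the new isNil helper) are emitted as an explicit `null` scalar rather than hitting the old trailing `return nil, nil`, and map keys are sorted so the emitted YAML mapping order is deterministic across runs. The sorting idiom in isolation, as a standalone sketch rather than the library code:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	val := map[string]interface{}{"b": 2, "c": 3, "a": 1}

	// Go randomizes map iteration order; collecting and sorting the keys
	// first is what makes the emitted node order reproducible.
	keys := make([]string, 0, len(val))
	for k := range val {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	for _, k := range keys {
		fmt.Println(k, val[k]) // always a, b, c
	}
}
```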
diff --git a/vendor/github.com/golang/groupcache/LICENSE b/vendor/github.com/golang/groupcache/LICENSE
deleted file mode 100644
index 37ec93a14..000000000
--- a/vendor/github.com/golang/groupcache/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-Apache License
-Version 2.0, January 2004
-http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-"License" shall mean the terms and conditions for use, reproduction, and
-distribution as defined by Sections 1 through 9 of this document.
-
-"Licensor" shall mean the copyright owner or entity authorized by the copyright
-owner that is granting the License.
-
-"Legal Entity" shall mean the union of the acting entity and all other entities
-that control, are controlled by, or are under common control with that entity.
-For the purposes of this definition, "control" means (i) the power, direct or
-indirect, to cause the direction or management of such entity, whether by
-contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
-outstanding shares, or (iii) beneficial ownership of such entity.
-
-"You" (or "Your") shall mean an individual or Legal Entity exercising
-permissions granted by this License.
-
-"Source" form shall mean the preferred form for making modifications, including
-but not limited to software source code, documentation source, and configuration
-files.
-
-"Object" form shall mean any form resulting from mechanical transformation or
-translation of a Source form, including but not limited to compiled object code,
-generated documentation, and conversions to other media types.
-
-"Work" shall mean the work of authorship, whether in Source or Object form, made
-available under the License, as indicated by a copyright notice that is included
-in or attached to the work (an example is provided in the Appendix below).
-
-"Derivative Works" shall mean any work, whether in Source or Object form, that
-is based on (or derived from) the Work and for which the editorial revisions,
-annotations, elaborations, or other modifications represent, as a whole, an
-original work of authorship. For the purposes of this License, Derivative Works
-shall not include works that remain separable from, or merely link (or bind by
-name) to the interfaces of, the Work and Derivative Works thereof.
-
-"Contribution" shall mean any work of authorship, including the original version
-of the Work and any modifications or additions to that Work or Derivative Works
-thereof, that is intentionally submitted to Licensor for inclusion in the Work
-by the copyright owner or by an individual or Legal Entity authorized to submit
-on behalf of the copyright owner. For the purposes of this definition,
-"submitted" means any form of electronic, verbal, or written communication sent
-to the Licensor or its representatives, including but not limited to
-communication on electronic mailing lists, source code control systems, and
-issue tracking systems that are managed by, or on behalf of, the Licensor for
-the purpose of discussing and improving the Work, but excluding communication
-that is conspicuously marked or otherwise designated in writing by the copyright
-owner as "Not a Contribution."
-
-"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
-of whom a Contribution has been received by Licensor and subsequently
-incorporated within the Work.
-
-2. Grant of Copyright License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable copyright license to reproduce, prepare Derivative Works of,
-publicly display, publicly perform, sublicense, and distribute the Work and such
-Derivative Works in Source or Object form.
-
-3. Grant of Patent License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable (except as stated in this section) patent license to make, have
-made, use, offer to sell, sell, import, and otherwise transfer the Work, where
-such license applies only to those patent claims licensable by such Contributor
-that are necessarily infringed by their Contribution(s) alone or by combination
-of their Contribution(s) with the Work to which such Contribution(s) was
-submitted. If You institute patent litigation against any entity (including a
-cross-claim or counterclaim in a lawsuit) alleging that the Work or a
-Contribution incorporated within the Work constitutes direct or contributory
-patent infringement, then any patent licenses granted to You under this License
-for that Work shall terminate as of the date such litigation is filed.
-
-4. Redistribution.
-
-You may reproduce and distribute copies of the Work or Derivative Works thereof
-in any medium, with or without modifications, and in Source or Object form,
-provided that You meet the following conditions:
-
-You must give any other recipients of the Work or Derivative Works a copy of
-this License; and
-You must cause any modified files to carry prominent notices stating that You
-changed the files; and
-You must retain, in the Source form of any Derivative Works that You distribute,
-all copyright, patent, trademark, and attribution notices from the Source form
-of the Work, excluding those notices that do not pertain to any part of the
-Derivative Works; and
-If the Work includes a "NOTICE" text file as part of its distribution, then any
-Derivative Works that You distribute must include a readable copy of the
-attribution notices contained within such NOTICE file, excluding those notices
-that do not pertain to any part of the Derivative Works, in at least one of the
-following places: within a NOTICE text file distributed as part of the
-Derivative Works; within the Source form or documentation, if provided along
-with the Derivative Works; or, within a display generated by the Derivative
-Works, if and wherever such third-party notices normally appear. The contents of
-the NOTICE file are for informational purposes only and do not modify the
-License. You may add Your own attribution notices within Derivative Works that
-You distribute, alongside or as an addendum to the NOTICE text from the Work,
-provided that such additional attribution notices cannot be construed as
-modifying the License.
-You may add Your own copyright statement to Your modifications and may provide
-additional or different license terms and conditions for use, reproduction, or
-distribution of Your modifications, or for any such Derivative Works as a whole,
-provided Your use, reproduction, and distribution of the Work otherwise complies
-with the conditions stated in this License.
-
-5. Submission of Contributions.
-
-Unless You explicitly state otherwise, any Contribution intentionally submitted
-for inclusion in the Work by You to the Licensor shall be under the terms and
-conditions of this License, without any additional terms or conditions.
-Notwithstanding the above, nothing herein shall supersede or modify the terms of
-any separate license agreement you may have executed with Licensor regarding
-such Contributions.
-
-6. Trademarks.
-
-This License does not grant permission to use the trade names, trademarks,
-service marks, or product names of the Licensor, except as required for
-reasonable and customary use in describing the origin of the Work and
-reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty.
-
-Unless required by applicable law or agreed to in writing, Licensor provides the
-Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
-including, without limitation, any warranties or conditions of TITLE,
-NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
-solely responsible for determining the appropriateness of using or
-redistributing the Work and assume any risks associated with Your exercise of
-permissions under this License.
-
-8. Limitation of Liability.
-
-In no event and under no legal theory, whether in tort (including negligence),
-contract, or otherwise, unless required by applicable law (such as deliberate
-and grossly negligent acts) or agreed to in writing, shall any Contributor be
-liable to You for damages, including any direct, indirect, special, incidental,
-or consequential damages of any character arising as a result of this License or
-out of the use or inability to use the Work (including but not limited to
-damages for loss of goodwill, work stoppage, computer failure or malfunction, or
-any and all other commercial damages or losses), even if such Contributor has
-been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability.
-
-While redistributing the Work or Derivative Works thereof, You may choose to
-offer, and charge a fee for, acceptance of support, warranty, indemnity, or
-other liability obligations and/or rights consistent with this License. However,
-in accepting such obligations, You may act only on Your own behalf and on Your
-sole responsibility, not on behalf of any other Contributor, and only if You
-agree to indemnify, defend, and hold each Contributor harmless for any liability
-incurred by, or claims asserted against, such Contributor by reason of your
-accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work
-
-To apply the Apache License to your work, attach the following boilerplate
-notice, with the fields enclosed by brackets "[]" replaced with your own
-identifying information. (Don't include the brackets!) The text should be
-enclosed in the appropriate comment syntax for the file format. We also
-recommend that a file or class name and description of purpose be included on
-the same "printed page" as the copyright notice for easier identification within
-third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/golang/groupcache/lru/lru.go b/vendor/github.com/golang/groupcache/lru/lru.go
deleted file mode 100644
index eac1c7664..000000000
--- a/vendor/github.com/golang/groupcache/lru/lru.go
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
-Copyright 2013 Google Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package lru implements an LRU cache.
-package lru
-
-import "container/list"
-
-// Cache is an LRU cache. It is not safe for concurrent access.
-type Cache struct {
- // MaxEntries is the maximum number of cache entries before
- // an item is evicted. Zero means no limit.
- MaxEntries int
-
- // OnEvicted optionally specifies a callback function to be
- // executed when an entry is purged from the cache.
- OnEvicted func(key Key, value interface{})
-
- ll *list.List
- cache map[interface{}]*list.Element
-}
-
-// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators
-type Key interface{}
-
-type entry struct {
- key Key
- value interface{}
-}
-
-// New creates a new Cache.
-// If maxEntries is zero, the cache has no limit and it's assumed
-// that eviction is done by the caller.
-func New(maxEntries int) *Cache {
- return &Cache{
- MaxEntries: maxEntries,
- ll: list.New(),
- cache: make(map[interface{}]*list.Element),
- }
-}
-
-// Add adds a value to the cache.
-func (c *Cache) Add(key Key, value interface{}) {
- if c.cache == nil {
- c.cache = make(map[interface{}]*list.Element)
- c.ll = list.New()
- }
- if ee, ok := c.cache[key]; ok {
- c.ll.MoveToFront(ee)
- ee.Value.(*entry).value = value
- return
- }
- ele := c.ll.PushFront(&entry{key, value})
- c.cache[key] = ele
- if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries {
- c.RemoveOldest()
- }
-}
-
-// Get looks up a key's value from the cache.
-func (c *Cache) Get(key Key) (value interface{}, ok bool) {
- if c.cache == nil {
- return
- }
- if ele, hit := c.cache[key]; hit {
- c.ll.MoveToFront(ele)
- return ele.Value.(*entry).value, true
- }
- return
-}
-
-// Remove removes the provided key from the cache.
-func (c *Cache) Remove(key Key) {
- if c.cache == nil {
- return
- }
- if ele, hit := c.cache[key]; hit {
- c.removeElement(ele)
- }
-}
-
-// RemoveOldest removes the oldest item from the cache.
-func (c *Cache) RemoveOldest() {
- if c.cache == nil {
- return
- }
- ele := c.ll.Back()
- if ele != nil {
- c.removeElement(ele)
- }
-}
-
-func (c *Cache) removeElement(e *list.Element) {
- c.ll.Remove(e)
- kv := e.Value.(*entry)
- delete(c.cache, kv.key)
- if c.OnEvicted != nil {
- c.OnEvicted(kv.key, kv.value)
- }
-}
-
-// Len returns the number of items in the cache.
-func (c *Cache) Len() int {
- if c.cache == nil {
- return 0
- }
- return c.ll.Len()
-}
-
-// Clear purges all stored items from the cache.
-func (c *Cache) Clear() {
- if c.OnEvicted != nil {
- for _, e := range c.cache {
- kv := e.Value.(*entry)
- c.OnEvicted(kv.key, kv.value)
- }
- }
- c.ll = nil
- c.cache = nil
-}
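For reference, the removed lru package was a small cache that is not safe for concurrent access; a minimal sketch of the API this vendored copy provided, per its own doc comments:

```go
package main

import (
	"fmt"

	"github.com/golang/groupcache/lru"
)

func main() {
	c := lru.New(2) // MaxEntries = 2; zero would mean no limit
	c.OnEvicted = func(key lru.Key, value interface{}) {
		fmt.Println("evicted:", key)
	}
	c.Add("a", 1)
	c.Add("b", 2)
	c.Get("a")           // touch "a" so it becomes most recently used
	c.Add("c", 3)        // prints "evicted: b", the least recently used entry
	fmt.Println(c.Len()) // 2
}
```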
diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go
deleted file mode 100644
index fdff3fdb4..000000000
--- a/vendor/github.com/golang/protobuf/ptypes/any.go
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ptypes
-
-import (
- "fmt"
- "strings"
-
- "github.com/golang/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
-
- anypb "github.com/golang/protobuf/ptypes/any"
-)
-
-const urlPrefix = "type.googleapis.com/"
-
-// AnyMessageName returns the message name contained in an anypb.Any message.
-// Most type assertions should use the Is function instead.
-//
-// Deprecated: Call the any.MessageName method instead.
-func AnyMessageName(any *anypb.Any) (string, error) {
- name, err := anyMessageName(any)
- return string(name), err
-}
-func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) {
- if any == nil {
- return "", fmt.Errorf("message is nil")
- }
- name := protoreflect.FullName(any.TypeUrl)
- if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 {
- name = name[i+len("/"):]
- }
- if !name.IsValid() {
- return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
- }
- return name, nil
-}
-
-// MarshalAny marshals the given message m into an anypb.Any message.
-//
-// Deprecated: Call the anypb.New function instead.
-func MarshalAny(m proto.Message) (*anypb.Any, error) {
- switch dm := m.(type) {
- case DynamicAny:
- m = dm.Message
- case *DynamicAny:
- if dm == nil {
- return nil, proto.ErrNil
- }
- m = dm.Message
- }
- b, err := proto.Marshal(m)
- if err != nil {
- return nil, err
- }
- return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil
-}
-
-// Empty returns a new message of the type specified in an anypb.Any message.
-// It returns protoregistry.NotFound if the corresponding message type could not
-// be resolved in the global registry.
-//
-// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead
-// to resolve the message name and create a new instance of it.
-func Empty(any *anypb.Any) (proto.Message, error) {
- name, err := anyMessageName(any)
- if err != nil {
- return nil, err
- }
- mt, err := protoregistry.GlobalTypes.FindMessageByName(name)
- if err != nil {
- return nil, err
- }
- return proto.MessageV1(mt.New().Interface()), nil
-}
-
-// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message
-// into the provided message m. It returns an error if the target message
-// does not match the type in the Any message or if an unmarshal error occurs.
-//
-// The target message m may be a *DynamicAny message. If the underlying message
-// type could not be resolved, then this returns protoregistry.NotFound.
-//
-// Deprecated: Call the any.UnmarshalTo method instead.
-func UnmarshalAny(any *anypb.Any, m proto.Message) error {
- if dm, ok := m.(*DynamicAny); ok {
- if dm.Message == nil {
- var err error
- dm.Message, err = Empty(any)
- if err != nil {
- return err
- }
- }
- m = dm.Message
- }
-
- anyName, err := AnyMessageName(any)
- if err != nil {
- return err
- }
- msgName := proto.MessageName(m)
- if anyName != msgName {
- return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName)
- }
- return proto.Unmarshal(any.Value, m)
-}
-
-// Is reports whether the Any message contains a message of the specified type.
-//
-// Deprecated: Call the any.MessageIs method instead.
-func Is(any *anypb.Any, m proto.Message) bool {
- if any == nil || m == nil {
- return false
- }
- name := proto.MessageName(m)
- if !strings.HasSuffix(any.TypeUrl, name) {
- return false
- }
- return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/'
-}
-
-// DynamicAny is a value that can be passed to UnmarshalAny to automatically
-// allocate a proto.Message for the type specified in an anypb.Any message.
-// The allocated message is stored in the embedded proto.Message.
-//
-// Example:
-//
-// var x ptypes.DynamicAny
-// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
-// fmt.Printf("unmarshaled message: %v", x.Message)
-//
-// Deprecated: Use the any.UnmarshalNew method instead to unmarshal
-// the any message contents into a new instance of the underlying message.
-type DynamicAny struct{ proto.Message }
-
-func (m DynamicAny) String() string {
- if m.Message == nil {
- return ""
- }
- return m.Message.String()
-}
-func (m DynamicAny) Reset() {
- if m.Message == nil {
- return
- }
- m.Message.Reset()
-}
-func (m DynamicAny) ProtoMessage() {
- return
-}
-func (m DynamicAny) ProtoReflect() protoreflect.Message {
- if m.Message == nil {
- return nil
- }
- return dynamicAny{proto.MessageReflect(m.Message)}
-}
-
-type dynamicAny struct{ protoreflect.Message }
-
-func (m dynamicAny) Type() protoreflect.MessageType {
- return dynamicAnyType{m.Message.Type()}
-}
-func (m dynamicAny) New() protoreflect.Message {
- return dynamicAnyType{m.Message.Type()}.New()
-}
-func (m dynamicAny) Interface() protoreflect.ProtoMessage {
- return DynamicAny{proto.MessageV1(m.Message.Interface())}
-}
-
-type dynamicAnyType struct{ protoreflect.MessageType }
-
-func (t dynamicAnyType) New() protoreflect.Message {
- return dynamicAny{t.MessageType.New()}
-}
-func (t dynamicAnyType) Zero() protoreflect.Message {
- return dynamicAny{t.MessageType.Zero()}
-}
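The deleted file is the deprecated ptypes shim for google.protobuf.Any; its own deprecation notices point at anypb. A minimal migration sketch using those documented replacements:

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// anypb.New replaces ptypes.MarshalAny.
	a, err := anypb.New(durationpb.New(time.Minute))
	if err != nil {
		panic(err)
	}
	fmt.Println(a.MessageName()) // google.protobuf.Duration

	// a.UnmarshalTo replaces ptypes.UnmarshalAny.
	var d durationpb.Duration
	if err := a.UnmarshalTo(&d); err != nil {
		panic(err)
	}
	fmt.Println(d.AsDuration()) // 1m0s
}
```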
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
deleted file mode 100644
index 0ef27d33d..000000000
--- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: github.com/golang/protobuf/ptypes/any/any.proto
-
-package any
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- anypb "google.golang.org/protobuf/types/known/anypb"
- reflect "reflect"
-)
-
-// Symbols defined in public import of google/protobuf/any.proto.
-
-type Any = anypb.Any
-
-var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor
-
-var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{
- 0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
- 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
- 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29,
- 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e,
- 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65,
- 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x33,
-}
-
-var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{}
-var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{
- 0, // [0:0] is the sub-list for method output_type
- 0, // [0:0] is the sub-list for method input_type
- 0, // [0:0] is the sub-list for extension type_name
- 0, // [0:0] is the sub-list for extension extendee
- 0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() }
-func file_github_com_golang_protobuf_ptypes_any_any_proto_init() {
- if File_github_com_golang_protobuf_ptypes_any_any_proto != nil {
- return
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 0,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes,
- DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs,
- }.Build()
- File_github_com_golang_protobuf_ptypes_any_any_proto = out.File
- file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil
- file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil
- file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go
deleted file mode 100644
index d3c33259d..000000000
--- a/vendor/github.com/golang/protobuf/ptypes/doc.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package ptypes provides functionality for interacting with well-known types.
-//
-// Deprecated: Well-known types have specialized functionality directly
-// injected into the generated packages for each message type.
-// See the deprecation notice for each function for the suggested alternative.
-package ptypes
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go
deleted file mode 100644
index b2b55dd85..000000000
--- a/vendor/github.com/golang/protobuf/ptypes/duration.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ptypes
-
-import (
- "errors"
- "fmt"
- "time"
-
- durationpb "github.com/golang/protobuf/ptypes/duration"
-)
-
-// Range of google.protobuf.Duration as specified in duration.proto.
-// This is about 10,000 years in seconds.
-const (
- maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
- minSeconds = -maxSeconds
-)
-
-// Duration converts a durationpb.Duration to a time.Duration.
-// Duration returns an error if dur is invalid or overflows a time.Duration.
-//
-// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead.
-func Duration(dur *durationpb.Duration) (time.Duration, error) {
- if err := validateDuration(dur); err != nil {
- return 0, err
- }
- d := time.Duration(dur.Seconds) * time.Second
- if int64(d/time.Second) != dur.Seconds {
- return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
- }
- if dur.Nanos != 0 {
- d += time.Duration(dur.Nanos) * time.Nanosecond
- if (d < 0) != (dur.Nanos < 0) {
- return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
- }
- }
- return d, nil
-}
-
-// DurationProto converts a time.Duration to a durationpb.Duration.
-//
-// Deprecated: Call the durationpb.New function instead.
-func DurationProto(d time.Duration) *durationpb.Duration {
- nanos := d.Nanoseconds()
- secs := nanos / 1e9
- nanos -= secs * 1e9
- return &durationpb.Duration{
- Seconds: int64(secs),
- Nanos: int32(nanos),
- }
-}
-
-// validateDuration determines whether the durationpb.Duration is valid
-// according to the definition in google/protobuf/duration.proto.
-// A valid durpb.Duration may still be too large to fit into a time.Duration
-// Note that the range of durationpb.Duration is about 10,000 years,
-// while the range of time.Duration is about 290 years.
-func validateDuration(dur *durationpb.Duration) error {
- if dur == nil {
- return errors.New("duration: nil Duration")
- }
- if dur.Seconds < minSeconds || dur.Seconds > maxSeconds {
- return fmt.Errorf("duration: %v: seconds out of range", dur)
- }
- if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 {
- return fmt.Errorf("duration: %v: nanos out of range", dur)
- }
- // Seconds and Nanos must have the same sign, unless d.Nanos is zero.
- if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) {
- return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur)
- }
- return nil
-}
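Likewise, the deleted duration helpers name their durationpb replacements in the deprecation notices; a minimal migration sketch:

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// durationpb.New replaces ptypes.DurationProto.
	d := durationpb.New(90 * time.Second)

	// d.CheckValid plus d.AsDuration replace ptypes.Duration, which
	// validated and converted in a single call.
	if err := d.CheckValid(); err != nil {
		panic(err)
	}
	fmt.Println(d.AsDuration()) // 1m30s
}
```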
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
deleted file mode 100644
index d0079ee3e..000000000
--- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: github.com/golang/protobuf/ptypes/duration/duration.proto
-
-package duration
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- durationpb "google.golang.org/protobuf/types/known/durationpb"
- reflect "reflect"
-)
-
-// Symbols defined in public import of google/protobuf/duration.proto.
-
-type Duration = durationpb.Duration
-
-var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor
-
-var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{
- 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
- 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
- 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
- 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
- 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
- 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{}
-var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{
- 0, // [0:0] is the sub-list for method output_type
- 0, // [0:0] is the sub-list for method input_type
- 0, // [0:0] is the sub-list for extension type_name
- 0, // [0:0] is the sub-list for extension extendee
- 0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() }
-func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() {
- if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil {
- return
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 0,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes,
- DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs,
- }.Build()
- File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File
- file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil
- file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil
- file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
deleted file mode 100644
index 8368a3f70..000000000
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ptypes
-
-import (
- "errors"
- "fmt"
- "time"
-
- timestamppb "github.com/golang/protobuf/ptypes/timestamp"
-)
-
-// Range of google.protobuf.Duration as specified in timestamp.proto.
-const (
- // Seconds field of the earliest valid Timestamp.
- // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
- minValidSeconds = -62135596800
- // Seconds field just after the latest valid Timestamp.
- // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
- maxValidSeconds = 253402300800
-)
-
-// Timestamp converts a timestamppb.Timestamp to a time.Time.
-// It returns an error if the argument is invalid.
-//
-// Unlike most Go functions, if Timestamp returns an error, the first return
-// value is not the zero time.Time. Instead, it is the value obtained from the
-// time.Unix function when passed the contents of the Timestamp, in the UTC
-// locale. This may or may not be a meaningful time; many invalid Timestamps
-// do map to valid time.Times.
-//
-// A nil Timestamp returns an error. The first return value in that case is
-// undefined.
-//
-// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead.
-func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
- // Don't return the zero value on error, because it corresponds to a valid
- // timestamp. Instead return whatever time.Unix gives us.
- var t time.Time
- if ts == nil {
- t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
- } else {
- t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
- }
- return t, validateTimestamp(ts)
-}
-
-// TimestampNow returns a google.protobuf.Timestamp for the current time.
-//
-// Deprecated: Call the timestamppb.Now function instead.
-func TimestampNow() *timestamppb.Timestamp {
- ts, err := TimestampProto(time.Now())
- if err != nil {
- panic("ptypes: time.Now() out of Timestamp range")
- }
- return ts
-}
-
-// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
-// It returns an error if the resulting Timestamp is invalid.
-//
-// Deprecated: Call the timestamppb.New function instead.
-func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
- ts := &timestamppb.Timestamp{
- Seconds: t.Unix(),
- Nanos: int32(t.Nanosecond()),
- }
- if err := validateTimestamp(ts); err != nil {
- return nil, err
- }
- return ts, nil
-}
-
-// TimestampString returns the RFC 3339 string for valid Timestamps.
-// For invalid Timestamps, it returns an error message in parentheses.
-//
-// Deprecated: Call the ts.AsTime method instead,
-// followed by a call to the Format method on the time.Time value.
-func TimestampString(ts *timestamppb.Timestamp) string {
- t, err := Timestamp(ts)
- if err != nil {
- return fmt.Sprintf("(%v)", err)
- }
- return t.Format(time.RFC3339Nano)
-}
-
-// validateTimestamp determines whether a Timestamp is valid.
-// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
-// and has a Nanos field in the range [0, 1e9).
-//
-// If the Timestamp is valid, validateTimestamp returns nil.
-// Otherwise, it returns an error that describes the problem.
-//
-// Every valid Timestamp can be represented by a time.Time,
-// but the converse is not true.
-func validateTimestamp(ts *timestamppb.Timestamp) error {
- if ts == nil {
- return errors.New("timestamp: nil Timestamp")
- }
- if ts.Seconds < minValidSeconds {
- return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
- }
- if ts.Seconds >= maxValidSeconds {
- return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
- }
- if ts.Nanos < 0 || ts.Nanos >= 1e9 {
- return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
- }
- return nil
-}
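And the timestamp helpers migrate the same way, per their deprecation notices:

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// timestamppb.New replaces ptypes.TimestampProto,
	// and timestamppb.Now replaces ptypes.TimestampNow.
	ts := timestamppb.New(time.Date(2024, 1, 2, 3, 4, 5, 0, time.UTC))

	// ts.CheckValid plus ts.AsTime replace ptypes.Timestamp.
	if err := ts.CheckValid(); err != nil {
		panic(err)
	}
	fmt.Println(ts.AsTime().Format(time.RFC3339)) // 2024-01-02T03:04:05Z
}
```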
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
deleted file mode 100644
index a76f80760..000000000
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
-
-package timestamp
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- timestamppb "google.golang.org/protobuf/types/known/timestamppb"
- reflect "reflect"
-)
-
-// Symbols defined in public import of google/protobuf/timestamp.proto.
-
-type Timestamp = timestamppb.Timestamp
-
-var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor
-
-var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{
- 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
- 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
- 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69,
- 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
- 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37,
- 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
- 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
- 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69,
- 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x33,
-}
-
-var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{}
-var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{
- 0, // [0:0] is the sub-list for method output_type
- 0, // [0:0] is the sub-list for method input_type
- 0, // [0:0] is the sub-list for extension type_name
- 0, // [0:0] is the sub-list for extension extendee
- 0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() }
-func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() {
- if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil {
- return
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 0,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes,
- DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs,
- }.Build()
- File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File
- file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil
- file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil
- file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/google/btree/LICENSE b/vendor/github.com/google/btree/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/google/btree/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/google/btree/README.md b/vendor/github.com/google/btree/README.md
new file mode 100644
index 000000000..eab5dbf7b
--- /dev/null
+++ b/vendor/github.com/google/btree/README.md
@@ -0,0 +1,10 @@
+# BTree implementation for Go
+
+This package provides an in-memory B-Tree implementation for Go, useful as
+an ordered, mutable data structure.
+
+The API is based off of the wonderful
+http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to
+act as a drop-in replacement for gollrb trees.
+
+See http://godoc.org/github.com/google/btree for documentation.
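The vendored copy added below is the pre-generics build of the package (note the `!go1.18` constraint). A minimal usage sketch of its ordered-item API, using the package's `Int` convenience type, which implements `Item`:

```go
package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	tr := btree.New(2) // degree 2: each node holds 1-3 items
	for i := 0; i < 10; i++ {
		tr.ReplaceOrInsert(btree.Int(i))
	}
	fmt.Println(tr.Len())              // 10
	fmt.Println(tr.Get(btree.Int(3)))  // 3
	fmt.Println(tr.Has(btree.Int(42))) // false

	// In-order traversal over [4, 7).
	tr.AscendRange(btree.Int(4), btree.Int(7), func(i btree.Item) bool {
		fmt.Println(i) // 4, 5, 6
		return true
	})
}
```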
diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go
new file mode 100644
index 000000000..6f5184fef
--- /dev/null
+++ b/vendor/github.com/google/btree/btree.go
@@ -0,0 +1,893 @@
+// Copyright 2014 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !go1.18
+// +build !go1.18
+
+// Package btree implements in-memory B-Trees of arbitrary degree.
+//
+// btree implements an in-memory B-Tree for use as an ordered data structure.
+// It is not meant for persistent storage solutions.
+//
+// It has a flatter structure than an equivalent red-black or other binary tree,
+// which in some cases yields better memory usage and/or performance.
+// See some discussion on the matter here:
+// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html
+// Note, though, that this project is in no way related to the C++ B-Tree
+// implementation written about there.
+//
+// Within this tree, each node contains a slice of items and a (possibly nil)
+// slice of children. For basic numeric values or raw structs, this can cause
+// efficiency differences when compared to equivalent C++ template code that
+// stores values in arrays within the node:
+// * Due to the overhead of storing values as interfaces (each
+// value needs to be stored as the value itself, then 2 words for the
+// interface pointing to that value and its type), resulting in higher
+// memory use.
+// * Since interfaces can point to values anywhere in memory, values are
+// most likely not stored in contiguous blocks, resulting in a higher
+// number of cache misses.
+// These issues don't tend to matter, though, when working with strings or other
+// heap-allocated structures, since C++-equivalent structures also must store
+// pointers and also distribute their values across the heap.
+//
+// This implementation is designed to be a drop-in replacement to gollrb.LLRB
+// trees, (http://github.com/petar/gollrb), an excellent and probably the most
+// widely used ordered tree implementation in the Go ecosystem currently.
+// Its functions, therefore, exactly mirror those of
+// llrb.LLRB where possible. Unlike gollrb, though, we currently don't
+// support storing multiple equivalent values.
+package btree
+
+import (
+ "fmt"
+ "io"
+ "sort"
+ "strings"
+ "sync"
+)
+
+// Item represents a single object in the tree.
+type Item interface {
+ // Less tests whether the current item is less than the given argument.
+ //
+ // This must provide a strict weak ordering.
+ // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
+ // hold one of either a or b in the tree).
+ Less(than Item) bool
+}
+
+const (
+ DefaultFreeListSize = 32
+)
+
+var (
+ nilItems = make(items, 16)
+ nilChildren = make(children, 16)
+)
+
+// FreeList represents a free list of btree nodes. By default each
+// BTree has its own FreeList, but multiple BTrees can share the same
+// FreeList.
+// Two Btrees using the same freelist are safe for concurrent write access.
+type FreeList struct {
+ mu sync.Mutex
+ freelist []*node
+}
+
+// NewFreeList creates a new free list.
+// size is the maximum size of the returned free list.
+func NewFreeList(size int) *FreeList {
+ return &FreeList{freelist: make([]*node, 0, size)}
+}
+
+func (f *FreeList) newNode() (n *node) {
+ f.mu.Lock()
+ index := len(f.freelist) - 1
+ if index < 0 {
+ f.mu.Unlock()
+ return new(node)
+ }
+ n = f.freelist[index]
+ f.freelist[index] = nil
+ f.freelist = f.freelist[:index]
+ f.mu.Unlock()
+ return
+}
+
+// freeNode adds the given node to the list, returning true if it was added
+// and false if it was discarded.
+func (f *FreeList) freeNode(n *node) (out bool) {
+ f.mu.Lock()
+ if len(f.freelist) < cap(f.freelist) {
+ f.freelist = append(f.freelist, n)
+ out = true
+ }
+ f.mu.Unlock()
+ return
+}
+
+// ItemIterator allows callers of Ascend* to iterate in-order over portions of
+// the tree. When this function returns false, iteration will stop and the
+// associated Ascend* function will immediately return.
+type ItemIterator func(i Item) bool
+
+// New creates a new B-Tree with the given degree.
+//
+// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
+// and 2-4 children).
+func New(degree int) *BTree {
+ return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize))
+}
+
+// NewWithFreeList creates a new B-Tree that uses the given node free list.
+func NewWithFreeList(degree int, f *FreeList) *BTree {
+ if degree <= 1 {
+ panic("bad degree")
+ }
+ return &BTree{
+ degree: degree,
+ cow: &copyOnWriteContext{freelist: f},
+ }
+}
+
+// items stores items in a node.
+type items []Item
+
+// insertAt inserts a value into the given index, pushing all subsequent values
+// forward.
+func (s *items) insertAt(index int, item Item) {
+ *s = append(*s, nil)
+ if index < len(*s) {
+ copy((*s)[index+1:], (*s)[index:])
+ }
+ (*s)[index] = item
+}
+
+// removeAt removes a value at a given index, pulling all subsequent values
+// back.
+func (s *items) removeAt(index int) Item {
+ item := (*s)[index]
+ copy((*s)[index:], (*s)[index+1:])
+ (*s)[len(*s)-1] = nil
+ *s = (*s)[:len(*s)-1]
+ return item
+}
+
+// pop removes and returns the last element in the list.
+func (s *items) pop() (out Item) {
+ index := len(*s) - 1
+ out = (*s)[index]
+ (*s)[index] = nil
+ *s = (*s)[:index]
+ return
+}
+
+// truncate truncates this instance at index so that it contains only the
+// first index items. index must be less than or equal to length.
+func (s *items) truncate(index int) {
+ var toClear items
+ *s, toClear = (*s)[:index], (*s)[index:]
+ for len(toClear) > 0 {
+ toClear = toClear[copy(toClear, nilItems):]
+ }
+}
+
+// find returns the index where the given item should be inserted into this
+// list. 'found' is true if the item already exists in the list at the given
+// index.
+func (s items) find(item Item) (index int, found bool) {
+ i := sort.Search(len(s), func(i int) bool {
+ return item.Less(s[i])
+ })
+ if i > 0 && !s[i-1].Less(item) {
+ return i - 1, true
+ }
+ return i, false
+}
+
+// children stores child nodes in a node.
+type children []*node
+
+// insertAt inserts a value into the given index, pushing all subsequent values
+// forward.
+func (s *children) insertAt(index int, n *node) {
+ *s = append(*s, nil)
+ if index < len(*s) {
+ copy((*s)[index+1:], (*s)[index:])
+ }
+ (*s)[index] = n
+}
+
+// removeAt removes a value at a given index, pulling all subsequent values
+// back.
+func (s *children) removeAt(index int) *node {
+ n := (*s)[index]
+ copy((*s)[index:], (*s)[index+1:])
+ (*s)[len(*s)-1] = nil
+ *s = (*s)[:len(*s)-1]
+ return n
+}
+
+// pop removes and returns the last element in the list.
+func (s *children) pop() (out *node) {
+ index := len(*s) - 1
+ out = (*s)[index]
+ (*s)[index] = nil
+ *s = (*s)[:index]
+ return
+}
+
+// truncate truncates this instance at index so that it contains only the
+// first index children. index must be less than or equal to length.
+func (s *children) truncate(index int) {
+ var toClear children
+ *s, toClear = (*s)[:index], (*s)[index:]
+ for len(toClear) > 0 {
+ toClear = toClear[copy(toClear, nilChildren):]
+ }
+}
+
+// node is an internal node in a tree.
+//
+// It must at all times maintain the invariant that either
+// * len(children) == 0, len(items) unconstrained
+// * len(children) == len(items) + 1
+type node struct {
+ items items
+ children children
+ cow *copyOnWriteContext
+}
+
+func (n *node) mutableFor(cow *copyOnWriteContext) *node {
+ if n.cow == cow {
+ return n
+ }
+ out := cow.newNode()
+ if cap(out.items) >= len(n.items) {
+ out.items = out.items[:len(n.items)]
+ } else {
+ out.items = make(items, len(n.items), cap(n.items))
+ }
+ copy(out.items, n.items)
+ // Copy children
+ if cap(out.children) >= len(n.children) {
+ out.children = out.children[:len(n.children)]
+ } else {
+ out.children = make(children, len(n.children), cap(n.children))
+ }
+ copy(out.children, n.children)
+ return out
+}
+
+func (n *node) mutableChild(i int) *node {
+ c := n.children[i].mutableFor(n.cow)
+ n.children[i] = c
+ return c
+}
+
+// split splits the given node at the given index. The current node shrinks,
+// and this function returns the item that existed at that index and a new node
+// containing all items/children after it.
+func (n *node) split(i int) (Item, *node) {
+ item := n.items[i]
+ next := n.cow.newNode()
+ next.items = append(next.items, n.items[i+1:]...)
+ n.items.truncate(i)
+ if len(n.children) > 0 {
+ next.children = append(next.children, n.children[i+1:]...)
+ n.children.truncate(i + 1)
+ }
+ return item, next
+}
+
+// maybeSplitChild checks if a child should be split, and if so splits it.
+// Returns whether or not a split occurred.
+func (n *node) maybeSplitChild(i, maxItems int) bool {
+ if len(n.children[i].items) < maxItems {
+ return false
+ }
+ first := n.mutableChild(i)
+ item, second := first.split(maxItems / 2)
+ n.items.insertAt(i, item)
+ n.children.insertAt(i+1, second)
+ return true
+}
+
+// insert inserts an item into the subtree rooted at this node, making sure
+// no nodes in the subtree exceed maxItems items. Should an equivalent item be
+// found/replaced by insert, it will be returned.
+func (n *node) insert(item Item, maxItems int) Item {
+ i, found := n.items.find(item)
+ if found {
+ out := n.items[i]
+ n.items[i] = item
+ return out
+ }
+ if len(n.children) == 0 {
+ n.items.insertAt(i, item)
+ return nil
+ }
+ if n.maybeSplitChild(i, maxItems) {
+ inTree := n.items[i]
+ switch {
+ case item.Less(inTree):
+ // no change, we want first split node
+ case inTree.Less(item):
+ i++ // we want second split node
+ default:
+ out := n.items[i]
+ n.items[i] = item
+ return out
+ }
+ }
+ return n.mutableChild(i).insert(item, maxItems)
+}
+
+// get finds the given key in the subtree and returns it.
+func (n *node) get(key Item) Item {
+ i, found := n.items.find(key)
+ if found {
+ return n.items[i]
+ } else if len(n.children) > 0 {
+ return n.children[i].get(key)
+ }
+ return nil
+}
+
+// min returns the first item in the subtree.
+func min(n *node) Item {
+ if n == nil {
+ return nil
+ }
+ for len(n.children) > 0 {
+ n = n.children[0]
+ }
+ if len(n.items) == 0 {
+ return nil
+ }
+ return n.items[0]
+}
+
+// max returns the last item in the subtree.
+func max(n *node) Item {
+ if n == nil {
+ return nil
+ }
+ for len(n.children) > 0 {
+ n = n.children[len(n.children)-1]
+ }
+ if len(n.items) == 0 {
+ return nil
+ }
+ return n.items[len(n.items)-1]
+}
+
+// toRemove details what item to remove in a node.remove call.
+type toRemove int
+
+const (
+ removeItem toRemove = iota // removes the given item
+ removeMin // removes smallest item in the subtree
+ removeMax // removes largest item in the subtree
+)
+
+// remove removes an item from the subtree rooted at this node.
+func (n *node) remove(item Item, minItems int, typ toRemove) Item {
+ var i int
+ var found bool
+ switch typ {
+ case removeMax:
+ if len(n.children) == 0 {
+ return n.items.pop()
+ }
+ i = len(n.items)
+ case removeMin:
+ if len(n.children) == 0 {
+ return n.items.removeAt(0)
+ }
+ i = 0
+ case removeItem:
+ i, found = n.items.find(item)
+ if len(n.children) == 0 {
+ if found {
+ return n.items.removeAt(i)
+ }
+ return nil
+ }
+ default:
+ panic("invalid type")
+ }
+ // If we get to here, we have children.
+ if len(n.children[i].items) <= minItems {
+ return n.growChildAndRemove(i, item, minItems, typ)
+ }
+ child := n.mutableChild(i)
+ // Either we had enough items to begin with, or we've done some
+ // merging/stealing, because we've got enough now and we're ready to return
+ // stuff.
+ if found {
+ // The item exists at index 'i', and the child we've selected can give us a
+ // predecessor, since if we've gotten here it's got > minItems items in it.
+ out := n.items[i]
+ // We use our special-case 'remove' call with typ=maxItem to pull the
+ // predecessor of item i (the rightmost leaf of our immediate left child)
+ // and set it into where we pulled the item from.
+ n.items[i] = child.remove(nil, minItems, removeMax)
+ return out
+ }
+ // Final recursive call. Once we're here, we know that the item isn't in this
+ // node and that the child is big enough to remove from.
+ return child.remove(item, minItems, typ)
+}
+
+// growChildAndRemove grows child 'i' to make sure it's possible to remove an
+// item from it while keeping it at minItems, then calls remove to actually
+// remove it.
+//
+// Most documentation says we have to do two sets of special casing:
+// 1) item is in this node
+// 2) item is in child
+// In both cases, we need to handle the two subcases:
+// A) node has enough values that it can spare one
+// B) node doesn't have enough values
+// For the latter, we have to check:
+// a) left sibling has node to spare
+// b) right sibling has node to spare
+// c) we must merge
+// To simplify our code here, we handle cases #1 and #2 the same:
+// If a node doesn't have enough items, we make sure it does (using a,b,c).
+// We then simply redo our remove call, and the second time (regardless of
+// whether we're in case 1 or 2), we'll have enough items and can guarantee
+// that we hit case A.
+func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item {
+ if i > 0 && len(n.children[i-1].items) > minItems {
+ // Steal from left child
+ child := n.mutableChild(i)
+ stealFrom := n.mutableChild(i - 1)
+ stolenItem := stealFrom.items.pop()
+ child.items.insertAt(0, n.items[i-1])
+ n.items[i-1] = stolenItem
+ if len(stealFrom.children) > 0 {
+ child.children.insertAt(0, stealFrom.children.pop())
+ }
+ } else if i < len(n.items) && len(n.children[i+1].items) > minItems {
+ // steal from right child
+ child := n.mutableChild(i)
+ stealFrom := n.mutableChild(i + 1)
+ stolenItem := stealFrom.items.removeAt(0)
+ child.items = append(child.items, n.items[i])
+ n.items[i] = stolenItem
+ if len(stealFrom.children) > 0 {
+ child.children = append(child.children, stealFrom.children.removeAt(0))
+ }
+ } else {
+ if i >= len(n.items) {
+ i--
+ }
+ child := n.mutableChild(i)
+ // merge with right child
+ mergeItem := n.items.removeAt(i)
+ mergeChild := n.children.removeAt(i + 1).mutableFor(n.cow)
+ child.items = append(child.items, mergeItem)
+ child.items = append(child.items, mergeChild.items...)
+ child.children = append(child.children, mergeChild.children...)
+ n.cow.freeNode(mergeChild)
+ }
+ return n.remove(item, minItems, typ)
+}
+
+type direction int
+
+const (
+ descend = direction(-1)
+ ascend = direction(+1)
+)
+
+// iterate provides a simple method for iterating over elements in the tree.
+//
+// When ascending, the 'start' should be less than 'stop' and when descending,
+// the 'start' should be greater than 'stop'. Setting 'includeStart' to true
+// will force the iterator to include the first item when it equals 'start',
+// thus creating a "greaterOrEqual" or "lessThanEqual" query rather than just a
+// "greaterThan" or "lessThan" query.
+func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) {
+ var ok, found bool
+ var index int
+ switch dir {
+ case ascend:
+ if start != nil {
+ index, _ = n.items.find(start)
+ }
+ for i := index; i < len(n.items); i++ {
+ if len(n.children) > 0 {
+ if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+ return hit, false
+ }
+ }
+ if !includeStart && !hit && start != nil && !start.Less(n.items[i]) {
+ hit = true
+ continue
+ }
+ hit = true
+ if stop != nil && !n.items[i].Less(stop) {
+ return hit, false
+ }
+ if !iter(n.items[i]) {
+ return hit, false
+ }
+ }
+ if len(n.children) > 0 {
+ if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+ return hit, false
+ }
+ }
+ case descend:
+ if start != nil {
+ index, found = n.items.find(start)
+ if !found {
+ index = index - 1
+ }
+ } else {
+ index = len(n.items) - 1
+ }
+ for i := index; i >= 0; i-- {
+ if start != nil && !n.items[i].Less(start) {
+ if !includeStart || hit || start.Less(n.items[i]) {
+ continue
+ }
+ }
+ if len(n.children) > 0 {
+ if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+ return hit, false
+ }
+ }
+ if stop != nil && !stop.Less(n.items[i]) {
+ return hit, false // continue
+ }
+ hit = true
+ if !iter(n.items[i]) {
+ return hit, false
+ }
+ }
+ if len(n.children) > 0 {
+ if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+ return hit, false
+ }
+ }
+ }
+ return hit, true
+}
+
+// print is used for testing/debugging purposes.
+func (n *node) print(w io.Writer, level int) {
+ fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items)
+ for _, c := range n.children {
+ c.print(w, level+1)
+ }
+}
+
+// BTree is an implementation of a B-Tree.
+//
+// BTree stores Item instances in an ordered structure, allowing easy insertion,
+// removal, and iteration.
+//
+// Write operations are not safe for concurrent mutation by multiple
+// goroutines, but Read operations are.
+type BTree struct {
+ degree int
+ length int
+ root *node
+ cow *copyOnWriteContext
+}
+
+// copyOnWriteContext pointers determine node ownership... a tree with a write
+// context equivalent to a node's write context is allowed to modify that node.
+// A tree whose write context does not match a node's is not allowed to modify
+// it, and must create a new, writable copy (i.e., it's a Clone).
+//
+// When doing any write operation, we maintain the invariant that the current
+// node's context is equal to the context of the tree that requested the write.
+// We do this by, before we descend into any node, creating a copy with the
+// correct context if the contexts don't match.
+//
+// Since the node we're currently visiting on any write has the requesting
+// tree's context, that node is modifiable in place. Children of that node may
+// not share context, but before we descend into them, we'll make a mutable
+// copy.
+type copyOnWriteContext struct {
+ freelist *FreeList
+}
+
+// Clone clones the btree, lazily. Clone should not be called concurrently,
+// but the original tree (t) and the new tree (t2) can be used concurrently
+// once the Clone call completes.
+//
+// The internal tree structure of t is marked read-only and shared between t and
+// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes
+// whenever one of t's original nodes would have been modified. Read operations
+// should have no performance degradation. Write operations for both t and t2
+// will initially experience minor slow-downs caused by additional allocs and
+// copies due to the aforementioned copy-on-write logic, but should converge to
+// the performance characteristics of the original tree.
+func (t *BTree) Clone() (t2 *BTree) {
+ // Create two entirely new copy-on-write contexts.
+ // This operation effectively creates three trees:
+ // the original, shared nodes (old t.cow)
+ // the new t.cow nodes
+ // the new out.cow nodes
+ cow1, cow2 := *t.cow, *t.cow
+ out := *t
+ t.cow = &cow1
+ out.cow = &cow2
+ return &out
+}
+
+// maxItems returns the max number of items to allow per node.
+func (t *BTree) maxItems() int {
+ return t.degree*2 - 1
+}
+
+// minItems returns the min number of items to allow per node (ignored for the
+// root node).
+func (t *BTree) minItems() int {
+ return t.degree - 1
+}
+
+func (c *copyOnWriteContext) newNode() (n *node) {
+ n = c.freelist.newNode()
+ n.cow = c
+ return
+}
+
+type freeType int
+
+const (
+ ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist)
+ ftStored // node was stored in the freelist for later use
+ ftNotOwned // node was ignored by COW, since it's owned by another one
+)
+
+// freeNode frees a node within a given COW context, if it's owned by that
+// context. It returns what happened to the node (see freeType const
+// documentation).
+func (c *copyOnWriteContext) freeNode(n *node) freeType {
+ if n.cow == c {
+ // clear to allow GC
+ n.items.truncate(0)
+ n.children.truncate(0)
+ n.cow = nil
+ if c.freelist.freeNode(n) {
+ return ftStored
+ } else {
+ return ftFreelistFull
+ }
+ } else {
+ return ftNotOwned
+ }
+}
+
+// ReplaceOrInsert adds the given item to the tree. If an item in the tree
+// already equals the given one, it is removed from the tree and returned.
+// Otherwise, nil is returned.
+//
+// nil cannot be added to the tree (will panic).
+func (t *BTree) ReplaceOrInsert(item Item) Item {
+ if item == nil {
+ panic("nil item being added to BTree")
+ }
+ if t.root == nil {
+ t.root = t.cow.newNode()
+ t.root.items = append(t.root.items, item)
+ t.length++
+ return nil
+ } else {
+ t.root = t.root.mutableFor(t.cow)
+ if len(t.root.items) >= t.maxItems() {
+ item2, second := t.root.split(t.maxItems() / 2)
+ oldroot := t.root
+ t.root = t.cow.newNode()
+ t.root.items = append(t.root.items, item2)
+ t.root.children = append(t.root.children, oldroot, second)
+ }
+ }
+ out := t.root.insert(item, t.maxItems())
+ if out == nil {
+ t.length++
+ }
+ return out
+}
+
+// Delete removes an item equal to the passed in item from the tree, returning
+// it. If no such item exists, returns nil.
+func (t *BTree) Delete(item Item) Item {
+ return t.deleteItem(item, removeItem)
+}
+
+// DeleteMin removes the smallest item in the tree and returns it.
+// If no such item exists, returns nil.
+func (t *BTree) DeleteMin() Item {
+ return t.deleteItem(nil, removeMin)
+}
+
+// DeleteMax removes the largest item in the tree and returns it.
+// If no such item exists, returns nil.
+func (t *BTree) DeleteMax() Item {
+ return t.deleteItem(nil, removeMax)
+}
+
+func (t *BTree) deleteItem(item Item, typ toRemove) Item {
+ if t.root == nil || len(t.root.items) == 0 {
+ return nil
+ }
+ t.root = t.root.mutableFor(t.cow)
+ out := t.root.remove(item, t.minItems(), typ)
+ if len(t.root.items) == 0 && len(t.root.children) > 0 {
+ oldroot := t.root
+ t.root = t.root.children[0]
+ t.cow.freeNode(oldroot)
+ }
+ if out != nil {
+ t.length--
+ }
+ return out
+}
+
+// AscendRange calls the iterator for every value in the tree within the range
+// [greaterOrEqual, lessThan), until iterator returns false.
+func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
+ if t.root == nil {
+ return
+ }
+ t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator)
+}
+
+// AscendLessThan calls the iterator for every value in the tree within the range
+// [first, pivot), until iterator returns false.
+func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) {
+ if t.root == nil {
+ return
+ }
+ t.root.iterate(ascend, nil, pivot, false, false, iterator)
+}
+
+// AscendGreaterOrEqual calls the iterator for every value in the tree within
+// the range [pivot, last], until iterator returns false.
+func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
+ if t.root == nil {
+ return
+ }
+ t.root.iterate(ascend, pivot, nil, true, false, iterator)
+}
+
+// Ascend calls the iterator for every value in the tree within the range
+// [first, last], until iterator returns false.
+func (t *BTree) Ascend(iterator ItemIterator) {
+ if t.root == nil {
+ return
+ }
+ t.root.iterate(ascend, nil, nil, false, false, iterator)
+}
+
+// DescendRange calls the iterator for every value in the tree within the range
+// [lessOrEqual, greaterThan), until iterator returns false.
+func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) {
+ if t.root == nil {
+ return
+ }
+ t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator)
+}
+
+// DescendLessOrEqual calls the iterator for every value in the tree within the range
+// [pivot, first], until iterator returns false.
+func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) {
+ if t.root == nil {
+ return
+ }
+ t.root.iterate(descend, pivot, nil, true, false, iterator)
+}
+
+// DescendGreaterThan calls the iterator for every value in the tree within
+// the range [last, pivot), until iterator returns false.
+func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) {
+ if t.root == nil {
+ return
+ }
+ t.root.iterate(descend, nil, pivot, false, false, iterator)
+}
+
+// Descend calls the iterator for every value in the tree within the range
+// [last, first], until iterator returns false.
+func (t *BTree) Descend(iterator ItemIterator) {
+ if t.root == nil {
+ return
+ }
+ t.root.iterate(descend, nil, nil, false, false, iterator)
+}
+
+// Get looks for the key item in the tree, returning it. It returns nil if
+// unable to find that item.
+func (t *BTree) Get(key Item) Item {
+ if t.root == nil {
+ return nil
+ }
+ return t.root.get(key)
+}
+
+// Min returns the smallest item in the tree, or nil if the tree is empty.
+func (t *BTree) Min() Item {
+ return min(t.root)
+}
+
+// Max returns the largest item in the tree, or nil if the tree is empty.
+func (t *BTree) Max() Item {
+ return max(t.root)
+}
+
+// Has returns true if the given key is in the tree.
+func (t *BTree) Has(key Item) bool {
+ return t.Get(key) != nil
+}
+
+// Len returns the number of items currently in the tree.
+func (t *BTree) Len() int {
+ return t.length
+}
+
+// Clear removes all items from the btree. If addNodesToFreelist is true,
+// t's nodes are added to its freelist as part of this call, until the freelist
+// is full. Otherwise, the root node is simply dereferenced and the subtree
+// left to Go's normal GC processes.
+//
+// This can be much faster
+// than calling Delete on all elements, because that requires finding/removing
+// each element in the tree and updating the tree accordingly. It also is
+// somewhat faster than creating a new tree to replace the old one, because
+// nodes from the old tree are reclaimed into the freelist for use by the new
+// one, instead of being lost to the garbage collector.
+//
+// This call takes:
+// O(1): when addNodesToFreelist is false, this is a single operation.
+// O(1): when the freelist is already full, it breaks out immediately
+// O(freelist size): when the freelist is empty and the nodes are all owned
+// by this tree, nodes are added to the freelist until full.
+// O(tree size): when all nodes are owned by another tree, all nodes are
+// iterated over looking for nodes to add to the freelist, and due to
+// ownership, none are.
+func (t *BTree) Clear(addNodesToFreelist bool) {
+ if t.root != nil && addNodesToFreelist {
+ t.root.reset(t.cow)
+ }
+ t.root, t.length = nil, 0
+}
+
+// reset returns a subtree to the freelist. It breaks out immediately if the
+// freelist is full, since the only benefit of iterating is to fill that
+// freelist up. Returns true if parent reset call should continue.
+func (n *node) reset(c *copyOnWriteContext) bool {
+ for _, child := range n.children {
+ if !child.reset(c) {
+ return false
+ }
+ }
+ return c.freeNode(n) != ftFreelistFull
+}
+
+// Int implements the Item interface for integers.
+type Int int
+
+// Less returns true if int(a) < int(b).
+func (a Int) Less(b Item) bool {
+ return a < b.(Int)
+}
diff --git a/vendor/github.com/google/btree/btree_generic.go b/vendor/github.com/google/btree/btree_generic.go
new file mode 100644
index 000000000..e44a0f488
--- /dev/null
+++ b/vendor/github.com/google/btree/btree_generic.go
@@ -0,0 +1,1083 @@
+// Copyright 2014-2022 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.18
+// +build go1.18
+
+// In Go 1.18 and beyond, a BTreeG generic is created, and BTree is a specific
+// instantiation of that generic for the Item interface, with a backwards-
+// compatible API. Before go1.18, generics are not supported,
+// and BTree is just an implementation based around the Item interface.
+
+// Package btree implements in-memory B-Trees of arbitrary degree.
+//
+// btree implements an in-memory B-Tree for use as an ordered data structure.
+// It is not meant for persistent storage solutions.
+//
+// It has a flatter structure than an equivalent red-black or other binary tree,
+// which in some cases yields better memory usage and/or performance.
+// See some discussion on the matter here:
+// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html
+// Note, though, that this project is in no way related to the C++ B-Tree
+// implementation written about there.
+//
+// Within this tree, each node contains a slice of items and a (possibly nil)
+// slice of children. For basic numeric values or raw structs, this can cause
+// efficiency differences when compared to equivalent C++ template code that
+// stores values in arrays within the node:
+// * Due to the overhead of storing values as interfaces (each
+// value needs to be stored as the value itself, then 2 words for the
+// interface pointing to that value and its type), resulting in higher
+// memory use.
+// * Since interfaces can point to values anywhere in memory, values are
+// most likely not stored in contiguous blocks, resulting in a higher
+// number of cache misses.
+// These issues don't tend to matter, though, when working with strings or other
+// heap-allocated structures, since C++-equivalent structures also must store
+// pointers and also distribute their values across the heap.
+//
+// This implementation is designed to be a drop-in replacement to gollrb.LLRB
+// trees, (http://github.com/petar/gollrb), an excellent and probably the most
+// widely used ordered tree implementation in the Go ecosystem currently.
+// Its functions, therefore, exactly mirror those of
+// llrb.LLRB where possible. Unlike gollrb, though, we currently don't
+// support storing multiple equivalent values.
+//
+// There are two implementations; those suffixed with 'G' are generics, usable
+// for any type, and require a passed-in "less" function to define their ordering.
+// Those without this suffix are specific to the 'Item' interface, and use
+// its 'Less' function for ordering.
+package btree
+
+import (
+ "fmt"
+ "io"
+ "sort"
+ "strings"
+ "sync"
+)
+
+// Item represents a single object in the tree.
+type Item interface {
+ // Less tests whether the current item is less than the given argument.
+ //
+ // This must provide a strict weak ordering.
+ // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
+ // hold one of either a or b in the tree).
+ Less(than Item) bool
+}
+
+const (
+ DefaultFreeListSize = 32
+)
+
+// FreeListG represents a free list of btree nodes. By default each
+// BTree has its own FreeList, but multiple BTrees can share the same
+// FreeList, in particular when they're created with Clone.
+// Two Btrees using the same freelist are safe for concurrent write access.
+type FreeListG[T any] struct {
+ mu sync.Mutex
+ freelist []*node[T]
+}
+
+// NewFreeListG creates a new free list.
+// size is the maximum size of the returned free list.
+func NewFreeListG[T any](size int) *FreeListG[T] {
+ return &FreeListG[T]{freelist: make([]*node[T], 0, size)}
+}
+
+func (f *FreeListG[T]) newNode() (n *node[T]) {
+ f.mu.Lock()
+ index := len(f.freelist) - 1
+ if index < 0 {
+ f.mu.Unlock()
+ return new(node[T])
+ }
+ n = f.freelist[index]
+ f.freelist[index] = nil
+ f.freelist = f.freelist[:index]
+ f.mu.Unlock()
+ return
+}
+
+func (f *FreeListG[T]) freeNode(n *node[T]) (out bool) {
+ f.mu.Lock()
+ if len(f.freelist) < cap(f.freelist) {
+ f.freelist = append(f.freelist, n)
+ out = true
+ }
+ f.mu.Unlock()
+ return
+}
+
+// ItemIteratorG allows callers of {A/De}scend* to iterate in-order over portions of
+// the tree. When this function returns false, iteration will stop and the
+// associated Ascend* function will immediately return.
+type ItemIteratorG[T any] func(item T) bool
+
+// Ordered represents the set of types for which the '<' operator works.
+type Ordered interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~float32 | ~float64 | ~string
+}
+
+// Less[T] returns a default LessFunc that uses the '<' operator for types that support it.
+func Less[T Ordered]() LessFunc[T] {
+ return func(a, b T) bool { return a < b }
+}
+
+// NewOrderedG creates a new B-Tree for ordered types.
+func NewOrderedG[T Ordered](degree int) *BTreeG[T] {
+ return NewG[T](degree, Less[T]())
+}
+
+// NewG creates a new B-Tree with the given degree.
+//
+// NewG(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
+// and 2-4 children).
+//
+// The passed-in LessFunc determines how objects of type T are ordered.
+func NewG[T any](degree int, less LessFunc[T]) *BTreeG[T] {
+ return NewWithFreeListG(degree, less, NewFreeListG[T](DefaultFreeListSize))
+}
+
+// NewWithFreeListG creates a new B-Tree that uses the given node free list.
+func NewWithFreeListG[T any](degree int, less LessFunc[T], f *FreeListG[T]) *BTreeG[T] {
+ if degree <= 1 {
+ panic("bad degree")
+ }
+ return &BTreeG[T]{
+ degree: degree,
+ cow: &copyOnWriteContext[T]{freelist: f, less: less},
+ }
+}
+
+// items stores items in a node.
+type items[T any] []T
+
+// insertAt inserts a value into the given index, pushing all subsequent values
+// forward.
+func (s *items[T]) insertAt(index int, item T) {
+ var zero T
+ *s = append(*s, zero)
+ if index < len(*s) {
+ copy((*s)[index+1:], (*s)[index:])
+ }
+ (*s)[index] = item
+}
+
+// removeAt removes a value at a given index, pulling all subsequent values
+// back.
+func (s *items[T]) removeAt(index int) T {
+ item := (*s)[index]
+ copy((*s)[index:], (*s)[index+1:])
+ var zero T
+ (*s)[len(*s)-1] = zero
+ *s = (*s)[:len(*s)-1]
+ return item
+}
+
+// pop removes and returns the last element in the list.
+func (s *items[T]) pop() (out T) {
+ index := len(*s) - 1
+ out = (*s)[index]
+ var zero T
+ (*s)[index] = zero
+ *s = (*s)[:index]
+ return
+}
+
+// truncate truncates this instance at index so that it contains only the
+// first index items. index must be less than or equal to length.
+func (s *items[T]) truncate(index int) {
+ var toClear items[T]
+ *s, toClear = (*s)[:index], (*s)[index:]
+ var zero T
+ for i := 0; i < len(toClear); i++ {
+ toClear[i] = zero
+ }
+}
+
+// find returns the index where the given item should be inserted into this
+// list. 'found' is true if the item already exists in the list at the given
+// index.
+func (s items[T]) find(item T, less func(T, T) bool) (index int, found bool) {
+ i := sort.Search(len(s), func(i int) bool {
+ return less(item, s[i])
+ })
+ if i > 0 && !less(s[i-1], item) {
+ return i - 1, true
+ }
+ return i, false
+}
+
+// node is an internal node in a tree.
+//
+// It must at all times maintain the invariant that either
+// * len(children) == 0, len(items) unconstrained
+// * len(children) == len(items) + 1
+type node[T any] struct {
+ items items[T]
+ children items[*node[T]]
+ cow *copyOnWriteContext[T]
+}
+
+func (n *node[T]) mutableFor(cow *copyOnWriteContext[T]) *node[T] {
+ if n.cow == cow {
+ return n
+ }
+ out := cow.newNode()
+ if cap(out.items) >= len(n.items) {
+ out.items = out.items[:len(n.items)]
+ } else {
+ out.items = make(items[T], len(n.items), cap(n.items))
+ }
+ copy(out.items, n.items)
+ // Copy children
+ if cap(out.children) >= len(n.children) {
+ out.children = out.children[:len(n.children)]
+ } else {
+ out.children = make(items[*node[T]], len(n.children), cap(n.children))
+ }
+ copy(out.children, n.children)
+ return out
+}
+
+func (n *node[T]) mutableChild(i int) *node[T] {
+ c := n.children[i].mutableFor(n.cow)
+ n.children[i] = c
+ return c
+}
+
+// split splits the given node at the given index. The current node shrinks,
+// and this function returns the item that existed at that index and a new node
+// containing all items/children after it.
+func (n *node[T]) split(i int) (T, *node[T]) {
+ item := n.items[i]
+ next := n.cow.newNode()
+ next.items = append(next.items, n.items[i+1:]...)
+ n.items.truncate(i)
+ if len(n.children) > 0 {
+ next.children = append(next.children, n.children[i+1:]...)
+ n.children.truncate(i + 1)
+ }
+ return item, next
+}
+
+// maybeSplitChild checks if a child should be split, and if so splits it.
+// Returns whether or not a split occurred.
+func (n *node[T]) maybeSplitChild(i, maxItems int) bool {
+ if len(n.children[i].items) < maxItems {
+ return false
+ }
+ first := n.mutableChild(i)
+ item, second := first.split(maxItems / 2)
+ n.items.insertAt(i, item)
+ n.children.insertAt(i+1, second)
+ return true
+}
+
+// insert inserts an item into the subtree rooted at this node, making sure
+// no nodes in the subtree exceed maxItems items. Should an equivalent item be
+// found/replaced by insert, it will be returned.
+func (n *node[T]) insert(item T, maxItems int) (_ T, _ bool) {
+ i, found := n.items.find(item, n.cow.less)
+ if found {
+ out := n.items[i]
+ n.items[i] = item
+ return out, true
+ }
+ if len(n.children) == 0 {
+ n.items.insertAt(i, item)
+ return
+ }
+ if n.maybeSplitChild(i, maxItems) {
+ inTree := n.items[i]
+ switch {
+ case n.cow.less(item, inTree):
+ // no change, we want first split node
+ case n.cow.less(inTree, item):
+ i++ // we want second split node
+ default:
+ out := n.items[i]
+ n.items[i] = item
+ return out, true
+ }
+ }
+ return n.mutableChild(i).insert(item, maxItems)
+}
+
+// get finds the given key in the subtree and returns it.
+func (n *node[T]) get(key T) (_ T, _ bool) {
+ i, found := n.items.find(key, n.cow.less)
+ if found {
+ return n.items[i], true
+ } else if len(n.children) > 0 {
+ return n.children[i].get(key)
+ }
+ return
+}
+
+// min returns the first item in the subtree.
+func min[T any](n *node[T]) (_ T, found bool) {
+ if n == nil {
+ return
+ }
+ for len(n.children) > 0 {
+ n = n.children[0]
+ }
+ if len(n.items) == 0 {
+ return
+ }
+ return n.items[0], true
+}
+
+// max returns the last item in the subtree.
+func max[T any](n *node[T]) (_ T, found bool) {
+ if n == nil {
+ return
+ }
+ for len(n.children) > 0 {
+ n = n.children[len(n.children)-1]
+ }
+ if len(n.items) == 0 {
+ return
+ }
+ return n.items[len(n.items)-1], true
+}
+
+// toRemove details what item to remove in a node.remove call.
+type toRemove int
+
+const (
+ removeItem toRemove = iota // removes the given item
+ removeMin // removes smallest item in the subtree
+ removeMax // removes largest item in the subtree
+)
+
+// remove removes an item from the subtree rooted at this node.
+func (n *node[T]) remove(item T, minItems int, typ toRemove) (_ T, _ bool) {
+ var i int
+ var found bool
+ switch typ {
+ case removeMax:
+ if len(n.children) == 0 {
+ return n.items.pop(), true
+ }
+ i = len(n.items)
+ case removeMin:
+ if len(n.children) == 0 {
+ return n.items.removeAt(0), true
+ }
+ i = 0
+ case removeItem:
+ i, found = n.items.find(item, n.cow.less)
+ if len(n.children) == 0 {
+ if found {
+ return n.items.removeAt(i), true
+ }
+ return
+ }
+ default:
+ panic("invalid type")
+ }
+ // If we get to here, we have children.
+ if len(n.children[i].items) <= minItems {
+ return n.growChildAndRemove(i, item, minItems, typ)
+ }
+ child := n.mutableChild(i)
+ // Either we had enough items to begin with, or we've done some
+ // merging/stealing, because we've got enough now and we're ready to return
+ // stuff.
+ if found {
+ // The item exists at index 'i', and the child we've selected can give us a
+ // predecessor, since if we've gotten here it's got > minItems items in it.
+ out := n.items[i]
+ // We use our special-case 'remove' call with typ=maxItem to pull the
+ // predecessor of item i (the rightmost leaf of our immediate left child)
+ // and set it into where we pulled the item from.
+ var zero T
+ n.items[i], _ = child.remove(zero, minItems, removeMax)
+ return out, true
+ }
+ // Final recursive call. Once we're here, we know that the item isn't in this
+ // node and that the child is big enough to remove from.
+ return child.remove(item, minItems, typ)
+}
+
+// growChildAndRemove grows child 'i' to make sure it's possible to remove an
+// item from it while keeping it at minItems, then calls remove to actually
+// remove it.
+//
+// Most documentation says we have to do two sets of special casing:
+// 1) item is in this node
+// 2) item is in child
+// In both cases, we need to handle the two subcases:
+// A) node has enough values that it can spare one
+// B) node doesn't have enough values
+// For the latter, we have to check:
+// a) left sibling has node to spare
+// b) right sibling has node to spare
+// c) we must merge
+// To simplify our code here, we handle cases #1 and #2 the same:
+// If a node doesn't have enough items, we make sure it does (using a,b,c).
+// We then simply redo our remove call, and the second time (regardless of
+// whether we're in case 1 or 2), we'll have enough items and can guarantee
+// that we hit case A.
+func (n *node[T]) growChildAndRemove(i int, item T, minItems int, typ toRemove) (T, bool) {
+ if i > 0 && len(n.children[i-1].items) > minItems {
+ // Steal from left child
+ child := n.mutableChild(i)
+ stealFrom := n.mutableChild(i - 1)
+ stolenItem := stealFrom.items.pop()
+ child.items.insertAt(0, n.items[i-1])
+ n.items[i-1] = stolenItem
+ if len(stealFrom.children) > 0 {
+ child.children.insertAt(0, stealFrom.children.pop())
+ }
+ } else if i < len(n.items) && len(n.children[i+1].items) > minItems {
+ // steal from right child
+ child := n.mutableChild(i)
+ stealFrom := n.mutableChild(i + 1)
+ stolenItem := stealFrom.items.removeAt(0)
+ child.items = append(child.items, n.items[i])
+ n.items[i] = stolenItem
+ if len(stealFrom.children) > 0 {
+ child.children = append(child.children, stealFrom.children.removeAt(0))
+ }
+ } else {
+ if i >= len(n.items) {
+ i--
+ }
+ child := n.mutableChild(i)
+ // merge with right child
+ mergeItem := n.items.removeAt(i)
+ mergeChild := n.children.removeAt(i + 1)
+ child.items = append(child.items, mergeItem)
+ child.items = append(child.items, mergeChild.items...)
+ child.children = append(child.children, mergeChild.children...)
+ n.cow.freeNode(mergeChild)
+ }
+ return n.remove(item, minItems, typ)
+}
+
+type direction int
+
+const (
+ descend = direction(-1)
+ ascend = direction(+1)
+)
+
+type optionalItem[T any] struct {
+ item T
+ valid bool
+}
+
+func optional[T any](item T) optionalItem[T] {
+ return optionalItem[T]{item: item, valid: true}
+}
+func empty[T any]() optionalItem[T] {
+ return optionalItem[T]{}
+}
+
+// iterate provides a simple method for iterating over elements in the tree.
+//
+// When ascending, the 'start' should be less than 'stop' and when descending,
+// the 'start' should be greater than 'stop'. Setting 'includeStart' to true
+// will force the iterator to include the first item when it equals 'start',
+// thus creating a "greaterOrEqual" or "lessThanEqual" query rather than just a
+// "greaterThan" or "lessThan" query.
+func (n *node[T]) iterate(dir direction, start, stop optionalItem[T], includeStart bool, hit bool, iter ItemIteratorG[T]) (bool, bool) {
+ var ok, found bool
+ var index int
+ switch dir {
+ case ascend:
+ if start.valid {
+ index, _ = n.items.find(start.item, n.cow.less)
+ }
+ for i := index; i < len(n.items); i++ {
+ if len(n.children) > 0 {
+ if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+ return hit, false
+ }
+ }
+ if !includeStart && !hit && start.valid && !n.cow.less(start.item, n.items[i]) {
+ hit = true
+ continue
+ }
+ hit = true
+ if stop.valid && !n.cow.less(n.items[i], stop.item) {
+ return hit, false
+ }
+ if !iter(n.items[i]) {
+ return hit, false
+ }
+ }
+ if len(n.children) > 0 {
+ if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+ return hit, false
+ }
+ }
+ case descend:
+ if start.valid {
+ index, found = n.items.find(start.item, n.cow.less)
+ if !found {
+ index = index - 1
+ }
+ } else {
+ index = len(n.items) - 1
+ }
+ for i := index; i >= 0; i-- {
+ if start.valid && !n.cow.less(n.items[i], start.item) {
+ if !includeStart || hit || n.cow.less(start.item, n.items[i]) {
+ continue
+ }
+ }
+ if len(n.children) > 0 {
+ if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+ return hit, false
+ }
+ }
+ if stop.valid && !n.cow.less(stop.item, n.items[i]) {
+ return hit, false // continue
+ }
+ hit = true
+ if !iter(n.items[i]) {
+ return hit, false
+ }
+ }
+ if len(n.children) > 0 {
+ if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+ return hit, false
+ }
+ }
+ }
+ return hit, true
+}
+
+// print is used for testing/debugging purposes.
+func (n *node[T]) print(w io.Writer, level int) {
+ fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items)
+ for _, c := range n.children {
+ c.print(w, level+1)
+ }
+}
+
+// BTreeG is a generic implementation of a B-Tree.
+//
+// BTreeG stores items of type T in an ordered structure, allowing easy insertion,
+// removal, and iteration.
+//
+// Write operations are not safe for concurrent mutation by multiple
+// goroutines, but Read operations are.
+type BTreeG[T any] struct {
+ degree int
+ length int
+ root *node[T]
+ cow *copyOnWriteContext[T]
+}
+
+// LessFunc[T] determines how to order a type 'T'. It should implement a strict
+// ordering, and should return true if within that ordering, 'a' < 'b'.
+type LessFunc[T any] func(a, b T) bool
+
+// copyOnWriteContext pointers determine node ownership... a tree with a write
+// context equivalent to a node's write context is allowed to modify that node.
+// A tree whose write context does not match a node's is not allowed to modify
+// it, and must create a new, writable copy (i.e., it's a Clone).
+//
+// When doing any write operation, we maintain the invariant that the current
+// node's context is equal to the context of the tree that requested the write.
+// We do this by, before we descend into any node, creating a copy with the
+// correct context if the contexts don't match.
+//
+// Since the node we're currently visiting on any write has the requesting
+// tree's context, that node is modifiable in place. Children of that node may
+// not share context, but before we descend into them, we'll make a mutable
+// copy.
+type copyOnWriteContext[T any] struct {
+ freelist *FreeListG[T]
+ less LessFunc[T]
+}
+
+// Clone clones the btree, lazily. Clone should not be called concurrently,
+// but the original tree (t) and the new tree (t2) can be used concurrently
+// once the Clone call completes.
+//
+// The internal tree structure of t is marked read-only and shared between t and
+// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes
+// whenever one of t's original nodes would have been modified. Read operations
+// should have no performance degradation. Write operations for both t and t2
+// will initially experience minor slow-downs caused by additional allocs and
+// copies due to the aforementioned copy-on-write logic, but should converge to
+// the performance characteristics of the original tree.
+func (t *BTreeG[T]) Clone() (t2 *BTreeG[T]) {
+ // Create two entirely new copy-on-write contexts.
+ // This operation effectively creates three trees:
+ // the original, shared nodes (old t.cow)
+ // the new t.cow nodes
+ // the new out.cow nodes
+ cow1, cow2 := *t.cow, *t.cow
+ out := *t
+ t.cow = &cow1
+ out.cow = &cow2
+ return &out
+}
+
+// maxItems returns the max number of items to allow per node.
+func (t *BTreeG[T]) maxItems() int {
+ return t.degree*2 - 1
+}
+
+// minItems returns the min number of items to allow per node (ignored for the
+// root node).
+func (t *BTreeG[T]) minItems() int {
+ return t.degree - 1
+}
+
+func (c *copyOnWriteContext[T]) newNode() (n *node[T]) {
+ n = c.freelist.newNode()
+ n.cow = c
+ return
+}
+
+type freeType int
+
+const (
+ ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist)
+ ftStored // node was stored in the freelist for later use
+ ftNotOwned // node was ignored by COW, since it's owned by another one
+)
+
+// freeNode frees a node within a given COW context, if it's owned by that
+// context. It returns what happened to the node (see freeType const
+// documentation).
+func (c *copyOnWriteContext[T]) freeNode(n *node[T]) freeType {
+ if n.cow == c {
+ // clear to allow GC
+ n.items.truncate(0)
+ n.children.truncate(0)
+ n.cow = nil
+ if c.freelist.freeNode(n) {
+ return ftStored
+ } else {
+ return ftFreelistFull
+ }
+ } else {
+ return ftNotOwned
+ }
+}
+
+// ReplaceOrInsert adds the given item to the tree. If an item in the tree
+// already equals the given one, it is removed from the tree and returned,
+// and the second return value is true. Otherwise, (zeroValue, false) is returned.
+//
+// nil cannot be added to the tree (will panic).
+func (t *BTreeG[T]) ReplaceOrInsert(item T) (_ T, _ bool) {
+ if t.root == nil {
+ t.root = t.cow.newNode()
+ t.root.items = append(t.root.items, item)
+ t.length++
+ return
+ } else {
+ t.root = t.root.mutableFor(t.cow)
+ if len(t.root.items) >= t.maxItems() {
+ item2, second := t.root.split(t.maxItems() / 2)
+ oldroot := t.root
+ t.root = t.cow.newNode()
+ t.root.items = append(t.root.items, item2)
+ t.root.children = append(t.root.children, oldroot, second)
+ }
+ }
+ out, outb := t.root.insert(item, t.maxItems())
+ if !outb {
+ t.length++
+ }
+ return out, outb
+}
+
+// Delete removes an item equal to the passed in item from the tree, returning
+// it. If no such item exists, returns (zeroValue, false).
+func (t *BTreeG[T]) Delete(item T) (T, bool) {
+ return t.deleteItem(item, removeItem)
+}
+
+// DeleteMin removes the smallest item in the tree and returns it.
+// If no such item exists, returns (zeroValue, false).
+func (t *BTreeG[T]) DeleteMin() (T, bool) {
+ var zero T
+ return t.deleteItem(zero, removeMin)
+}
+
+// DeleteMax removes the largest item in the tree and returns it.
+// If no such item exists, returns (zeroValue, false).
+func (t *BTreeG[T]) DeleteMax() (T, bool) {
+ var zero T
+ return t.deleteItem(zero, removeMax)
+}
+
+func (t *BTreeG[T]) deleteItem(item T, typ toRemove) (_ T, _ bool) {
+ if t.root == nil || len(t.root.items) == 0 {
+ return
+ }
+ t.root = t.root.mutableFor(t.cow)
+ out, outb := t.root.remove(item, t.minItems(), typ)
+ if len(t.root.items) == 0 && len(t.root.children) > 0 {
+ oldroot := t.root
+ t.root = t.root.children[0]
+ t.cow.freeNode(oldroot)
+ }
+ if outb {
+ t.length--
+ }
+ return out, outb
+}
+
+// AscendRange calls the iterator for every value in the tree within the range
+// [greaterOrEqual, lessThan), until iterator returns false.
+func (t *BTreeG[T]) AscendRange(greaterOrEqual, lessThan T, iterator ItemIteratorG[T]) {
+ if t.root == nil {
+ return
+ }
+ t.root.iterate(ascend, optional[T](greaterOrEqual), optional[T](lessThan), true, false, iterator)
+}
+
+// AscendLessThan calls the iterator for every value in the tree within the range
+// [first, pivot), until iterator returns false.
+func (t *BTreeG[T]) AscendLessThan(pivot T, iterator ItemIteratorG[T]) {
+ if t.root == nil {
+ return
+ }
+ t.root.iterate(ascend, empty[T](), optional(pivot), false, false, iterator)
+}
+
+// AscendGreaterOrEqual calls the iterator for every value in the tree within
+// the range [pivot, last], until iterator returns false.
+func (t *BTreeG[T]) AscendGreaterOrEqual(pivot T, iterator ItemIteratorG[T]) {
+ if t.root == nil {
+ return
+ }
+ t.root.iterate(ascend, optional[T](pivot), empty[T](), true, false, iterator)
+}
+
+// Ascend calls the iterator for every value in the tree within the range
+// [first, last], until iterator returns false.
+func (t *BTreeG[T]) Ascend(iterator ItemIteratorG[T]) {
+ if t.root == nil {
+ return
+ }
+ t.root.iterate(ascend, empty[T](), empty[T](), false, false, iterator)
+}
+
+// DescendRange calls the iterator for every value in the tree within the range
+// [lessOrEqual, greaterThan), until iterator returns false.
+func (t *BTreeG[T]) DescendRange(lessOrEqual, greaterThan T, iterator ItemIteratorG[T]) {
+ if t.root == nil {
+ return
+ }
+ t.root.iterate(descend, optional[T](lessOrEqual), optional[T](greaterThan), true, false, iterator)
+}
+
+// DescendLessOrEqual calls the iterator for every value in the tree within the range
+// [pivot, first], until iterator returns false.
+func (t *BTreeG[T]) DescendLessOrEqual(pivot T, iterator ItemIteratorG[T]) {
+ if t.root == nil {
+ return
+ }
+ t.root.iterate(descend, optional[T](pivot), empty[T](), true, false, iterator)
+}
+
+// DescendGreaterThan calls the iterator for every value in the tree within
+// the range [last, pivot), until iterator returns false.
+func (t *BTreeG[T]) DescendGreaterThan(pivot T, iterator ItemIteratorG[T]) {
+ if t.root == nil {
+ return
+ }
+ t.root.iterate(descend, empty[T](), optional[T](pivot), false, false, iterator)
+}
+
+// Descend calls the iterator for every value in the tree within the range
+// [last, first], until iterator returns false.
+func (t *BTreeG[T]) Descend(iterator ItemIteratorG[T]) {
+ if t.root == nil {
+ return
+ }
+ t.root.iterate(descend, empty[T](), empty[T](), false, false, iterator)
+}
+
+// Get looks for the key item in the tree, returning it. It returns
+// (zeroValue, false) if unable to find that item.
+func (t *BTreeG[T]) Get(key T) (_ T, _ bool) {
+ if t.root == nil {
+ return
+ }
+ return t.root.get(key)
+}
+
+// Min returns the smallest item in the tree, or (zeroValue, false) if the tree is empty.
+func (t *BTreeG[T]) Min() (_ T, _ bool) {
+ return min(t.root)
+}
+
+// Max returns the largest item in the tree, or (zeroValue, false) if the tree is empty.
+func (t *BTreeG[T]) Max() (_ T, _ bool) {
+ return max(t.root)
+}
+
+// Has returns true if the given key is in the tree.
+func (t *BTreeG[T]) Has(key T) bool {
+ _, ok := t.Get(key)
+ return ok
+}
+
+// Len returns the number of items currently in the tree.
+func (t *BTreeG[T]) Len() int {
+ return t.length
+}
+
+// Clear removes all items from the btree. If addNodesToFreelist is true,
+// t's nodes are added to its freelist as part of this call, until the freelist
+// is full. Otherwise, the root node is simply dereferenced and the subtree
+// left to Go's normal GC processes.
+//
+// This can be much faster
+// than calling Delete on all elements, because that requires finding/removing
+// each element in the tree and updating the tree accordingly. It also is
+// somewhat faster than creating a new tree to replace the old one, because
+// nodes from the old tree are reclaimed into the freelist for use by the new
+// one, instead of being lost to the garbage collector.
+//
+// This call takes:
+// O(1): when addNodesToFreelist is false, this is a single operation.
+// O(1): when the freelist is already full, it breaks out immediately
+// O(freelist size): when the freelist is empty and the nodes are all owned
+// by this tree, nodes are added to the freelist until full.
+// O(tree size): when all nodes are owned by another tree, all nodes are
+// iterated over looking for nodes to add to the freelist, and due to
+// ownership, none are.
+func (t *BTreeG[T]) Clear(addNodesToFreelist bool) {
+ if t.root != nil && addNodesToFreelist {
+ t.root.reset(t.cow)
+ }
+ t.root, t.length = nil, 0
+}
+
+// reset returns a subtree to the freelist. It breaks out immediately if the
+// freelist is full, since the only benefit of iterating is to fill that
+// freelist up. Returns true if parent reset call should continue.
+func (n *node[T]) reset(c *copyOnWriteContext[T]) bool {
+ for _, child := range n.children {
+ if !child.reset(c) {
+ return false
+ }
+ }
+ return c.freeNode(n) != ftFreelistFull
+}
+
+// Int implements the Item interface for integers.
+type Int int
+
+// Less returns true if int(a) < int(b).
+func (a Int) Less(b Item) bool {
+ return a < b.(Int)
+}
+
+// BTree is an implementation of a B-Tree.
+//
+// BTree stores Item instances in an ordered structure, allowing easy insertion,
+// removal, and iteration.
+//
+// Write operations are not safe for concurrent mutation by multiple
+// goroutines, but Read operations are.
+type BTree BTreeG[Item]
+
+var itemLess LessFunc[Item] = func(a, b Item) bool {
+ return a.Less(b)
+}
+
+// New creates a new B-Tree with the given degree.
+//
+// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
+// and 2-4 children).
+func New(degree int) *BTree {
+ return (*BTree)(NewG[Item](degree, itemLess))
+}
+
+// FreeList represents a free list of btree nodes. By default each
+// BTree has its own FreeList, but multiple BTrees can share the same
+// FreeList.
+// Two Btrees using the same freelist are safe for concurrent write access.
+type FreeList FreeListG[Item]
+
+// NewFreeList creates a new free list.
+// size is the maximum size of the returned free list.
+func NewFreeList(size int) *FreeList {
+ return (*FreeList)(NewFreeListG[Item](size))
+}
+
+// NewWithFreeList creates a new B-Tree that uses the given node free list.
+func NewWithFreeList(degree int, f *FreeList) *BTree {
+ return (*BTree)(NewWithFreeListG[Item](degree, itemLess, (*FreeListG[Item])(f)))
+}
+
+// ItemIterator allows callers of Ascend* to iterate in-order over portions of
+// the tree. When this function returns false, iteration will stop and the
+// associated Ascend* function will immediately return.
+type ItemIterator ItemIteratorG[Item]
+
+// Clone clones the btree, lazily. Clone should not be called concurrently,
+// but the original tree (t) and the new tree (t2) can be used concurrently
+// once the Clone call completes.
+//
+// The internal tree structure of t is marked read-only and shared between t and
+// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes
+// whenever one of t's original nodes would have been modified. Read operations
+// should have no performance degradation. Write operations for both t and t2
+// will initially experience minor slow-downs caused by additional allocs and
+// copies due to the aforementioned copy-on-write logic, but should converge to
+// the performance characteristics of the original tree.
+func (t *BTree) Clone() (t2 *BTree) {
+ return (*BTree)((*BTreeG[Item])(t).Clone())
+}
+
+// Delete removes an item equal to the passed in item from the tree, returning
+// it. If no such item exists, returns nil.
+func (t *BTree) Delete(item Item) Item {
+ i, _ := (*BTreeG[Item])(t).Delete(item)
+ return i
+}
+
+// DeleteMax removes the largest item in the tree and returns it.
+// If no such item exists, returns nil.
+func (t *BTree) DeleteMax() Item {
+ i, _ := (*BTreeG[Item])(t).DeleteMax()
+ return i
+}
+
+// DeleteMin removes the smallest item in the tree and returns it.
+// If no such item exists, returns nil.
+func (t *BTree) DeleteMin() Item {
+ i, _ := (*BTreeG[Item])(t).DeleteMin()
+ return i
+}
+
+// Get looks for the key item in the tree, returning it. It returns nil if
+// unable to find that item.
+func (t *BTree) Get(key Item) Item {
+ i, _ := (*BTreeG[Item])(t).Get(key)
+ return i
+}
+
+// Max returns the largest item in the tree, or nil if the tree is empty.
+func (t *BTree) Max() Item {
+ i, _ := (*BTreeG[Item])(t).Max()
+ return i
+}
+
+// Min returns the smallest item in the tree, or nil if the tree is empty.
+func (t *BTree) Min() Item {
+ i, _ := (*BTreeG[Item])(t).Min()
+ return i
+}
+
+// Has returns true if the given key is in the tree.
+func (t *BTree) Has(key Item) bool {
+ return (*BTreeG[Item])(t).Has(key)
+}
+
+// ReplaceOrInsert adds the given item to the tree. If an item in the tree
+// already equals the given one, it is removed from the tree and returned.
+// Otherwise, nil is returned.
+//
+// nil cannot be added to the tree (will panic).
+func (t *BTree) ReplaceOrInsert(item Item) Item {
+ i, _ := (*BTreeG[Item])(t).ReplaceOrInsert(item)
+ return i
+}
+
+// AscendRange calls the iterator for every value in the tree within the range
+// [greaterOrEqual, lessThan), until iterator returns false.
+func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
+ (*BTreeG[Item])(t).AscendRange(greaterOrEqual, lessThan, (ItemIteratorG[Item])(iterator))
+}
+
+// AscendLessThan calls the iterator for every value in the tree within the range
+// [first, pivot), until iterator returns false.
+func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) {
+ (*BTreeG[Item])(t).AscendLessThan(pivot, (ItemIteratorG[Item])(iterator))
+}
+
+// AscendGreaterOrEqual calls the iterator for every value in the tree within
+// the range [pivot, last], until iterator returns false.
+func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
+ (*BTreeG[Item])(t).AscendGreaterOrEqual(pivot, (ItemIteratorG[Item])(iterator))
+}
+
+// Ascend calls the iterator for every value in the tree within the range
+// [first, last], until iterator returns false.
+func (t *BTree) Ascend(iterator ItemIterator) {
+ (*BTreeG[Item])(t).Ascend((ItemIteratorG[Item])(iterator))
+}
+
+// DescendRange calls the iterator for every value in the tree within the range
+// [lessOrEqual, greaterThan), until iterator returns false.
+func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) {
+ (*BTreeG[Item])(t).DescendRange(lessOrEqual, greaterThan, (ItemIteratorG[Item])(iterator))
+}
+
+// DescendLessOrEqual calls the iterator for every value in the tree within the range
+// [pivot, first], until iterator returns false.
+func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) {
+ (*BTreeG[Item])(t).DescendLessOrEqual(pivot, (ItemIteratorG[Item])(iterator))
+}
+
+// DescendGreaterThan calls the iterator for every value in the tree within
+// the range [last, pivot), until iterator returns false.
+func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) {
+ (*BTreeG[Item])(t).DescendGreaterThan(pivot, (ItemIteratorG[Item])(iterator))
+}
+
+// Descend calls the iterator for every value in the tree within the range
+// [last, first], until iterator returns false.
+func (t *BTree) Descend(iterator ItemIterator) {
+ (*BTreeG[Item])(t).Descend((ItemIteratorG[Item])(iterator))
+}
+
+// Len returns the number of items currently in the tree.
+func (t *BTree) Len() int {
+ return (*BTreeG[Item])(t).Len()
+}
+
+// Clear removes all items from the btree. If addNodesToFreelist is true,
+// t's nodes are added to its freelist as part of this call, until the freelist
+// is full. Otherwise, the root node is simply dereferenced and the subtree
+// left to Go's normal GC processes.
+//
+// This can be much faster
+// than calling Delete on all elements, because that requires finding/removing
+// each element in the tree and updating the tree accordingly. It also is
+// somewhat faster than creating a new tree to replace the old one, because
+// nodes from the old tree are reclaimed into the freelist for use by the new
+// one, instead of being lost to the garbage collector.
+//
+// This call takes:
+// O(1): when addNodesToFreelist is false, this is a single operation.
+// O(1): when the freelist is already full, it breaks out immediately
+// O(freelist size): when the freelist is empty and the nodes are all owned
+// by this tree, nodes are added to the freelist until full.
+// O(tree size): when all nodes are owned by another tree, all nodes are
+// iterated over looking for nodes to add to the freelist, and due to
+// ownership, none are.
+func (t *BTree) Clear(addNodesToFreelist bool) {
+ (*BTreeG[Item])(t).Clear(addNodesToFreelist)
+}
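For orientation, a minimal sketch (illustrative, not part of the vendored code) of driving the non-generic wrapper above; the degree and values are arbitrary:

    // import "github.com/google/btree"
    tr := btree.New(2) // degree-2 B-Tree
    for i := 0; i < 10; i++ {
        tr.ReplaceOrInsert(btree.Int(i))
    }
    // Visit items in [3, 7); returning false from the iterator stops early.
    tr.AscendRange(btree.Int(3), btree.Int(7), func(it btree.Item) bool {
        fmt.Println(it)
        return true
    })
    tr.Clear(true) // recycle nodes into the freelist for reuse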
diff --git a/vendor/github.com/google/cel-go/cel/BUILD.bazel b/vendor/github.com/google/cel-go/cel/BUILD.bazel
index 0905f6353..c12e4904d 100644
--- a/vendor/github.com/google/cel-go/cel/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/cel/BUILD.bazel
@@ -10,13 +10,18 @@ go_library(
"cel.go",
"decls.go",
"env.go",
+ "folding.go",
+ "inlining.go",
"io.go",
"library.go",
"macro.go",
+ "optimizer.go",
"options.go",
"program.go",
+ "prompt.go",
"validator.go",
],
+ embedsrcs = ["//cel/templates"],
importpath = "github.com/google/cel-go/cel",
visibility = ["//visibility:public"],
deps = [
@@ -26,6 +31,7 @@ go_library(
"//common/ast:go_default_library",
"//common/containers:go_default_library",
"//common/decls:go_default_library",
+ "//common/env:go_default_library",
"//common/functions:go_default_library",
"//common/operators:go_default_library",
"//common/overloads:go_default_library",
@@ -36,6 +42,7 @@ go_library(
"//common/types/traits:go_default_library",
"//interpreter:go_default_library",
"//parser:go_default_library",
+ "@dev_cel_expr//:expr",
"@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_google_protobuf//reflect/protodesc:go_default_library",
@@ -56,7 +63,12 @@ go_test(
"cel_test.go",
"decls_test.go",
"env_test.go",
+ "folding_test.go",
+ "inlining_test.go",
"io_test.go",
+ "optimizer_test.go",
+ "prompt_test.go",
+ "validator_test.go",
],
data = [
"//cel/testdata:gen_test_fds",
@@ -64,19 +76,22 @@ go_test(
embed = [
":go_default_library",
],
+ embedsrcs = [
+ "//cel/testdata:prompts",
+ ],
deps = [
"//common/operators:go_default_library",
"//common/overloads:go_default_library",
"//common/types:go_default_library",
"//common/types/ref:go_default_library",
"//common/types/traits:go_default_library",
+ "//ext:go_default_library",
"//test:go_default_library",
"//test/proto2pb:go_default_library",
"//test/proto3pb:go_default_library",
- "@io_bazel_rules_go//proto/wkt:descriptor_go_proto",
"@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
- "@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_google_protobuf//encoding/prototext:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_google_protobuf//types/known/structpb:go_default_library",
"@org_golang_google_protobuf//types/known/wrapperspb:go_default_library",
],
diff --git a/vendor/github.com/google/cel-go/cel/decls.go b/vendor/github.com/google/cel-go/cel/decls.go
index 0f9501341..4d4873bd6 100644
--- a/vendor/github.com/google/cel-go/cel/decls.go
+++ b/vendor/github.com/google/cel-go/cel/decls.go
@@ -23,6 +23,7 @@ import (
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
+ celpb "cel.dev/expr"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
@@ -141,8 +142,23 @@ func Constant(name string, t *Type, v ref.Val) EnvOption {
// Variable creates an instance of a variable declaration with a variable name and type.
func Variable(name string, t *Type) EnvOption {
+ return VariableWithDoc(name, t, "")
+}
+
+// VariableWithDoc creates an instance of a variable declaration with a variable name, type, and doc string.
+func VariableWithDoc(name string, t *Type, doc string) EnvOption {
+ return func(e *Env) (*Env, error) {
+ e.variables = append(e.variables, decls.NewVariableWithDoc(name, t, doc))
+ return e, nil
+ }
+}
+
+// VariableDecls configures a set of fully defined cel.VariableDecl instances in the environment.
+func VariableDecls(vars ...*decls.VariableDecl) EnvOption {
return func(e *Env) (*Env, error) {
- e.variables = append(e.variables, decls.NewVariable(name, t))
+ for _, v := range vars {
+ e.variables = append(e.variables, v)
+ }
return e, nil
}
}
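A sketch of the new variable options; the variable names and types are illustrative, not from this change:

    // import "github.com/google/cel-go/cel"
    // import "github.com/google/cel-go/common/decls"
    // import "github.com/google/cel-go/common/types"
    env, err := cel.NewEnv(
        // Attach a doc string to the declaration.
        cel.VariableWithDoc("request", cel.MapType(cel.StringType, cel.DynType),
            "attributes of the inbound request"),
        // Or inject fully formed declarations directly.
        cel.VariableDecls(decls.NewVariable("region", types.StringType)),
    )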
@@ -182,13 +198,38 @@ func Function(name string, opts ...FunctionOpt) EnvOption {
if err != nil {
return nil, err
}
- if existing, found := e.functions[fn.Name()]; found {
- fn, err = existing.Merge(fn)
- if err != nil {
- return nil, err
+ return FunctionDecls(fn)(e)
+ }
+}
+
+// OverloadSelector selects an overload associated with a given function when it returns true.
+//
+// Used in combination with the FunctionDecl.Subset method.
+type OverloadSelector = decls.OverloadSelector
+
+// IncludeOverloads defines an OverloadSelector which allow-lists a set of overloads by their ids.
+func IncludeOverloads(overloadIDs ...string) OverloadSelector {
+ return decls.IncludeOverloads(overloadIDs...)
+}
+
+// ExcludeOverloads defines an OverloadSelector which deny-lists a set of overloads by their ids.
+func ExcludeOverloads(overloadIDs ...string) OverloadSelector {
+ return decls.ExcludeOverloads(overloadIDs...)
+}
+
+// FunctionDecls provides one or more fully formed function declarations to be added to the environment.
+func FunctionDecls(funcs ...*decls.FunctionDecl) EnvOption {
+ return func(e *Env) (*Env, error) {
+ var err error
+ for _, fn := range funcs {
+ if existing, found := e.functions[fn.Name()]; found {
+ fn, err = existing.Merge(fn)
+ if err != nil {
+ return nil, err
+ }
}
+ e.functions[fn.Name()] = fn
}
- e.functions[fn.Name()] = fn
return e, nil
}
}
@@ -196,6 +237,13 @@ func Function(name string, opts ...FunctionOpt) EnvOption {
// FunctionOpt defines a functional option for configuring a function declaration.
type FunctionOpt = decls.FunctionOpt
+// FunctionDocs provides general usage documentation for the function.
+//
+// Use OverloadExamples to provide example usage instructions for specific overloads.
+func FunctionDocs(docs ...string) FunctionOpt {
+ return decls.FunctionDocs(docs...)
+}
+
// SingletonUnaryBinding creates a singleton function definition to be used for all function overloads.
//
// Note, this approach works well if operand is expected to have a specific trait which it implements,
@@ -269,6 +317,11 @@ func MemberOverload(overloadID string, args []*Type, resultType *Type, opts ...O
// OverloadOpt is a functional option for configuring a function overload.
type OverloadOpt = decls.OverloadOpt
+// OverloadExamples configures an example of how to invoke the overload.
+func OverloadExamples(docs ...string) OverloadOpt {
+ return decls.OverloadExamples(docs...)
+}
+
// UnaryBinding provides the implementation of a unary overload. The provided function is protected by a runtime
// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
func UnaryBinding(binding functions.UnaryOp) OverloadOpt {
@@ -287,6 +340,12 @@ func FunctionBinding(binding functions.FunctionOp) OverloadOpt {
return decls.FunctionBinding(binding)
}
+// LateFunctionBinding indicates that the function has a binding which is not known at compile time.
+// This is useful for functions which have side-effects or are not deterministically computable.
+func LateFunctionBinding() OverloadOpt {
+ return decls.LateFunctionBinding()
+}
+
// OverloadIsNonStrict enables the function to be called with error and unknown argument values.
//
// Note: do not use this option unless absolutely necessary as it should be an uncommon feature.
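A sketch of declaring a documented function with the options above; the shout function and its binding are hypothetical:

    // import ( "strings"; "github.com/google/cel-go/common/types"; "github.com/google/cel-go/common/types/ref" )
    env, err := cel.NewEnv(
        cel.Function("shout",
            cel.FunctionDocs("returns the input string upper-cased"),
            cel.Overload("shout_string", []*cel.Type{cel.StringType}, cel.StringType,
                cel.OverloadExamples("shout('hello') // 'HELLO'"),
                cel.UnaryBinding(func(arg ref.Val) ref.Val {
                    return types.String(strings.ToUpper(string(arg.(types.String))))
                }),
            ),
        ),
    )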
@@ -312,20 +371,34 @@ func ExprTypeToType(t *exprpb.Type) (*Type, error) {
// ExprDeclToDeclaration converts a protobuf CEL declaration to a CEL-native declaration, either a Variable or Function.
func ExprDeclToDeclaration(d *exprpb.Decl) (EnvOption, error) {
+ return AlphaProtoAsDeclaration(d)
+}
+
+// AlphaProtoAsDeclaration converts a v1alpha1.Decl value describing a variable or function into an EnvOption.
+func AlphaProtoAsDeclaration(d *exprpb.Decl) (EnvOption, error) {
+ canonical := &celpb.Decl{}
+ if err := convertProto(d, canonical); err != nil {
+ return nil, err
+ }
+ return ProtoAsDeclaration(canonical)
+}
+
+// ProtoAsDeclaration converts a canonical celpb.Decl value describing a variable or function into an EnvOption.
+func ProtoAsDeclaration(d *celpb.Decl) (EnvOption, error) {
switch d.GetDeclKind().(type) {
- case *exprpb.Decl_Function:
+ case *celpb.Decl_Function:
overloads := d.GetFunction().GetOverloads()
opts := make([]FunctionOpt, len(overloads))
for i, o := range overloads {
args := make([]*Type, len(o.GetParams()))
for j, p := range o.GetParams() {
- a, err := types.ExprTypeToType(p)
+ a, err := types.ProtoAsType(p)
if err != nil {
return nil, err
}
args[j] = a
}
- res, err := types.ExprTypeToType(o.GetResultType())
+ res, err := types.ProtoAsType(o.GetResultType())
if err != nil {
return nil, err
}
@@ -336,15 +409,15 @@ func ExprDeclToDeclaration(d *exprpb.Decl) (EnvOption, error) {
}
}
return Function(d.GetName(), opts...), nil
- case *exprpb.Decl_Ident:
- t, err := types.ExprTypeToType(d.GetIdent().GetType())
+ case *celpb.Decl_Ident:
+ t, err := types.ProtoAsType(d.GetIdent().GetType())
if err != nil {
return nil, err
}
if d.GetIdent().GetValue() == nil {
return Variable(d.GetName(), t), nil
}
- val, err := ast.ConstantToVal(d.GetIdent().GetValue())
+ val, err := ast.ProtoConstantAsVal(d.GetIdent().GetValue())
if err != nil {
return nil, err
}
@@ -353,43 +426,3 @@ func ExprDeclToDeclaration(d *exprpb.Decl) (EnvOption, error) {
return nil, fmt.Errorf("unsupported decl: %v", d)
}
}
-
-func typeValueToKind(tv ref.Type) (Kind, error) {
- switch tv {
- case types.BoolType:
- return BoolKind, nil
- case types.DoubleType:
- return DoubleKind, nil
- case types.IntType:
- return IntKind, nil
- case types.UintType:
- return UintKind, nil
- case types.ListType:
- return ListKind, nil
- case types.MapType:
- return MapKind, nil
- case types.StringType:
- return StringKind, nil
- case types.BytesType:
- return BytesKind, nil
- case types.DurationType:
- return DurationKind, nil
- case types.TimestampType:
- return TimestampKind, nil
- case types.NullType:
- return NullTypeKind, nil
- case types.TypeType:
- return TypeKind, nil
- default:
- switch tv.TypeName() {
- case "dyn":
- return DynKind, nil
- case "google.protobuf.Any":
- return AnyKind, nil
- case "optional":
- return OpaqueKind, nil
- default:
- return 0, fmt.Errorf("no known conversion for type of %s", tv.TypeName())
- }
- }
-}
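A sketch of converting a v1alpha1 declaration through the new helper; the declaration content is illustrative:

    // exprpb = "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
    d := &exprpb.Decl{
        Name: "hostname",
        DeclKind: &exprpb.Decl_Ident{
            Ident: &exprpb.Decl_IdentDecl{
                Type: &exprpb.Type{TypeKind: &exprpb.Type_Primitive{Primitive: exprpb.Type_STRING}},
            },
        },
    }
    // Converts to the canonical celpb.Decl, then into an EnvOption.
    opt, err := cel.AlphaProtoAsDeclaration(d)
    env, err := cel.NewEnv(opt)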
diff --git a/vendor/github.com/google/cel-go/cel/env.go b/vendor/github.com/google/cel-go/cel/env.go
index b5c3b4cc5..bb3014464 100644
--- a/vendor/github.com/google/cel-go/cel/env.go
+++ b/vendor/github.com/google/cel-go/cel/env.go
@@ -16,6 +16,8 @@ package cel
import (
"errors"
+ "fmt"
+ "math"
"sync"
"github.com/google/cel-go/checker"
@@ -24,12 +26,15 @@ import (
celast "github.com/google/cel-go/common/ast"
"github.com/google/cel-go/common/containers"
"github.com/google/cel-go/common/decls"
+ "github.com/google/cel-go/common/env"
+ "github.com/google/cel-go/common/stdlib"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/interpreter"
"github.com/google/cel-go/parser"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ "google.golang.org/protobuf/reflect/protoreflect"
)
// Source interface representing a user-provided expression.
@@ -38,26 +43,42 @@ type Source = common.Source
// Ast representing the checked or unchecked expression, its source, and related metadata such as
// source position information.
type Ast struct {
- expr *exprpb.Expr
- info *exprpb.SourceInfo
- source Source
- refMap map[int64]*celast.ReferenceInfo
- typeMap map[int64]*types.Type
+ source Source
+ impl *celast.AST
+}
+
+// NativeRep converts the AST to a Go-native representation.
+func (ast *Ast) NativeRep() *celast.AST {
+ if ast == nil {
+ return nil
+ }
+ return ast.impl
}
// Expr returns the proto serializable instance of the parsed/checked expression.
+//
+// Deprecated: prefer cel.AstToCheckedExpr() or cel.AstToParsedExpr() and call GetExpr()
+// on the result instead.
func (ast *Ast) Expr() *exprpb.Expr {
- return ast.expr
+ if ast == nil {
+ return nil
+ }
+ pbExpr, _ := celast.ExprToProto(ast.NativeRep().Expr())
+ return pbExpr
}
// IsChecked returns whether the Ast value has been successfully type-checked.
func (ast *Ast) IsChecked() bool {
- return ast.typeMap != nil && len(ast.typeMap) > 0
+ return ast.NativeRep().IsChecked()
}
// SourceInfo returns character offset and newline position information about expression elements.
func (ast *Ast) SourceInfo() *exprpb.SourceInfo {
- return ast.info
+ if ast == nil {
+ return nil
+ }
+ pbInfo, _ := celast.SourceInfoToProto(ast.NativeRep().SourceInfo())
+ return pbInfo
}
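A sketch of reaching the Go-native representation without a proto round-trip; the expression is illustrative:

    env, _ := cel.NewEnv(cel.Variable("x", cel.IntType))
    checked, iss := env.Compile("x + 1")
    if iss.Err() != nil {
        log.Fatal(iss.Err())
    }
    impl := checked.NativeRep()       // *celast.AST backing the cel.Ast
    fmt.Println(impl.IsChecked())     // true
    fmt.Println(checked.OutputType()) // int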
// ResultType returns the output type of the expression if the Ast has been type-checked, else
@@ -65,9 +86,6 @@ func (ast *Ast) SourceInfo() *exprpb.SourceInfo {
//
// Deprecated: use OutputType
func (ast *Ast) ResultType() *exprpb.Type {
- if !ast.IsChecked() {
- return chkdecls.Dyn
- }
out := ast.OutputType()
t, err := TypeToExprType(out)
if err != nil {
@@ -79,16 +97,18 @@ func (ast *Ast) ResultType() *exprpb.Type {
// OutputType returns the output type of the expression if the Ast has been type-checked, else
// returns cel.DynType as the parse step cannot infer types.
func (ast *Ast) OutputType() *Type {
- t, found := ast.typeMap[ast.expr.GetId()]
- if !found {
- return DynType
+ if ast == nil {
+ return types.ErrorType
}
- return t
+ return ast.NativeRep().GetType(ast.NativeRep().Expr().ID())
}
// Source returns a view of the input used to create the Ast. This source may be complete or
// constructed from the SourceInfo.
func (ast *Ast) Source() Source {
+ if ast == nil {
+ return nil
+ }
return ast.source
}
@@ -112,12 +132,13 @@ type Env struct {
Container *containers.Container
variables []*decls.VariableDecl
functions map[string]*decls.FunctionDecl
- macros []parser.Macro
+ macros []Macro
+ contextProto protoreflect.MessageDescriptor
adapter types.Adapter
provider types.Provider
features map[int]bool
appliedFeatures map[int]bool
- libraries map[string]bool
+ libraries map[string]SingletonLibrary
validators []ASTValidator
costOptions []checker.CostOption
@@ -136,6 +157,134 @@ type Env struct {
progOpts []ProgramOption
}
+// ToConfig produces a YAML-serializable env.Config object from the given environment.
+//
+// The serialized configuration value is intended to represent a baseline set of config
+// options which could be used as input to an EnvOption to configure the majority of the
+// environment from a file.
+//
+// Note: validators, features, flags, and safe-guard settings are not yet supported by
+// the serialize method. Since optimizers are a separate construct from the environment
+// and the standard expression components (parse, check, evaluate), they are also not
+// supported by the serialize method.
+func (e *Env) ToConfig(name string) (*env.Config, error) {
+ conf := env.NewConfig(name)
+ // Container settings
+ if e.Container != containers.DefaultContainer {
+ conf.SetContainer(e.Container.Name())
+ }
+ for _, typeName := range e.Container.AliasSet() {
+ conf.AddImports(env.NewImport(typeName))
+ }
+
+ libOverloads := map[string][]string{}
+ for libName, lib := range e.libraries {
+ // Track the options which have been configured by a library and
+ // then diff the library version against the configured function
+ // to detect incremental overloads or rewrites.
+ libEnv, _ := NewCustomEnv()
+ libEnv, _ = Lib(lib)(libEnv)
+ for fnName, fnDecl := range libEnv.Functions() {
+ if len(fnDecl.OverloadDecls()) == 0 {
+ continue
+ }
+ overloads, exist := libOverloads[fnName]
+ if !exist {
+ overloads = make([]string, 0, len(fnDecl.OverloadDecls()))
+ }
+ for _, o := range fnDecl.OverloadDecls() {
+ overloads = append(overloads, o.ID())
+ }
+ libOverloads[fnName] = overloads
+ }
+ subsetLib, canSubset := lib.(LibrarySubsetter)
+ alias := ""
+ if aliasLib, canAlias := lib.(LibraryAliaser); canAlias {
+ alias = aliasLib.LibraryAlias()
+ libName = alias
+ }
+ if libName == "stdlib" && canSubset {
+ conf.SetStdLib(subsetLib.LibrarySubset())
+ continue
+ }
+ version := uint32(math.MaxUint32)
+ if versionLib, isVersioned := lib.(LibraryVersioner); isVersioned {
+ version = versionLib.LibraryVersion()
+ }
+ conf.AddExtensions(env.NewExtension(libName, version))
+ }
+
+ // If this is a custom environment without the standard env, mark the stdlib as disabled.
+ if conf.StdLib == nil && !e.HasLibrary("cel.lib.std") {
+ conf.SetStdLib(env.NewLibrarySubset().SetDisabled(true))
+ }
+
+ // Serialize the variables
+ vars := make([]*decls.VariableDecl, 0, len(e.Variables()))
+ stdTypeVars := map[string]*decls.VariableDecl{}
+ for _, v := range stdlib.Types() {
+ stdTypeVars[v.Name()] = v
+ }
+ for _, v := range e.Variables() {
+ if _, isStdType := stdTypeVars[v.Name()]; isStdType {
+ continue
+ }
+ vars = append(vars, v)
+ }
+ if e.contextProto != nil {
+ conf.SetContextVariable(env.NewContextVariable(string(e.contextProto.FullName())))
+ skipVariables := map[string]bool{}
+ fields := e.contextProto.Fields()
+ for i := 0; i < fields.Len(); i++ {
+ field := fields.Get(i)
+ variable, err := fieldToVariable(field)
+ if err != nil {
+ return nil, fmt.Errorf("could not serialize context field variable %q, reason: %w", field.FullName(), err)
+ }
+ skipVariables[variable.Name()] = true
+ }
+ for _, v := range vars {
+ if _, found := skipVariables[v.Name()]; !found {
+ conf.AddVariableDecls(v)
+ }
+ }
+ } else {
+ conf.AddVariableDecls(vars...)
+ }
+
+ // Serialize functions which are distinct from the ones configured by libraries.
+ for fnName, fnDecl := range e.Functions() {
+ if excludedOverloads, found := libOverloads[fnName]; found {
+ if newDecl := fnDecl.Subset(decls.ExcludeOverloads(excludedOverloads...)); newDecl != nil {
+ conf.AddFunctionDecls(newDecl)
+ }
+ } else {
+ conf.AddFunctionDecls(fnDecl)
+ }
+ }
+
+ // Serialize validators
+ for _, val := range e.Validators() {
+ // Only add configurable validators to the env.Config as all others are
+ // expected to be implicitly enabled via extension libraries.
+ if confVal, ok := val.(ConfigurableASTValidator); ok {
+ conf.AddValidators(confVal.ToConfig())
+ }
+ }
+
+ // Serialize features
+ for featID, enabled := range e.features {
+ featName, found := featureNameByID(featID)
+ if !found {
+ // If the feature isn't named, it isn't intended to be publicly exposed
+ continue
+ }
+ conf.AddFeatures(env.NewFeature(featName, enabled))
+ }
+
+ return conf, nil
+}
+
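A sketch of serializing an environment baseline, assuming e is a *cel.Env and a YAML marshaler such as sigs.k8s.io/yaml:

    conf, err := e.ToConfig("my-env")
    if err != nil {
        log.Fatal(err)
    }
    data, err := yaml.Marshal(conf) // env.Config is YAML-serializable
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(string(data))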
// NewEnv creates a program environment configured with the standard library of CEL functions and
// macros. The Env value returned can parse and check any CEL program which builds upon the core
// features documented in the CEL specification.
@@ -179,7 +328,7 @@ func NewCustomEnv(opts ...EnvOption) (*Env, error) {
provider: registry,
features: map[int]bool{},
appliedFeatures: map[int]bool{},
- libraries: map[string]bool{},
+ libraries: map[string]SingletonLibrary{},
validators: []ASTValidator{},
progOpts: []ProgramOption{},
costOptions: []checker.CostOption{},
@@ -198,29 +347,28 @@ func NewCustomEnv(opts ...EnvOption) (*Env, error) {
// It is possible to have both non-nil Ast and Issues values returned from this call: however,
// the mere presence of an Ast does not imply that it is valid for use.
func (e *Env) Check(ast *Ast) (*Ast, *Issues) {
- // Note, errors aren't currently possible on the Ast to ParsedExpr conversion.
- pe, _ := AstToParsedExpr(ast)
-
// Construct the internal checker env, erroring if there is an issue adding the declarations.
chk, err := e.initChecker()
if err != nil {
errs := common.NewErrors(ast.Source())
- errs.ReportError(common.NoLocation, err.Error())
- return nil, NewIssuesWithSourceInfo(errs, ast.SourceInfo())
+ errs.ReportErrorString(common.NoLocation, err.Error())
+ return nil, NewIssuesWithSourceInfo(errs, ast.NativeRep().SourceInfo())
}
- res, errs := checker.Check(pe, ast.Source(), chk)
+ checked, errs := checker.Check(ast.NativeRep(), ast.Source(), chk)
if len(errs.GetErrors()) > 0 {
- return nil, NewIssuesWithSourceInfo(errs, ast.SourceInfo())
+ return nil, NewIssuesWithSourceInfo(errs, ast.NativeRep().SourceInfo())
}
// Manually create the Ast to ensure that the Ast source information (which may be more
// detailed than the information provided by Check), is returned to the caller.
ast = &Ast{
- source: ast.Source(),
- expr: res.Expr,
- info: res.SourceInfo,
- refMap: res.ReferenceMap,
- typeMap: res.TypeMap}
+ source: ast.Source(),
+ impl: checked}
+
+ // Avoid creating a validator config if it's not needed.
+ if len(e.validators) == 0 {
+ return ast, nil
+ }
// Generate a validator configuration from the set of configured validators.
vConfig := newValidatorConfig()
@@ -230,9 +378,9 @@ func (e *Env) Check(ast *Ast) (*Ast, *Issues) {
}
}
// Apply additional validators on the type-checked result.
- iss := NewIssuesWithSourceInfo(errs, ast.SourceInfo())
+ iss := NewIssuesWithSourceInfo(errs, ast.NativeRep().SourceInfo())
for _, v := range e.validators {
- v.Validate(e, vConfig, res, iss)
+ v.Validate(e, vConfig, checked, iss)
}
if iss.Err() != nil {
return nil, iss
@@ -295,17 +443,13 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
copy(chkOptsCopy, e.chkOpts)
// Copy the declarations if needed.
- varsCopy := []*decls.VariableDecl{}
if chk != nil {
// If the type-checker has already been instantiated, then the e.declarations have been
// validated within the chk instance.
chkOptsCopy = append(chkOptsCopy, checker.ValidatedDeclarations(chk))
- } else {
- // If the type-checker has not been instantiated, ensure the unvalidated declarations are
- // provided to the extended Env instance.
- varsCopy = make([]*decls.VariableDecl, len(e.variables))
- copy(varsCopy, e.variables)
}
+ varsCopy := make([]*decls.VariableDecl, len(e.variables))
+ copy(varsCopy, e.variables)
// Copy macros and program options
macsCopy := make([]parser.Macro, len(e.macros))
@@ -352,7 +496,7 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
for k, v := range e.functions {
funcsCopy[k] = v
}
- libsCopy := make(map[string]bool, len(e.libraries))
+ libsCopy := make(map[string]SingletonLibrary, len(e.libraries))
for k, v := range e.libraries {
libsCopy[k] = v
}
@@ -366,6 +510,7 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
variables: varsCopy,
functions: funcsCopy,
macros: macsCopy,
+ contextProto: e.contextProto,
progOpts: progOptsCopy,
adapter: adapter,
features: featuresCopy,
@@ -389,8 +534,8 @@ func (e *Env) HasFeature(flag int) bool {
// HasLibrary returns whether a specific SingletonLibrary has been configured in the environment.
func (e *Env) HasLibrary(libName string) bool {
- configured, exists := e.libraries[libName]
- return exists && configured
+ _, exists := e.libraries[libName]
+ return exists
}
// Libraries returns a list of SingletonLibrary that have been configured in the environment.
@@ -402,6 +547,35 @@ func (e *Env) Libraries() []string {
return libraries
}
+// HasFunction returns whether a specific function has been configured in the environment.
+func (e *Env) HasFunction(functionName string) bool {
+ _, ok := e.functions[functionName]
+ return ok
+}
+
+// Functions returns a shallow copy of the Functions, keyed by function name, that have been configured in the environment.
+func (e *Env) Functions() map[string]*decls.FunctionDecl {
+ shallowCopy := make(map[string]*decls.FunctionDecl, len(e.functions))
+ for nm, fn := range e.functions {
+ shallowCopy[nm] = fn
+ }
+ return shallowCopy
+}
+
+// Variables returns a shallow copy of the variables associated with the environment.
+func (e *Env) Variables() []*decls.VariableDecl {
+ shallowCopy := make([]*decls.VariableDecl, len(e.variables))
+ copy(shallowCopy, e.variables)
+ return shallowCopy
+}
+
+// Macros returns a shallow copy of macros associated with the environment.
+func (e *Env) Macros() []Macro {
+ shallowCopy := make([]Macro, len(e.macros))
+ copy(shallowCopy, e.macros)
+ return shallowCopy
+}
+
// HasValidator returns whether a specific ASTValidator has been configured in the environment.
func (e *Env) HasValidator(name string) bool {
for _, v := range e.validators {
@@ -412,6 +586,11 @@ func (e *Env) HasValidator(name string) bool {
return false
}
+// Validators returns the set of ASTValidators configured on the environment.
+func (e *Env) Validators() []ASTValidator {
+ return e.validators[:]
+}
+
// Parse parses the input expression value `txt` to a Ast and/or a set of Issues.
//
// This form of Parse creates a Source value for the input `txt` and forwards to the
@@ -429,20 +608,21 @@ func (e *Env) Parse(txt string) (*Ast, *Issues) {
// It is possible to have both non-nil Ast and Issues values returned from this call; however,
// the mere presence of an Ast does not imply that it is valid for use.
func (e *Env) ParseSource(src Source) (*Ast, *Issues) {
- res, errs := e.prsr.Parse(src)
+ parsed, errs := e.prsr.Parse(src)
if len(errs.GetErrors()) > 0 {
return nil, &Issues{errs: errs}
}
- // Manually create the Ast to ensure that the text source information is propagated on
- // subsequent calls to Check.
- return &Ast{
- source: src,
- expr: res.GetExpr(),
- info: res.GetSourceInfo()}, nil
+ return &Ast{source: src, impl: parsed}, nil
}
// Program generates an evaluable instance of the Ast within the environment (Env).
func (e *Env) Program(ast *Ast, opts ...ProgramOption) (Program, error) {
+ return e.PlanProgram(ast.NativeRep(), opts...)
+}
+
+// PlanProgram generates an evaluable instance of the AST in the go-native representation within
+// the environment (Env).
+func (e *Env) PlanProgram(a *celast.AST, opts ...ProgramOption) (Program, error) {
optSet := e.progOpts
if len(opts) != 0 {
mergedOpts := []ProgramOption{}
@@ -450,7 +630,7 @@ func (e *Env) Program(ast *Ast, opts ...ProgramOption) (Program, error) {
mergedOpts = append(mergedOpts, opts...)
optSet = mergedOpts
}
- return newProgram(e, ast, optSet)
+ return newProgram(e, a, optSet)
}
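For orientation, the standard source-to-evaluation flow through the API above; the expression and input are illustrative:

    env, _ := cel.NewEnv(cel.Variable("x", cel.IntType))
    checked, iss := env.Compile("x * 2")
    if iss.Err() != nil {
        log.Fatal(iss.Err())
    }
    prg, err := env.Program(checked) // or env.PlanProgram(checked.NativeRep())
    if err != nil {
        log.Fatal(err)
    }
    out, _, err := prg.Eval(map[string]any{"x": 21})
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(out) // 42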
// CELTypeAdapter returns the `types.Adapter` configured for the environment.
@@ -480,31 +660,30 @@ func (e *Env) TypeProvider() ref.TypeProvider {
return &interopLegacyTypeProvider{Provider: e.provider}
}
-// UnknownVars returns an interpreter.PartialActivation which marks all variables declared in the
-// Env as unknown AttributePattern values.
+// UnknownVars returns a PartialActivation which marks all variables declared in the Env as
+// unknown AttributePattern values.
//
-// Note, the UnknownVars will behave the same as an interpreter.EmptyActivation unless the
-// PartialAttributes option is provided as a ProgramOption.
-func (e *Env) UnknownVars() interpreter.PartialActivation {
+// Note, the UnknownVars will behave the same as cel.NoVars() unless the PartialAttributes
+// option is provided as a ProgramOption.
+func (e *Env) UnknownVars() PartialActivation {
act := interpreter.EmptyActivation()
part, _ := PartialVars(act, e.computeUnknownVars(act)...)
return part
}
-// PartialVars returns an interpreter.PartialActivation where all variables not in the input variable
+// PartialVars returns a PartialActivation where all variables not in the input variable
// set, but which have been configured in the environment, are marked as unknown.
//
-// The `vars` value may either be an interpreter.Activation or any valid input to the
-// interpreter.NewActivation call.
+// The `vars` value may either be an Activation or any valid input to the cel.NewActivation call.
//
// Note, this is equivalent to calling cel.PartialVars and manually configuring the set of unknown
// variables. For more advanced use cases of partial state where portions of an object graph, rather
// than top-level variables, are missing the PartialVars() method may be a more suitable choice.
//
-// Note, the PartialVars will behave the same as an interpreter.EmptyActivation unless the
-// PartialAttributes option is provided as a ProgramOption.
-func (e *Env) PartialVars(vars any) (interpreter.PartialActivation, error) {
- act, err := interpreter.NewActivation(vars)
+// Note, the PartialVars will behave the same as cel.NoVars() unless the PartialAttributes
+// option is provided as a ProgramOption.
+func (e *Env) PartialVars(vars any) (PartialActivation, error) {
+ act, err := NewActivation(vars)
if err != nil {
return nil, err
}
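A sketch of partial evaluation with the updated activation aliases, paired with ResidualAst from the following hunk; the values are illustrative:

    env, _ := cel.NewEnv(cel.Variable("x", cel.IntType), cel.Variable("y", cel.IntType))
    checked, _ := env.Compile("x + y")
    prg, _ := env.Program(checked, cel.EvalOptions(cel.OptTrackState, cel.OptPartialEval))
    vars, _ := env.PartialVars(map[string]any{"x": 1})
    out, details, _ := prg.Eval(vars)
    if types.IsUnknown(out) {
        residual, _ := env.ResidualAst(checked, details)
        txt, _ := cel.AstToString(residual)
        fmt.Println(txt) // 1 + y
    }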
@@ -534,8 +713,10 @@ func (e *Env) PartialVars(vars any) (interpreter.PartialActivation, error) {
// TODO: Consider adding an option to generate a Program.Residual to avoid round-tripping to an
// Ast format and then Program again.
func (e *Env) ResidualAst(a *Ast, details *EvalDetails) (*Ast, error) {
- pruned := interpreter.PruneAst(a.Expr(), a.SourceInfo().GetMacroCalls(), details.State())
- expr, err := AstToString(ParsedExprToAst(pruned))
+ ast := a.NativeRep()
+ pruned := interpreter.PruneAst(ast.Expr(), ast.SourceInfo().MacroCalls(), details.State())
+ newAST := &Ast{source: a.Source(), impl: pruned}
+ expr, err := AstToString(newAST)
if err != nil {
return nil, err
}
@@ -556,16 +737,10 @@ func (e *Env) ResidualAst(a *Ast, details *EvalDetails) (*Ast, error) {
// EstimateCost estimates the cost of a type checked CEL expression using the length estimates of input data and
// extension functions provided by estimator.
func (e *Env) EstimateCost(ast *Ast, estimator checker.CostEstimator, opts ...checker.CostOption) (checker.CostEstimate, error) {
- checked := &celast.CheckedAST{
- Expr: ast.Expr(),
- SourceInfo: ast.SourceInfo(),
- TypeMap: ast.typeMap,
- ReferenceMap: ast.refMap,
- }
extendedOpts := make([]checker.CostOption, 0, len(e.costOptions))
extendedOpts = append(extendedOpts, opts...)
extendedOpts = append(extendedOpts, e.costOptions...)
- return checker.Cost(checked, estimator, extendedOpts...)
+ return checker.Cost(ast.NativeRep(), estimator, extendedOpts...)
}
// configure applies a series of EnvOptions to the current environment.
@@ -580,10 +755,15 @@ func (e *Env) configure(opts []EnvOption) (*Env, error) {
}
}
- // If the default UTC timezone fix has been enabled, make sure the library is configured
- e, err = e.maybeApplyFeature(featureDefaultUTCTimeZone, Lib(timeUTCLibrary{}))
- if err != nil {
- return nil, err
+ // If the default UTC timezone has been disabled, configure the legacy overloads
+ if utcTime, isSet := e.features[featureDefaultUTCTimeZone]; isSet && !utcTime {
+ if !e.appliedFeatures[featureDefaultUTCTimeZone] {
+ e.appliedFeatures[featureDefaultUTCTimeZone] = true
+ e, err = Lib(timeLegacyLibrary{})(e)
+ if err != nil {
+ return nil, err
+ }
+ }
}
// Configure the parser.
@@ -597,6 +777,9 @@ func (e *Env) configure(opts []EnvOption) (*Env, error) {
if e.HasFeature(featureVariadicLogicalASTs) {
prsrOpts = append(prsrOpts, parser.EnableVariadicOperatorASTs(true))
}
+ if e.HasFeature(featureIdentEscapeSyntax) {
+ prsrOpts = append(prsrOpts, parser.EnableIdentEscapeSyntax(true))
+ }
e.prsr, err = parser.NewParser(prsrOpts...)
if err != nil {
return nil, err
@@ -664,30 +847,9 @@ func (e *Env) getCheckerOrError() (*checker.Env, error) {
return e.chk, e.chkErr
}
-// maybeApplyFeature determines whether the feature-guarded option is enabled, and if so applies
-// the feature if it has not already been enabled.
-func (e *Env) maybeApplyFeature(feature int, option EnvOption) (*Env, error) {
- if !e.HasFeature(feature) {
- return e, nil
- }
- _, applied := e.appliedFeatures[feature]
- if applied {
- return e, nil
- }
- e, err := option(e)
- if err != nil {
- return nil, err
- }
- // record that the feature has been applied since it will generate declarations
- // and functions which will be propagated on Extend() calls and which should only
- // be registered once.
- e.appliedFeatures[feature] = true
- return e, nil
-}
-
// computeUnknownVars determines a set of missing variables based on the input activation and the
// environment's configured declaration set.
-func (e *Env) computeUnknownVars(vars interpreter.Activation) []*interpreter.AttributePattern {
+func (e *Env) computeUnknownVars(vars Activation) []*interpreter.AttributePattern {
var unknownPatterns []*interpreter.AttributePattern
for _, v := range e.variables {
varName := v.Name()
@@ -707,7 +869,7 @@ type Error = common.Error
// Note: in the future, non-fatal warnings and notices may be inspectable via the Issues struct.
type Issues struct {
errs *common.Errors
- info *exprpb.SourceInfo
+ info *celast.SourceInfo
}
// NewIssues returns an Issues struct from a common.Errors object.
@@ -718,7 +880,7 @@ func NewIssues(errs *common.Errors) *Issues {
// NewIssuesWithSourceInfo returns an Issues struct from a common.Errors object with SourceInfo metadata
// which can be used with the `ReportErrorAtID` method for additional error reports within the context
// information that's inferred from an expression id.
-func NewIssuesWithSourceInfo(errs *common.Errors, info *exprpb.SourceInfo) *Issues {
+func NewIssuesWithSourceInfo(errs *common.Errors, info *celast.SourceInfo) *Issues {
return &Issues{
errs: errs,
info: info,
@@ -749,10 +911,10 @@ func (i *Issues) Append(other *Issues) *Issues {
if i == nil {
return other
}
- if other == nil {
+ if other == nil || i == other {
return i
}
- return NewIssues(i.errs.Append(other.errs.GetErrors()))
+ return NewIssuesWithSourceInfo(i.errs.Append(other.errs.GetErrors()), i.info)
}
// String converts the issues to a suitable display string.
@@ -768,30 +930,7 @@ func (i *Issues) String() string {
// The source metadata for the expression at `id`, if present, is attached to the error report.
// To ensure that source metadata is attached to error reports, use NewIssuesWithSourceInfo.
func (i *Issues) ReportErrorAtID(id int64, message string, args ...any) {
- i.errs.ReportErrorAtID(id, locationByID(id, i.info), message, args...)
-}
-
-// locationByID returns a common.Location given an expression id.
-//
-// TODO: move this functionality into the native SourceInfo and an overhaul of the common.Source
-// as this implementation relies on the abstractions present in the protobuf SourceInfo object,
-// and is replicated in the checker.
-func locationByID(id int64, sourceInfo *exprpb.SourceInfo) common.Location {
- positions := sourceInfo.GetPositions()
- var line = 1
- if offset, found := positions[id]; found {
- col := int(offset)
- for _, lineOffset := range sourceInfo.GetLineOffsets() {
- if lineOffset < offset {
- line++
- col = int(offset - lineOffset)
- } else {
- break
- }
- }
- return common.NewLocation(line, col)
- }
- return common.NoLocation
+ i.errs.ReportErrorAtID(id, i.info.GetStartLocation(id), message, args...)
}
// getStdEnv lazy initializes the CEL standard environment.
@@ -809,7 +948,7 @@ type interopCELTypeProvider struct {
// FindStructType returns a types.Type instance for the given fully-qualified typeName if one exists.
//
-// This method proxies to the underyling ref.TypeProvider's FindType method and converts protobuf type
+// This method proxies to the underlying ref.TypeProvider's FindType method and converts protobuf type
// into a native type representation. If the conversion fails, the type is listed as not found.
func (p *interopCELTypeProvider) FindStructType(typeName string) (*types.Type, bool) {
if et, found := p.FindType(typeName); found {
@@ -822,10 +961,17 @@ func (p *interopCELTypeProvider) FindStructType(typeName string) (*types.Type, b
return nil, false
}
+// FindStructFieldNames returns an empty set of fields for the interop provider.
+//
+// To inspect the field names, migrate to a `types.Provider` implementation.
+func (p *interopCELTypeProvider) FindStructFieldNames(typeName string) ([]string, bool) {
+ return []string{}, false
+}
+
// FindStructFieldType returns a types.FieldType instance for the given fully-qualified typeName and field
// name, if one exists.
//
-// This method proxies to the underyling ref.TypeProvider's FindFieldType method and converts protobuf type
+// This method proxies to the underlying ref.TypeProvider's FindFieldType method and converts protobuf type
// into a native type representation. If the conversion fails, the type is listed as not found.
func (p *interopCELTypeProvider) FindStructFieldType(structType, fieldName string) (*types.FieldType, bool) {
if ft, found := p.FindFieldType(structType, fieldName); found {
diff --git a/vendor/github.com/google/cel-go/cel/folding.go b/vendor/github.com/google/cel-go/cel/folding.go
new file mode 100644
index 000000000..40d843ece
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/folding.go
@@ -0,0 +1,603 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "fmt"
+
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+// ConstantFoldingOption defines a functional option for configuring constant folding.
+type ConstantFoldingOption func(opt *constantFoldingOptimizer) (*constantFoldingOptimizer, error)
+
+// MaxConstantFoldIterations limits the number of times literals may be folded during optimization.
+//
+// Defaults to 100 if not set.
+func MaxConstantFoldIterations(limit int) ConstantFoldingOption {
+ return func(opt *constantFoldingOptimizer) (*constantFoldingOptimizer, error) {
+ opt.maxFoldIterations = limit
+ return opt, nil
+ }
+}
+
+// FoldKnownValues adds an Activation which provides known values for the folding evaluator.
+//
+// Any values the activation provides will be used by the constant folder and turned into
+// literals in the AST.
+//
+// Defaults to the NoVars() Activation.
+func FoldKnownValues(knownValues Activation) ConstantFoldingOption {
+ return func(opt *constantFoldingOptimizer) (*constantFoldingOptimizer, error) {
+ if knownValues != nil {
+ opt.knownValues = knownValues
+ } else {
+ opt.knownValues = NoVars()
+ }
+ return opt, nil
+ }
+}
+
+// NewConstantFoldingOptimizer creates an optimizer which inlines constant scalar and aggregate
+// literal values within function calls and select statements with their evaluated result.
+func NewConstantFoldingOptimizer(opts ...ConstantFoldingOption) (ASTOptimizer, error) {
+ folder := &constantFoldingOptimizer{
+ maxFoldIterations: defaultMaxConstantFoldIterations,
+ }
+ var err error
+ for _, o := range opts {
+ folder, err = o(folder)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return folder, nil
+}
+
+type constantFoldingOptimizer struct {
+ maxFoldIterations int
+ knownValues Activation
+}
+
+// Optimize queries the expression graph for scalar and aggregate literal expressions within call and
+// select statements and then evaluates them and replaces the call site with the literal result.
+//
+// Note: only values which can be represented as literals in CEL syntax are supported.
+func (opt *constantFoldingOptimizer) Optimize(ctx *OptimizerContext, a *ast.AST) *ast.AST {
+ root := ast.NavigateAST(a)
+
+ // Walk the list of foldable expressions and continue to fold until there are no more folds left.
+ // All of the fold candidates returned by the constantExprMatcher should succeed unless there's
+ // a logic bug with the selection of expressions.
+ constantExprMatcherCapture := func(e ast.NavigableExpr) bool { return opt.constantExprMatcher(ctx, a, e) }
+ foldableExprs := ast.MatchDescendants(root, constantExprMatcherCapture)
+ foldCount := 0
+ for len(foldableExprs) != 0 && foldCount < opt.maxFoldIterations {
+ for _, fold := range foldableExprs {
+ // If the expression could be folded because it's a non-strict call, and the
+ // branches are pruned, continue to the next fold.
+ if fold.Kind() == ast.CallKind && maybePruneBranches(ctx, fold) {
+ continue
+ }
+ // Late-bound function calls cannot be folded.
+ if fold.Kind() == ast.CallKind && isLateBoundFunctionCall(ctx, a, fold) {
+ continue
+ }
+ // Otherwise, assume all context is needed to evaluate the expression.
+ err := opt.tryFold(ctx, a, fold)
+ // Ignore errors for identifiers, since there is no guarantee that the environment
+ // has a value for them.
+ if err != nil && fold.Kind() != ast.IdentKind {
+ ctx.ReportErrorAtID(fold.ID(), "constant-folding evaluation failed: %v", err.Error())
+ return a
+ }
+ }
+ foldCount++
+ foldableExprs = ast.MatchDescendants(root, constantExprMatcherCapture)
+ }
+ // Once all of the constants have been folded, try to run through the remaining comprehensions
+ // one last time. In this case, there's no guarantee they'll run, so we only update the
+ // target comprehension node with the literal value if the evaluation succeeds.
+ for _, compre := range ast.MatchDescendants(root, ast.KindMatcher(ast.ComprehensionKind)) {
+ opt.tryFold(ctx, a, compre)
+ }
+
+ // If the output is a list, map, or struct which contains optional entries, then prune it
+ // to make sure that the optionals, if resolved, do not surface in the output literal.
+ pruneOptionalElements(ctx, root)
+
+ // Ensure that all intermediate values in the folded expression can be represented as valid
+ // CEL literals within the AST structure. Use `PostOrderVisit` rather than `MatchDescendants`
+ // to avoid extra allocations during this final pass through the AST.
+ ast.PostOrderVisit(root, ast.NewExprVisitor(func(e ast.Expr) {
+ if e.Kind() != ast.LiteralKind {
+ return
+ }
+ val := e.AsLiteral()
+ adapted, err := adaptLiteral(ctx, val)
+ if err != nil {
+ ctx.ReportErrorAtID(root.ID(), "constant-folding evaluation failed: %v", err.Error())
+ return
+ }
+ ctx.UpdateExpr(e, adapted)
+ }))
+
+ return a
+}
+
+// tryFold attempts to evaluate a sub-expression to a literal.
+//
+// If the evaluation succeeds, the input expr value will be modified to become a literal, otherwise
+// the method will return an error.
+func (opt *constantFoldingOptimizer) tryFold(ctx *OptimizerContext, a *ast.AST, expr ast.Expr) error {
+ // Assume all context is needed to evaluate the expression.
+ subAST := &Ast{
+ impl: ast.NewCheckedAST(ast.NewAST(expr, a.SourceInfo()), a.TypeMap(), a.ReferenceMap()),
+ }
+ prg, err := ctx.Program(subAST)
+ if err != nil {
+ return err
+ }
+ activation := opt.knownValues
+ if activation == nil {
+ activation = NoVars()
+ }
+ out, _, err := prg.Eval(activation)
+ if err != nil {
+ return err
+ }
+ // Update the fold expression to be a literal.
+ ctx.UpdateExpr(expr, ctx.NewLiteral(out))
+ return nil
+}
+
+func isLateBoundFunctionCall(ctx *OptimizerContext, a *ast.AST, expr ast.Expr) bool {
+ call := expr.AsCall()
+ function := ctx.Functions()[call.FunctionName()]
+ if function == nil {
+ return false
+ }
+ return function.HasLateBinding()
+}
+
+// maybePruneBranches inspects the non-strict call expression to determine whether
+// a branch can be removed. Evaluation will naturally prune logical and / or calls,
+// but conditional will not be pruned cleanly, so this is one small area where the
+// constant folding step reimplements a portion of the evaluator.
+func maybePruneBranches(ctx *OptimizerContext, expr ast.NavigableExpr) bool {
+ call := expr.AsCall()
+ args := call.Args()
+ switch call.FunctionName() {
+ case operators.LogicalAnd, operators.LogicalOr:
+ return maybeShortcircuitLogic(ctx, call.FunctionName(), args, expr)
+ case operators.Conditional:
+ cond := args[0]
+ truthy := args[1]
+ falsy := args[2]
+ if cond.Kind() != ast.LiteralKind {
+ return false
+ }
+ if cond.AsLiteral() == types.True {
+ ctx.UpdateExpr(expr, truthy)
+ } else {
+ ctx.UpdateExpr(expr, falsy)
+ }
+ return true
+ case operators.In:
+ haystack := args[1]
+ if haystack.Kind() == ast.ListKind && haystack.AsList().Size() == 0 {
+ ctx.UpdateExpr(expr, ctx.NewLiteral(types.False))
+ return true
+ }
+ needle := args[0]
+ if needle.Kind() == ast.LiteralKind && haystack.Kind() == ast.ListKind {
+ needleValue := needle.AsLiteral()
+ list := haystack.AsList()
+ for _, e := range list.Elements() {
+ if e.Kind() == ast.LiteralKind && e.AsLiteral().Equal(needleValue) == types.True {
+ ctx.UpdateExpr(expr, ctx.NewLiteral(types.True))
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
+
+func maybeShortcircuitLogic(ctx *OptimizerContext, function string, args []ast.Expr, expr ast.NavigableExpr) bool {
+ shortcircuit := types.False
+ skip := types.True
+ if function == operators.LogicalOr {
+ shortcircuit = types.True
+ skip = types.False
+ }
+ newArgs := []ast.Expr{}
+ for _, arg := range args {
+ if arg.Kind() != ast.LiteralKind {
+ newArgs = append(newArgs, arg)
+ continue
+ }
+ if arg.AsLiteral() == skip {
+ continue
+ }
+ if arg.AsLiteral() == shortcircuit {
+ ctx.UpdateExpr(expr, arg)
+ return true
+ }
+ }
+ if len(newArgs) == 0 {
+ newArgs = append(newArgs, args[0])
+ ctx.UpdateExpr(expr, newArgs[0])
+ return true
+ }
+ if len(newArgs) == 1 {
+ ctx.UpdateExpr(expr, newArgs[0])
+ return true
+ }
+ ctx.UpdateExpr(expr, ctx.NewCall(function, newArgs...))
+ return true
+}
+
+// pruneOptionalElements works from the bottom up to resolve optional elements within
+// aggregate literals.
+//
+// Note, many aggregate literals will be resolved as arguments to functions or select
+// statements, so this method exists to handle the case where the literal could not be
+// fully resolved or exists outside of a call, select, or comprehension context.
+func pruneOptionalElements(ctx *OptimizerContext, root ast.NavigableExpr) {
+ aggregateLiterals := ast.MatchDescendants(root, aggregateLiteralMatcher)
+ for _, lit := range aggregateLiterals {
+ switch lit.Kind() {
+ case ast.ListKind:
+ pruneOptionalListElements(ctx, lit)
+ case ast.MapKind:
+ pruneOptionalMapEntries(ctx, lit)
+ case ast.StructKind:
+ pruneOptionalStructFields(ctx, lit)
+ }
+ }
+}
+
+func pruneOptionalListElements(ctx *OptimizerContext, e ast.Expr) {
+ l := e.AsList()
+ elems := l.Elements()
+ optIndices := l.OptionalIndices()
+ if len(optIndices) == 0 {
+ return
+ }
+ updatedElems := []ast.Expr{}
+ updatedIndices := []int32{}
+ newOptIndex := -1
+ for _, e := range elems {
+ newOptIndex++
+ if !l.IsOptional(int32(newOptIndex)) {
+ updatedElems = append(updatedElems, e)
+ continue
+ }
+ if e.Kind() != ast.LiteralKind {
+ updatedElems = append(updatedElems, e)
+ updatedIndices = append(updatedIndices, int32(newOptIndex))
+ continue
+ }
+ optElemVal, ok := e.AsLiteral().(*types.Optional)
+ if !ok {
+ updatedElems = append(updatedElems, e)
+ updatedIndices = append(updatedIndices, int32(newOptIndex))
+ continue
+ }
+ if !optElemVal.HasValue() {
+ newOptIndex-- // Skipping causes the list to get smaller.
+ continue
+ }
+ ctx.UpdateExpr(e, ctx.NewLiteral(optElemVal.GetValue()))
+ updatedElems = append(updatedElems, e)
+ }
+ ctx.UpdateExpr(e, ctx.NewList(updatedElems, updatedIndices))
+}
+
+func pruneOptionalMapEntries(ctx *OptimizerContext, e ast.Expr) {
+ m := e.AsMap()
+ entries := m.Entries()
+ updatedEntries := []ast.EntryExpr{}
+ modified := false
+ for _, e := range entries {
+ entry := e.AsMapEntry()
+ key := entry.Key()
+ val := entry.Value()
+ // If the entry is not optional, or the value-side of the optional hasn't
+ // been resolved to a literal, then preserve the entry as-is.
+ if !entry.IsOptional() || val.Kind() != ast.LiteralKind {
+ updatedEntries = append(updatedEntries, e)
+ continue
+ }
+ optElemVal, ok := val.AsLiteral().(*types.Optional)
+ if !ok {
+ updatedEntries = append(updatedEntries, e)
+ continue
+ }
+ // When the key is not a literal, but the value is, then it needs to be
+ // restored to an optional value.
+ if key.Kind() != ast.LiteralKind {
+ undoOptVal, err := adaptLiteral(ctx, optElemVal)
+ if err != nil {
+ ctx.ReportErrorAtID(val.ID(), "invalid map value literal %v: %v", optElemVal, err)
+ }
+ ctx.UpdateExpr(val, undoOptVal)
+ updatedEntries = append(updatedEntries, e)
+ continue
+ }
+ modified = true
+ if !optElemVal.HasValue() {
+ continue
+ }
+ ctx.UpdateExpr(val, ctx.NewLiteral(optElemVal.GetValue()))
+ updatedEntry := ctx.NewMapEntry(key, val, false)
+ updatedEntries = append(updatedEntries, updatedEntry)
+ }
+ if modified {
+ ctx.UpdateExpr(e, ctx.NewMap(updatedEntries))
+ }
+}
+
+func pruneOptionalStructFields(ctx *OptimizerContext, e ast.Expr) {
+ s := e.AsStruct()
+ fields := s.Fields()
+ updatedFields := []ast.EntryExpr{}
+ modified := false
+ for _, f := range fields {
+ field := f.AsStructField()
+ val := field.Value()
+ if !field.IsOptional() || val.Kind() != ast.LiteralKind {
+ updatedFields = append(updatedFields, f)
+ continue
+ }
+ optElemVal, ok := val.AsLiteral().(*types.Optional)
+ if !ok {
+ updatedFields = append(updatedFields, f)
+ continue
+ }
+ modified = true
+ if !optElemVal.HasValue() {
+ continue
+ }
+ ctx.UpdateExpr(val, ctx.NewLiteral(optElemVal.GetValue()))
+ updatedField := ctx.NewStructField(field.Name(), val, false)
+ updatedFields = append(updatedFields, updatedField)
+ }
+ if modified {
+ ctx.UpdateExpr(e, ctx.NewStruct(s.TypeName(), updatedFields))
+ }
+}
+
+// adaptLiteral converts a runtime CEL value to its equivalent literal expression.
+//
+// For strongly typed values, the type-provider will be used to reconstruct the fields
+// which are present in the literal and their equivalent initialization values.
+func adaptLiteral(ctx *OptimizerContext, val ref.Val) (ast.Expr, error) {
+ switch t := val.Type().(type) {
+ case *types.Type:
+ switch t {
+ case types.BoolType, types.BytesType, types.DoubleType, types.IntType,
+ types.NullType, types.StringType, types.UintType:
+ return ctx.NewLiteral(val), nil
+ case types.DurationType:
+ return ctx.NewCall(
+ overloads.TypeConvertDuration,
+ ctx.NewLiteral(val.ConvertToType(types.StringType)),
+ ), nil
+ case types.TimestampType:
+ return ctx.NewCall(
+ overloads.TypeConvertTimestamp,
+ ctx.NewLiteral(val.ConvertToType(types.StringType)),
+ ), nil
+ case types.OptionalType:
+ opt := val.(*types.Optional)
+ if !opt.HasValue() {
+ return ctx.NewCall("optional.none"), nil
+ }
+ target, err := adaptLiteral(ctx, opt.GetValue())
+ if err != nil {
+ return nil, err
+ }
+ return ctx.NewCall("optional.of", target), nil
+ case types.TypeType:
+ return ctx.NewIdent(val.(*types.Type).TypeName()), nil
+ case types.ListType:
+ l, ok := val.(traits.Lister)
+ if !ok {
+ return nil, fmt.Errorf("failed to adapt %v to literal", val)
+ }
+ elems := make([]ast.Expr, l.Size().(types.Int))
+ idx := 0
+ it := l.Iterator()
+ for it.HasNext() == types.True {
+ elemVal := it.Next()
+ elemExpr, err := adaptLiteral(ctx, elemVal)
+ if err != nil {
+ return nil, err
+ }
+ elems[idx] = elemExpr
+ idx++
+ }
+ return ctx.NewList(elems, []int32{}), nil
+ case types.MapType:
+ m, ok := val.(traits.Mapper)
+ if !ok {
+ return nil, fmt.Errorf("failed to adapt %v to literal", val)
+ }
+ entries := make([]ast.EntryExpr, m.Size().(types.Int))
+ idx := 0
+ it := m.Iterator()
+ for it.HasNext() == types.True {
+ keyVal := it.Next()
+ keyExpr, err := adaptLiteral(ctx, keyVal)
+ if err != nil {
+ return nil, err
+ }
+ valVal := m.Get(keyVal)
+ valExpr, err := adaptLiteral(ctx, valVal)
+ if err != nil {
+ return nil, err
+ }
+ entries[idx] = ctx.NewMapEntry(keyExpr, valExpr, false)
+ idx++
+ }
+ return ctx.NewMap(entries), nil
+ default:
+ provider := ctx.CELTypeProvider()
+ fields, found := provider.FindStructFieldNames(t.TypeName())
+ if !found {
+ return nil, fmt.Errorf("failed to adapt %v to literal", val)
+ }
+ tester := val.(traits.FieldTester)
+ indexer := val.(traits.Indexer)
+ fieldInits := []ast.EntryExpr{}
+ for _, f := range fields {
+ field := types.String(f)
+ if tester.IsSet(field) != types.True {
+ continue
+ }
+ fieldVal := indexer.Get(field)
+ fieldExpr, err := adaptLiteral(ctx, fieldVal)
+ if err != nil {
+ return nil, err
+ }
+ fieldInits = append(fieldInits, ctx.NewStructField(f, fieldExpr, false))
+ }
+ return ctx.NewStruct(t.TypeName(), fieldInits), nil
+ }
+ }
+ return nil, fmt.Errorf("failed to adapt %v to literal", val)
+}
+
+// constantExprMatcher matches calls, select statements, and comprehensions whose arguments
+// are all constant scalar or aggregate literal values.
+//
+// Only comprehensions which are not nested are included as possible constant folds, and only
+// if all variables referenced in the comprehension stack are iteration or accumulation
+// variables.
+func (opt *constantFoldingOptimizer) constantExprMatcher(ctx *OptimizerContext, a *ast.AST, e ast.NavigableExpr) bool {
+ switch e.Kind() {
+ case ast.CallKind:
+ return constantCallMatcher(e)
+ case ast.SelectKind:
+ sel := e.AsSelect() // guaranteed to be a navigable value
+ return constantMatcher(sel.Operand().(ast.NavigableExpr))
+ case ast.IdentKind:
+ return opt.knownValues != nil && a.ReferenceMap()[e.ID()] != nil
+ case ast.ComprehensionKind:
+ if isNestedComprehension(e) {
+ return false
+ }
+ vars := map[string]bool{}
+ constantExprs := true
+ visitor := ast.NewExprVisitor(func(e ast.Expr) {
+ if e.Kind() == ast.ComprehensionKind {
+ nested := e.AsComprehension()
+ vars[nested.AccuVar()] = true
+ vars[nested.IterVar()] = true
+ }
+ if e.Kind() == ast.IdentKind && !vars[e.AsIdent()] {
+ constantExprs = false
+ }
+ // Late-bound function calls cannot be folded.
+ if e.Kind() == ast.CallKind && isLateBoundFunctionCall(ctx, a, e) {
+ constantExprs = false
+ }
+ })
+ ast.PreOrderVisit(e, visitor)
+ return constantExprs
+ default:
+ return false
+ }
+}
+
+// constantCallMatcher identifies strict and non-strict calls which can be folded.
+func constantCallMatcher(e ast.NavigableExpr) bool {
+ call := e.AsCall()
+ children := e.Children()
+ fnName := call.FunctionName()
+ if fnName == operators.LogicalAnd {
+ for _, child := range children {
+ if child.Kind() == ast.LiteralKind {
+ return true
+ }
+ }
+ }
+ if fnName == operators.LogicalOr {
+ for _, child := range children {
+ if child.Kind() == ast.LiteralKind {
+ return true
+ }
+ }
+ }
+ if fnName == operators.Conditional {
+ cond := children[0]
+ if cond.Kind() == ast.LiteralKind && cond.AsLiteral().Type() == types.BoolType {
+ return true
+ }
+ }
+ if fnName == operators.In {
+ haystack := children[1]
+ if haystack.Kind() == ast.ListKind && haystack.AsList().Size() == 0 {
+ return true
+ }
+ needle := children[0]
+ if needle.Kind() == ast.LiteralKind && haystack.Kind() == ast.ListKind {
+ needleValue := needle.AsLiteral()
+ list := haystack.AsList()
+ for _, e := range list.Elements() {
+ if e.Kind() == ast.LiteralKind && e.AsLiteral().Equal(needleValue) == types.True {
+ return true
+ }
+ }
+ }
+ }
+ // convert all other calls with constant arguments
+ for _, child := range children {
+ if !constantMatcher(child) {
+ return false
+ }
+ }
+ return true
+}
+
+func isNestedComprehension(e ast.NavigableExpr) bool {
+ parent, found := e.Parent()
+ for found {
+ if parent.Kind() == ast.ComprehensionKind {
+ return true
+ }
+ parent, found = parent.Parent()
+ }
+ return false
+}
+
+func aggregateLiteralMatcher(e ast.NavigableExpr) bool {
+ return e.Kind() == ast.ListKind || e.Kind() == ast.MapKind || e.Kind() == ast.StructKind
+}
+
+var (
+ constantMatcher = ast.ConstantValueMatcher()
+)
+
+const (
+ defaultMaxConstantFoldIterations = 100
+)
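A sketch of wiring the folder into the static optimizer added in optimizer.go by this change; the environment and expression are illustrative:

    folder, err := cel.NewConstantFoldingOptimizer(cel.MaxConstantFoldIterations(50))
    if err != nil {
        log.Fatal(err)
    }
    opt := cel.NewStaticOptimizer(folder)
    optimized, iss := opt.Optimize(env, checked) // e.g. folds "1 + 2 + x" into "3 + x"
    if iss.Err() != nil {
        log.Fatal(iss.Err())
    }
    _ = optimized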
diff --git a/vendor/github.com/google/cel-go/cel/inlining.go b/vendor/github.com/google/cel-go/cel/inlining.go
new file mode 100644
index 000000000..a4530e19e
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/inlining.go
@@ -0,0 +1,228 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/containers"
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+// InlineVariable holds a variable name to be matched and an AST representing
+// the expression graph which should be used to replace it.
+type InlineVariable struct {
+ name string
+ alias string
+ def *ast.AST
+}
+
+// Name returns the qualified variable or field selection to replace.
+func (v *InlineVariable) Name() string {
+ return v.name
+}
+
+// Alias returns the alias to use when performing cel.bind() calls during inlining.
+func (v *InlineVariable) Alias() string {
+ return v.alias
+}
+
+// Expr returns the inlined expression value.
+func (v *InlineVariable) Expr() ast.Expr {
+ return v.def.Expr()
+}
+
+// Type indicates the inlined expression type.
+func (v *InlineVariable) Type() *Type {
+ return v.def.GetType(v.def.Expr().ID())
+}
+
+// NewInlineVariable declares a variable name to be replaced by a checked expression.
+func NewInlineVariable(name string, definition *Ast) *InlineVariable {
+ return NewInlineVariableWithAlias(name, name, definition)
+}
+
+// NewInlineVariableWithAlias declares a variable name to be replaced by a checked expression.
+// If the variable occurs more than once, the provided alias will be used to replace the expressions
+// where the variable name occurs.
+func NewInlineVariableWithAlias(name, alias string, definition *Ast) *InlineVariable {
+ return &InlineVariable{name: name, alias: alias, def: definition.NativeRep()}
+}
+
+// NewInliningOptimizer creates an optimizer which replaces variables with expression definitions.
+//
+// If a variable occurs one time, the variable is replaced by the inline definition. If the
+// variable occurs more than once, the variable occurrences are replaced by a cel.bind() call.
+func NewInliningOptimizer(inlineVars ...*InlineVariable) ASTOptimizer {
+ return &inliningOptimizer{variables: inlineVars}
+}
+
+type inliningOptimizer struct {
+ variables []*InlineVariable
+}
+
+func (opt *inliningOptimizer) Optimize(ctx *OptimizerContext, a *ast.AST) *ast.AST {
+ root := ast.NavigateAST(a)
+ for _, inlineVar := range opt.variables {
+ matches := ast.MatchDescendants(root, opt.matchVariable(inlineVar.Name()))
+ // Skip cases where the variable isn't in the expression graph
+ if len(matches) == 0 {
+ continue
+ }
+
+ // For a single match, do a direct replacement of the expression sub-graph.
+ if len(matches) == 1 || !isBindable(matches, inlineVar.Expr(), inlineVar.Type()) {
+ for _, match := range matches {
+ // Copy the inlined AST expr and source info.
+ copyExpr := ctx.CopyASTAndMetadata(inlineVar.def)
+ opt.inlineExpr(ctx, match, copyExpr, inlineVar.Type())
+ }
+ continue
+ }
+
+ // For multiple matches, find the least common ancestor (lca) and insert the
+ // variable as a cel.bind() macro.
+ var lca ast.NavigableExpr = root
+ lcaAncestorCount := 0
+ ancestors := map[int64]int{}
+ for _, match := range matches {
+ // Update the identifier matches with the provided alias.
+ parent, found := match, true
+ for found {
+ ancestorCount, hasAncestor := ancestors[parent.ID()]
+ if !hasAncestor {
+ ancestors[parent.ID()] = 1
+ parent, found = parent.Parent()
+ continue
+ }
+ if lcaAncestorCount < ancestorCount || (lcaAncestorCount == ancestorCount && lca.Depth() < parent.Depth()) {
+ lca = parent
+ lcaAncestorCount = ancestorCount
+ }
+ ancestors[parent.ID()] = ancestorCount + 1
+ parent, found = parent.Parent()
+ }
+ aliasExpr := ctx.NewIdent(inlineVar.Alias())
+ opt.inlineExpr(ctx, match, aliasExpr, inlineVar.Type())
+ }
+
+ // Copy the inlined AST expr and source info.
+ copyExpr := ctx.CopyASTAndMetadata(inlineVar.def)
+ // Update the least common ancestor by inserting a cel.bind() call to the alias.
+ inlined, bindMacro := ctx.NewBindMacro(lca.ID(), inlineVar.Alias(), copyExpr, lca)
+ opt.inlineExpr(ctx, lca, inlined, inlineVar.Type())
+ ctx.SetMacroCall(lca.ID(), bindMacro)
+ }
+ return a
+}
+
+// inlineExpr replaces the current expression with the inlined one, unless the location of the inlining
+// happens within a presence test, e.g. has(a.b.c) -> inline alpha for a.b.c in which case an attempt is
+// made to determine whether the inlined value can be presence or existence tested.
+func (opt *inliningOptimizer) inlineExpr(ctx *OptimizerContext, prev ast.NavigableExpr, inlined ast.Expr, inlinedType *Type) {
+ switch prev.Kind() {
+ case ast.SelectKind:
+ sel := prev.AsSelect()
+ if !sel.IsTestOnly() {
+ ctx.UpdateExpr(prev, inlined)
+ return
+ }
+ opt.rewritePresenceExpr(ctx, prev, inlined, inlinedType)
+ default:
+ ctx.UpdateExpr(prev, inlined)
+ }
+}
+
+// rewritePresenceExpr converts the inlined expression, when it occurs within a has() macro, to a
+// type-safe expression appropriate for the inlined type, if possible.
+//
+// If the rewrite is not possible, an error is reported at the inline expression site.
+func (opt *inliningOptimizer) rewritePresenceExpr(ctx *OptimizerContext, prev, inlined ast.Expr, inlinedType *Type) {
+ // If the input inlined expression is not a select expression it won't work with the has()
+ // macro. Attempt to rewrite the presence test in terms of the typed input, otherwise error.
+ if inlined.Kind() == ast.SelectKind {
+ presenceTest, hasMacro := ctx.NewHasMacro(prev.ID(), inlined)
+ ctx.UpdateExpr(prev, presenceTest)
+ ctx.SetMacroCall(prev.ID(), hasMacro)
+ return
+ }
+
+ ctx.ClearMacroCall(prev.ID())
+ if inlinedType.IsAssignableType(NullType) {
+ ctx.UpdateExpr(prev,
+ ctx.NewCall(operators.NotEquals,
+ inlined,
+ ctx.NewLiteral(types.NullValue),
+ ))
+ return
+ }
+ if inlinedType.HasTrait(traits.SizerType) {
+ ctx.UpdateExpr(prev,
+ ctx.NewCall(operators.NotEquals,
+ ctx.NewMemberCall(overloads.Size, inlined),
+ ctx.NewLiteral(types.IntZero),
+ ))
+ return
+ }
+ ctx.ReportErrorAtID(prev.ID(), "unable to inline expression type %v into presence test", inlinedType)
+}
+
+// isBindable indicates whether the inlined type can be used within a cel.bind() if the expression
+// being replaced occurs within a presence test. Value types with a size() method or field selection
+// support can be bound.
+//
+// In future iterations, support may also be added for indexer types which can be rewritten as an `in`
+// expression; however, this would imply a rewrite of the inlined expression that may not be necessary
+// in most cases.
+func isBindable(matches []ast.NavigableExpr, inlined ast.Expr, inlinedType *Type) bool {
+ if inlinedType.IsAssignableType(NullType) ||
+ inlinedType.HasTrait(traits.SizerType) {
+ return true
+ }
+ for _, m := range matches {
+ if m.Kind() != ast.SelectKind {
+ continue
+ }
+ sel := m.AsSelect()
+ if sel.IsTestOnly() {
+ return false
+ }
+ }
+ return true
+}
+
+// matchVariable matches simple identifiers, select expressions, and presence test expressions
+// which match the (potentially) qualified variable name provided as input.
+//
+// Note, this function does not support inlining against select expressions which include optional
+// field selection. This may be a future refinement.
+func (opt *inliningOptimizer) matchVariable(varName string) ast.ExprMatcher {
+ return func(e ast.NavigableExpr) bool {
+ if e.Kind() == ast.IdentKind && e.AsIdent() == varName {
+ return true
+ }
+ if e.Kind() == ast.SelectKind {
+ sel := e.AsSelect()
+ // While the `ToQualifiedName` call could take the select directly, this
+ // would skip presence tests from possible matches, which we would like
+ // to include.
+ qualName, found := containers.ToQualifiedName(sel.Operand())
+ return found && qualName+"."+sel.FieldName() == varName
+ }
+ return false
+ }
+}
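A minimal sketch of the inlining flow implemented above; the variable names and the unparse output are illustrative. Because `b` occurs twice in the target expression, the optimizer introduces a cel.bind() call instead of duplicating the definition:

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv(
		cel.Variable("a", cel.IntType),
		cel.Variable("b", cel.IntType),
	)
	if err != nil {
		panic(err)
	}
	checked, _ := env.Compile(`b + b`)
	def, _ := env.Compile(`a * 2`)
	opt := cel.NewStaticOptimizer(
		cel.NewInliningOptimizer(cel.NewInlineVariable("b", def)))
	inlined, iss := opt.Optimize(env, checked)
	if iss != nil && iss.Err() != nil {
		panic(iss.Err())
	}
	out, _ := cel.AstToString(inlined)
	fmt.Println(out) // roughly: cel.bind(b, a * 2, b + b)
}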
diff --git a/vendor/github.com/google/cel-go/cel/io.go b/vendor/github.com/google/cel-go/cel/io.go
index 80f63140e..2e611228d 100644
--- a/vendor/github.com/google/cel-go/cel/io.go
+++ b/vendor/github.com/google/cel-go/cel/io.go
@@ -28,6 +28,7 @@ import (
"github.com/google/cel-go/common/types/traits"
"github.com/google/cel-go/parser"
+ celpb "cel.dev/expr"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
anypb "google.golang.org/protobuf/types/known/anypb"
)
@@ -47,17 +48,11 @@ func CheckedExprToAst(checkedExpr *exprpb.CheckedExpr) *Ast {
//
// Prefer CheckedExprToAst if loading expressions from storage.
func CheckedExprToAstWithSource(checkedExpr *exprpb.CheckedExpr, src Source) (*Ast, error) {
- checkedAST, err := ast.CheckedExprToCheckedAST(checkedExpr)
+ checked, err := ast.ToAST(checkedExpr)
if err != nil {
return nil, err
}
- return &Ast{
- expr: checkedAST.Expr,
- info: checkedAST.SourceInfo,
- source: src,
- refMap: checkedAST.ReferenceMap,
- typeMap: checkedAST.TypeMap,
- }, nil
+ return &Ast{source: src, impl: checked}, nil
}
// AstToCheckedExpr converts an Ast to a protobuf CheckedExpr value.
@@ -67,13 +62,7 @@ func AstToCheckedExpr(a *Ast) (*exprpb.CheckedExpr, error) {
if !a.IsChecked() {
return nil, fmt.Errorf("cannot convert unchecked ast")
}
- cAst := &ast.CheckedAST{
- Expr: a.expr,
- SourceInfo: a.info,
- ReferenceMap: a.refMap,
- TypeMap: a.typeMap,
- }
- return ast.CheckedASTToCheckedExpr(cAst)
+ return ast.ToProto(a.NativeRep())
}
// ParsedExprToAst converts a parsed expression proto message to an Ast.
@@ -89,18 +78,12 @@ func ParsedExprToAst(parsedExpr *exprpb.ParsedExpr) *Ast {
//
// Prefer ParsedExprToAst if loading expressions from storage.
func ParsedExprToAstWithSource(parsedExpr *exprpb.ParsedExpr, src Source) *Ast {
- si := parsedExpr.GetSourceInfo()
- if si == nil {
- si = &exprpb.SourceInfo{}
- }
+ info, _ := ast.ProtoToSourceInfo(parsedExpr.GetSourceInfo())
if src == nil {
- src = common.NewInfoSource(si)
- }
- return &Ast{
- expr: parsedExpr.GetExpr(),
- info: si,
- source: src,
+ src = common.NewInfoSource(parsedExpr.GetSourceInfo())
}
+ e, _ := ast.ProtoToExpr(parsedExpr.GetExpr())
+ return &Ast{source: src, impl: ast.NewAST(e, info)}
}
// AstToParsedExpr converts an Ast to a protobuf ParsedExpr value.
@@ -116,80 +99,151 @@ func AstToParsedExpr(a *Ast) (*exprpb.ParsedExpr, error) {
// Note, the conversion may not be an exact replica of the original expression, but will produce
// a string that is semantically equivalent and whose textual representation is stable.
func AstToString(a *Ast) (string, error) {
- expr := a.Expr()
- info := a.SourceInfo()
- return parser.Unparse(expr, info)
+ return ExprToString(a.NativeRep().Expr(), a.NativeRep().SourceInfo())
+}
+
+// ExprToString converts an AST Expr node back to a string using macro call tracking metadata from
+// source info if any macros are encountered within the expression.
+func ExprToString(e ast.Expr, info *ast.SourceInfo) (string, error) {
+ return parser.Unparse(e, info)
}
-// RefValueToValue converts between ref.Val and api.expr.Value.
+// RefValueToValue converts between ref.Val and google.api.expr.v1alpha1.Value.
// The result Value is the serialized proto form. The ref.Val must not be error or unknown.
func RefValueToValue(res ref.Val) (*exprpb.Value, error) {
+ return ValueAsAlphaProto(res)
+}
+
+// ValueAsAlphaProto converts between ref.Val and google.api.expr.v1alpha1.Value.
+// The result Value is the serialized proto form. The ref.Val must not be error or unknown.
+func ValueAsAlphaProto(res ref.Val) (*exprpb.Value, error) {
+ canonical, err := ValueAsProto(res)
+ if err != nil {
+ return nil, err
+ }
+ alpha := &exprpb.Value{}
+ err = convertProto(canonical, alpha)
+ return alpha, err
+}
+
+// RefValToExprValue converts between ref.Val and google.api.expr.v1alpha1.ExprValue.
+// The result ExprValue is the serialized proto form.
+func RefValToExprValue(res ref.Val) (*exprpb.ExprValue, error) {
+ return ExprValueAsAlphaProto(res)
+}
+
+// ExprValueAsAlphaProto converts between ref.Val and google.api.expr.v1alpha1.ExprValue.
+// The result ExprValue is the serialized proto form.
+func ExprValueAsAlphaProto(res ref.Val) (*exprpb.ExprValue, error) {
+ canonical, err := ExprValueAsProto(res)
+ if err != nil {
+ return nil, err
+ }
+ alpha := &exprpb.ExprValue{}
+ err = convertProto(canonical, alpha)
+ return alpha, err
+}
+
+// ExprValueAsProto converts between ref.Val and cel.expr.ExprValue.
+// The result ExprValue is the serialized proto form.
+func ExprValueAsProto(res ref.Val) (*celpb.ExprValue, error) {
+ switch res := res.(type) {
+ case *types.Unknown:
+ return &celpb.ExprValue{
+ Kind: &celpb.ExprValue_Unknown{
+ Unknown: &celpb.UnknownSet{
+ Exprs: res.IDs(),
+ },
+ }}, nil
+ case *types.Err:
+ return &celpb.ExprValue{
+ Kind: &celpb.ExprValue_Error{
+ Error: &celpb.ErrorSet{
+ // Keeping the error code as UNKNOWN since there are no error codes associated with
+ // Cel-Go runtime errors.
+ Errors: []*celpb.Status{{Code: 2, Message: res.Error()}},
+ },
+ },
+ }, nil
+ default:
+ val, err := ValueAsProto(res)
+ if err != nil {
+ return nil, err
+ }
+ return &celpb.ExprValue{
+ Kind: &celpb.ExprValue_Value{Value: val}}, nil
+ }
+}
+
+// ValueAsProto converts between ref.Val and cel.expr.Value.
+// The result Value is the serialized proto form. The ref.Val must not be error or unknown.
+func ValueAsProto(res ref.Val) (*celpb.Value, error) {
switch res.Type() {
case types.BoolType:
- return &exprpb.Value{
- Kind: &exprpb.Value_BoolValue{BoolValue: res.Value().(bool)}}, nil
+ return &celpb.Value{
+ Kind: &celpb.Value_BoolValue{BoolValue: res.Value().(bool)}}, nil
case types.BytesType:
- return &exprpb.Value{
- Kind: &exprpb.Value_BytesValue{BytesValue: res.Value().([]byte)}}, nil
+ return &celpb.Value{
+ Kind: &celpb.Value_BytesValue{BytesValue: res.Value().([]byte)}}, nil
case types.DoubleType:
- return &exprpb.Value{
- Kind: &exprpb.Value_DoubleValue{DoubleValue: res.Value().(float64)}}, nil
+ return &celpb.Value{
+ Kind: &celpb.Value_DoubleValue{DoubleValue: res.Value().(float64)}}, nil
case types.IntType:
- return &exprpb.Value{
- Kind: &exprpb.Value_Int64Value{Int64Value: res.Value().(int64)}}, nil
+ return &celpb.Value{
+ Kind: &celpb.Value_Int64Value{Int64Value: res.Value().(int64)}}, nil
case types.ListType:
l := res.(traits.Lister)
sz := l.Size().(types.Int)
- elts := make([]*exprpb.Value, 0, int64(sz))
+ elts := make([]*celpb.Value, 0, int64(sz))
for i := types.Int(0); i < sz; i++ {
- v, err := RefValueToValue(l.Get(i))
+ v, err := ValueAsProto(l.Get(i))
if err != nil {
return nil, err
}
elts = append(elts, v)
}
- return &exprpb.Value{
- Kind: &exprpb.Value_ListValue{
- ListValue: &exprpb.ListValue{Values: elts}}}, nil
+ return &celpb.Value{
+ Kind: &celpb.Value_ListValue{
+ ListValue: &celpb.ListValue{Values: elts}}}, nil
case types.MapType:
mapper := res.(traits.Mapper)
sz := mapper.Size().(types.Int)
- entries := make([]*exprpb.MapValue_Entry, 0, int64(sz))
+ entries := make([]*celpb.MapValue_Entry, 0, int64(sz))
for it := mapper.Iterator(); it.HasNext().(types.Bool); {
k := it.Next()
v := mapper.Get(k)
- kv, err := RefValueToValue(k)
+ kv, err := ValueAsProto(k)
if err != nil {
return nil, err
}
- vv, err := RefValueToValue(v)
+ vv, err := ValueAsProto(v)
if err != nil {
return nil, err
}
- entries = append(entries, &exprpb.MapValue_Entry{Key: kv, Value: vv})
+ entries = append(entries, &celpb.MapValue_Entry{Key: kv, Value: vv})
}
- return &exprpb.Value{
- Kind: &exprpb.Value_MapValue{
- MapValue: &exprpb.MapValue{Entries: entries}}}, nil
+ return &celpb.Value{
+ Kind: &celpb.Value_MapValue{
+ MapValue: &celpb.MapValue{Entries: entries}}}, nil
case types.NullType:
- return &exprpb.Value{
- Kind: &exprpb.Value_NullValue{}}, nil
+ return &celpb.Value{
+ Kind: &celpb.Value_NullValue{}}, nil
case types.StringType:
- return &exprpb.Value{
- Kind: &exprpb.Value_StringValue{StringValue: res.Value().(string)}}, nil
+ return &celpb.Value{
+ Kind: &celpb.Value_StringValue{StringValue: res.Value().(string)}}, nil
case types.TypeType:
typeName := res.(ref.Type).TypeName()
- return &exprpb.Value{Kind: &exprpb.Value_TypeValue{TypeValue: typeName}}, nil
+ return &celpb.Value{Kind: &celpb.Value_TypeValue{TypeValue: typeName}}, nil
case types.UintType:
- return &exprpb.Value{
- Kind: &exprpb.Value_Uint64Value{Uint64Value: res.Value().(uint64)}}, nil
+ return &celpb.Value{
+ Kind: &celpb.Value_Uint64Value{Uint64Value: res.Value().(uint64)}}, nil
default:
any, err := res.ConvertToNative(anyPbType)
if err != nil {
return nil, err
}
- return &exprpb.Value{
- Kind: &exprpb.Value_ObjectValue{ObjectValue: any.(*anypb.Any)}}, nil
+ return &celpb.Value{
+ Kind: &celpb.Value_ObjectValue{ObjectValue: any.(*anypb.Any)}}, nil
}
}
@@ -210,57 +264,71 @@ var (
anyPbType = reflect.TypeOf(&anypb.Any{})
)
-// ValueToRefValue converts between exprpb.Value and ref.Val.
+// ValueToRefValue converts between google.api.expr.v1alpha1.Value and ref.Val.
func ValueToRefValue(adapter types.Adapter, v *exprpb.Value) (ref.Val, error) {
+ return AlphaProtoAsValue(adapter, v)
+}
+
+// AlphaProtoAsValue converts between google.api.expr.v1alpha1.Value and ref.Val.
+func AlphaProtoAsValue(adapter types.Adapter, v *exprpb.Value) (ref.Val, error) {
+ canonical := &celpb.Value{}
+ if err := convertProto(v, canonical); err != nil {
+ return nil, err
+ }
+ return ProtoAsValue(adapter, canonical)
+}
+
+// ProtoAsValue converts between cel.expr.Value and ref.Val.
+func ProtoAsValue(adapter types.Adapter, v *celpb.Value) (ref.Val, error) {
switch v.Kind.(type) {
- case *exprpb.Value_NullValue:
+ case *celpb.Value_NullValue:
return types.NullValue, nil
- case *exprpb.Value_BoolValue:
+ case *celpb.Value_BoolValue:
return types.Bool(v.GetBoolValue()), nil
- case *exprpb.Value_Int64Value:
+ case *celpb.Value_Int64Value:
return types.Int(v.GetInt64Value()), nil
- case *exprpb.Value_Uint64Value:
+ case *celpb.Value_Uint64Value:
return types.Uint(v.GetUint64Value()), nil
- case *exprpb.Value_DoubleValue:
+ case *celpb.Value_DoubleValue:
return types.Double(v.GetDoubleValue()), nil
- case *exprpb.Value_StringValue:
+ case *celpb.Value_StringValue:
return types.String(v.GetStringValue()), nil
- case *exprpb.Value_BytesValue:
+ case *celpb.Value_BytesValue:
return types.Bytes(v.GetBytesValue()), nil
- case *exprpb.Value_ObjectValue:
+ case *celpb.Value_ObjectValue:
any := v.GetObjectValue()
msg, err := anypb.UnmarshalNew(any, proto.UnmarshalOptions{DiscardUnknown: true})
if err != nil {
return nil, err
}
return adapter.NativeToValue(msg), nil
- case *exprpb.Value_MapValue:
+ case *celpb.Value_MapValue:
m := v.GetMapValue()
entries := make(map[ref.Val]ref.Val)
for _, entry := range m.Entries {
- key, err := ValueToRefValue(adapter, entry.Key)
+ key, err := ProtoAsValue(adapter, entry.Key)
if err != nil {
return nil, err
}
- pb, err := ValueToRefValue(adapter, entry.Value)
+ pb, err := ProtoAsValue(adapter, entry.Value)
if err != nil {
return nil, err
}
entries[key] = pb
}
return adapter.NativeToValue(entries), nil
- case *exprpb.Value_ListValue:
+ case *celpb.Value_ListValue:
l := v.GetListValue()
elts := make([]ref.Val, len(l.Values))
for i, e := range l.Values {
- rv, err := ValueToRefValue(adapter, e)
+ rv, err := ProtoAsValue(adapter, e)
if err != nil {
return nil, err
}
elts[i] = rv
}
return adapter.NativeToValue(elts), nil
- case *exprpb.Value_TypeValue:
+ case *celpb.Value_TypeValue:
typeName := v.GetTypeValue()
tv, ok := typeNameToTypeValue[typeName]
if ok {
@@ -270,3 +338,12 @@ func ValueToRefValue(adapter types.Adapter, v *exprpb.Value) (ref.Val, error) {
}
return nil, errors.New("unknown value")
}
+
+func convertProto(src, dst proto.Message) error {
+ pb, err := proto.Marshal(src)
+ if err != nil {
+ return err
+ }
+ err = proto.Unmarshal(pb, dst)
+ return err
+}
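The conversion helpers above round-trip runtime values through the canonical cel.expr protos, with the v1alpha1 variants layered on top via convertProto. A small sketch, with the chosen value illustrative:

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/common/types"
)

func main() {
	// ref.Val -> canonical cel.expr Value proto.
	pb, err := cel.ValueAsProto(types.String("hello"))
	if err != nil {
		panic(err)
	}
	// Canonical cel.expr Value proto -> ref.Val.
	val, err := cel.ProtoAsValue(types.DefaultTypeAdapter, pb)
	if err != nil {
		panic(err)
	}
	fmt.Println(val.Equal(types.String("hello"))) // true
}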
diff --git a/vendor/github.com/google/cel-go/cel/library.go b/vendor/github.com/google/cel-go/cel/library.go
index 4d232085c..59a10e81d 100644
--- a/vendor/github.com/google/cel-go/cel/library.go
+++ b/vendor/github.com/google/cel-go/cel/library.go
@@ -15,11 +15,13 @@
package cel
import (
+ "fmt"
"math"
- "strconv"
- "strings"
- "time"
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/decls"
+ "github.com/google/cel-go/common/env"
"github.com/google/cel-go/common/operators"
"github.com/google/cel-go/common/overloads"
"github.com/google/cel-go/common/stdlib"
@@ -28,17 +30,17 @@ import (
"github.com/google/cel-go/common/types/traits"
"github.com/google/cel-go/interpreter"
"github.com/google/cel-go/parser"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
const (
optMapMacro = "optMap"
optFlatMapMacro = "optFlatMap"
hasValueFunc = "hasValue"
+ unwrapOptFunc = "unwrapOpt"
optionalNoneFunc = "optional.none"
optionalOfFunc = "optional.of"
optionalOfNonZeroValueFunc = "optional.ofNonZeroValue"
+ optionalUnwrapFunc = "optional.unwrap"
valueFunc = "value"
unusedIterVar = "#unused"
)
@@ -69,6 +71,23 @@ type SingletonLibrary interface {
LibraryName() string
}
+// LibraryAliaser generates a simple named alias for the library, for use during environment serialization.
+type LibraryAliaser interface {
+ LibraryAlias() string
+}
+
+// LibrarySubsetter provides the subset description associated with the library, or nil if the library is not subset.
+type LibrarySubsetter interface {
+ LibrarySubset() *env.LibrarySubset
+}
+
+// LibraryVersioner provides a version number for the library.
+//
+// If not implemented, the library version will be flagged as 'latest' during environment serialization.
+type LibraryVersioner interface {
+ LibraryVersion() uint32
+}
+
// Lib creates an EnvOption out of a Library, allowing libraries to be provided as functional args,
// and to be linked to each other.
func Lib(l Library) EnvOption {
@@ -78,7 +97,7 @@ func Lib(l Library) EnvOption {
if e.HasLibrary(singleton.LibraryName()) {
return e, nil
}
- e.libraries[singleton.LibraryName()] = true
+ e.libraries[singleton.LibraryName()] = singleton
}
var err error
for _, opt := range l.CompileOptions() {
@@ -92,26 +111,79 @@ func Lib(l Library) EnvOption {
}
}
+// StdLibOption specifies a functional option for configuring the standard CEL library.
+type StdLibOption func(*stdLibrary) *stdLibrary
+
+// StdLibSubset configures the standard library to use a subset of its functions and macros.
+//
+// Since the StdLib is a singleton library, only the first instance of the StdLib() environment options
+// will be configured on the environment, which means only the StdLibSubset() initially configured with
+// the library will be used.
+func StdLibSubset(subset *env.LibrarySubset) StdLibOption {
+ return func(lib *stdLibrary) *stdLibrary {
+ lib.subset = subset
+ return lib
+ }
+}
+
// StdLib returns an EnvOption for the standard library of CEL functions and macros.
-func StdLib() EnvOption {
- return Lib(stdLibrary{})
+func StdLib(opts ...StdLibOption) EnvOption {
+ lib := &stdLibrary{}
+ for _, o := range opts {
+ lib = o(lib)
+ }
+ return Lib(lib)
}
// stdLibrary implements the Library interface and provides functional options for the core CEL
// features documented in the specification.
-type stdLibrary struct{}
+type stdLibrary struct {
+ subset *env.LibrarySubset
+}
// LibraryName implements the SingletonLibrary interface method.
-func (stdLibrary) LibraryName() string {
+func (*stdLibrary) LibraryName() string {
return "cel.lib.std"
}
+// LibraryAlias returns the simple name of the library.
+func (*stdLibrary) LibraryAlias() string {
+ return "stdlib"
+}
+
+// LibrarySubset returns the env.LibrarySubset definition associated with the CEL Library.
+func (lib *stdLibrary) LibrarySubset() *env.LibrarySubset {
+ return lib.subset
+}
+
// CompileOptions returns options for the standard CEL function declarations and macros.
-func (stdLibrary) CompileOptions() []EnvOption {
+func (lib *stdLibrary) CompileOptions() []EnvOption {
+ funcs := stdlib.Functions()
+ macros := StandardMacros
+ if lib.subset != nil {
+ subMacros := []Macro{}
+ for _, m := range macros {
+ if lib.subset.SubsetMacro(m.Function()) {
+ subMacros = append(subMacros, m)
+ }
+ }
+ macros = subMacros
+ subFuncs := []*decls.FunctionDecl{}
+ for _, fn := range funcs {
+ if f, include := lib.subset.SubsetFunction(fn); include {
+ subFuncs = append(subFuncs, f)
+ }
+ }
+ funcs = subFuncs
+ }
return []EnvOption{
func(e *Env) (*Env, error) {
var err error
- for _, fn := range stdlib.Functions() {
+ if err = lib.subset.Validate(); err != nil {
+ return nil, err
+ }
+ e.variables = append(e.variables, stdlib.Types()...)
+ for _, fn := range funcs {
existing, found := e.functions[fn.Name()]
if found {
fn, err = existing.Merge(fn)
@@ -123,16 +195,12 @@ func (stdLibrary) CompileOptions() []EnvOption {
}
return e, nil
},
- func(e *Env) (*Env, error) {
- e.variables = append(e.variables, stdlib.Types()...)
- return e, nil
- },
- Macros(StandardMacros...),
+ Macros(macros...),
}
}
// ProgramOptions returns function implementations for the standard CEL functions.
-func (stdLibrary) ProgramOptions() []ProgramOption {
+func (*stdLibrary) ProgramOptions() []ProgramOption {
return []ProgramOption{}
}
@@ -261,6 +329,36 @@ func (stdLibrary) ProgramOptions() []ProgramOption {
// be expressed with `optMap`.
//
// msg.?elements.optFlatMap(e, e[?0]) // return the first element if present.
+//
+// # First
+//
+// Introduced in version: 2
+//
+// Returns an optional with the first value from the right-hand list, or
+// optional.none().
+//
+// [1, 2, 3].first().value() == 1
+//
+// # Last
+//
+// Introduced in version: 2
+//
+// Returns an optional with the last value from the right-hand list, or
+// optional.none().
+//
+// [1, 2, 3].last().value() == 3
+//
+// This is syntactic sugar for msg.elements[msg.elements.size()-1].
+//
+// # Unwrap / UnwrapOpt
+//
+// Introduced in version: 2
+//
+// Returns a list of all the values that are not none in the input list of optional values.
+// Can be used as optional.unwrap(List[T]) or with postfix notation: List[T].unwrapOpt()
+//
+// optional.unwrap([optional.of(42), optional.none()]) == [42]
+// [optional.of(42), optional.none()].unwrapOpt() == [42]
func OptionalTypes(opts ...OptionalTypesOption) EnvOption {
lib := &optionalLib{version: math.MaxUint32}
for _, opt := range opts {
@@ -293,10 +391,20 @@ func OptionalTypesVersion(version uint32) OptionalTypesOption {
}
// LibraryName implements the SingletonLibrary interface method.
-func (lib *optionalLib) LibraryName() string {
+func (*optionalLib) LibraryName() string {
return "cel.lib.optional"
}
+// LibraryAlias returns the simple name of the library.
+func (*optionalLib) LibraryAlias() string {
+ return "optional"
+}
+
+// LibraryVersion returns the version of the library.
+func (lib *optionalLib) LibraryVersion() uint32 {
+ return lib.version
+}
+
// CompileOptions implements the Library interface method.
func (lib *optionalLib) CompileOptions() []EnvOption {
paramTypeK := TypeParamType("K")
@@ -304,6 +412,7 @@ func (lib *optionalLib) CompileOptions() []EnvOption {
optionalTypeV := OptionalType(paramTypeV)
listTypeV := ListType(paramTypeV)
mapTypeKV := MapType(paramTypeK, paramTypeV)
+ listOptionalTypeV := ListType(optionalTypeV)
opts := []EnvOption{
// Enable the optional syntax in the parser.
@@ -313,16 +422,29 @@ func (lib *optionalLib) CompileOptions() []EnvOption {
Types(types.OptionalType),
// Configure the optMap and optFlatMap macros.
- Macros(NewReceiverMacro(optMapMacro, 2, optMap)),
+ Macros(ReceiverMacro(optMapMacro, 2, optMap,
+ MacroDocs(`perform computation on the value if present and return the result as an optional`),
+ MacroExamples(
+ common.MultilineDescription(
+ `// sub with the prefix 'dev.cel' or optional.none()`,
+ `request.auth.tokens.?sub.optMap(id, 'dev.cel.' + id)`),
+ `optional.none().optMap(i, i * 2) // optional.none()`))),
// Global and member functions for working with optional values.
Function(optionalOfFunc,
+ FunctionDocs(`create a new optional_type(T) with a value where any value is considered valid`),
Overload("optional_of", []*Type{paramTypeV}, optionalTypeV,
+ OverloadExamples(`optional.of(1) // optional(1)`),
UnaryBinding(func(value ref.Val) ref.Val {
return types.OptionalOf(value)
}))),
Function(optionalOfNonZeroValueFunc,
+ FunctionDocs(`create a new optional_type(T) with a value, if the value is not a zero or empty value`),
Overload("optional_ofNonZeroValue", []*Type{paramTypeV}, optionalTypeV,
+ OverloadExamples(
+ `optional.ofNonZeroValue(null) // optional.none()`,
+ `optional.ofNonZeroValue("") // optional.none()`,
+ `optional.ofNonZeroValue("hello") // optional.of('hello')`),
UnaryBinding(func(value ref.Val) ref.Val {
v, isZeroer := value.(traits.Zeroer)
if !isZeroer || !v.IsZeroValue() {
@@ -331,18 +453,26 @@ func (lib *optionalLib) CompileOptions() []EnvOption {
return types.OptionalNone
}))),
Function(optionalNoneFunc,
+ FunctionDocs(`singleton value representing an optional without a value`),
Overload("optional_none", []*Type{}, optionalTypeV,
+ OverloadExamples(`optional.none()`),
FunctionBinding(func(values ...ref.Val) ref.Val {
return types.OptionalNone
}))),
Function(valueFunc,
+ FunctionDocs(`obtain the value contained by the optional, error if optional.none()`),
MemberOverload("optional_value", []*Type{optionalTypeV}, paramTypeV,
+ OverloadExamples(
+ `optional.of(1).value() // 1`,
+ `optional.none().value() // error`),
UnaryBinding(func(value ref.Val) ref.Val {
opt := value.(*types.Optional)
return opt.GetValue()
}))),
Function(hasValueFunc,
+ FunctionDocs(`determine whether the optional contains a value`),
MemberOverload("optional_hasValue", []*Type{optionalTypeV}, BoolType,
+ OverloadExamples(`optional.of({1: 2}).hasValue() // true`),
UnaryBinding(func(value ref.Val) ref.Val {
opt := value.(*types.Optional)
return types.Bool(opt.HasValue())
@@ -351,21 +481,43 @@ func (lib *optionalLib) CompileOptions() []EnvOption {
// Implementations of 'or' and 'orValue' are special-cased to support short-circuiting in the
// evaluation chain.
Function("or",
- MemberOverload("optional_or_optional", []*Type{optionalTypeV, optionalTypeV}, optionalTypeV)),
+ FunctionDocs(`chain optional expressions together, picking the first valued optional expression`),
+ MemberOverload("optional_or_optional", []*Type{optionalTypeV, optionalTypeV}, optionalTypeV,
+ OverloadExamples(
+ `optional.none().or(optional.of(1)) // optional.of(1)`,
+ common.MultilineDescription(
+ `// either a value from the first list, a value from the second, or optional.none()`,
+ `[1, 2, 3][?x].or([3, 4, 5][?y])`)))),
Function("orValue",
- MemberOverload("optional_orValue_value", []*Type{optionalTypeV, paramTypeV}, paramTypeV)),
+ FunctionDocs(`chain optional expressions together picking the first valued optional or the default value`),
+ MemberOverload("optional_orValue_value", []*Type{optionalTypeV, paramTypeV}, paramTypeV,
+ OverloadExamples(
+ common.MultilineDescription(
+ `// pick the value for the given key if the key exists, otherwise return 'you'`,
+ `{'hello': 'world', 'goodbye': 'cruel world'}[?greeting].orValue('you')`)))),
// OptSelect is handled specially by the type-checker, so the receiver's field type is used to determine the
// output type.
Function(operators.OptSelect,
- Overload("select_optional_field", []*Type{DynType, StringType}, optionalTypeV)),
+ FunctionDocs(`if the field is present create an optional of the field value, otherwise return optional.none()`),
+ Overload("select_optional_field", []*Type{DynType, StringType}, optionalTypeV,
+ OverloadExamples(
+ `msg.?field // optional.of(field) if non-empty, otherwise optional.none()`,
+ `msg.?field.?nested_field // optional.of(nested_field) if both field and nested_field are non-empty.`))),
// OptIndex is handled mostly like any other indexing operation on a list or map, so the type-checker can use
// these signatures to determine type-agreement without any special handling.
Function(operators.OptIndex,
- Overload("list_optindex_optional_int", []*Type{listTypeV, IntType}, optionalTypeV),
+ FunctionDocs(`if the index is present create an optional of the field value, otherwise return optional.none()`),
+ Overload("list_optindex_optional_int", []*Type{listTypeV, IntType}, optionalTypeV,
+ OverloadExamples(`[1, 2, 3][?x] // element value if x is a valid index, else optional.none()`)),
Overload("optional_list_optindex_optional_int", []*Type{OptionalType(listTypeV), IntType}, optionalTypeV),
- Overload("map_optindex_optional_value", []*Type{mapTypeKV, paramTypeK}, optionalTypeV),
+ Overload("map_optindex_optional_value", []*Type{mapTypeKV, paramTypeK}, optionalTypeV,
+ OverloadExamples(
+ `map_value[?key] // value at the key if present, else optional.none()`,
+ common.MultilineDescription(
+ `// map key-value if index is a valid map key, else optional.none()`,
+ `{0: 2, 2: 4, 6: 8}[?index]`))),
Overload("optional_map_optindex_optional_value", []*Type{OptionalType(mapTypeKV), paramTypeK}, optionalTypeV)),
// Index overloads to accommodate using an optional value as the operand.
@@ -374,8 +526,65 @@ func (lib *optionalLib) CompileOptions() []EnvOption {
Overload("optional_map_index_value", []*Type{OptionalType(mapTypeKV), paramTypeK}, optionalTypeV)),
}
if lib.version >= 1 {
- opts = append(opts, Macros(NewReceiverMacro(optFlatMapMacro, 2, optFlatMap)))
+ opts = append(opts, Macros(ReceiverMacro(optFlatMapMacro, 2, optFlatMap,
+ MacroDocs(`perform computation on the value if present and produce an optional value within the computation`),
+ MacroExamples(
+ common.MultilineDescription(
+ `// m = {'key': {}}`,
+ `m.?key.optFlatMap(k, k.?subkey) // optional.none()`),
+ common.MultilineDescription(
+ `// m = {'key': {'subkey': 'value'}}`,
+ `m.?key.optFlatMap(k, k.?subkey) // optional.of('value')`),
+ ))))
+ }
+
+ if lib.version >= 2 {
+ opts = append(opts, Function("last",
+ FunctionDocs(`return the last value in a list if present, otherwise optional.none()`),
+ MemberOverload("list_last", []*Type{listTypeV}, optionalTypeV,
+ OverloadExamples(
+ `[].last() // optional.none()`,
+ `[1, 2, 3].last() // optional.of(3)`),
+ UnaryBinding(func(v ref.Val) ref.Val {
+ list := v.(traits.Lister)
+ sz := list.Size().(types.Int)
+ if sz == types.IntZero {
+ return types.OptionalNone
+ }
+ return types.OptionalOf(list.Get(types.Int(sz - 1)))
+ }),
+ ),
+ ))
+
+ opts = append(opts, Function("first",
+ FunctionDocs(`return the first value in a list if present, otherwise optional.none()`),
+ MemberOverload("list_first", []*Type{listTypeV}, optionalTypeV,
+ OverloadExamples(
+ `[].first() // optional.none()`,
+ `[1, 2, 3].first() // optional.of(1)`),
+ UnaryBinding(func(v ref.Val) ref.Val {
+ list := v.(traits.Lister)
+ sz := list.Size().(types.Int)
+ if sz == types.IntZero {
+ return types.OptionalNone
+ }
+ return types.OptionalOf(list.Get(types.Int(0)))
+ }),
+ ),
+ ))
+
+ opts = append(opts, Function(optionalUnwrapFunc,
+ FunctionDocs(`convert a list of optional values to a list containing only values which are not optional.none()`),
+ Overload("optional_unwrap", []*Type{listOptionalTypeV}, listTypeV,
+ OverloadExamples(`optional.unwrap([optional.of(1), optional.none()]) // [1]`),
+ UnaryBinding(optUnwrap))))
+ opts = append(opts, Function(unwrapOptFunc,
+ FunctionDocs(`convert a list of optional values to a list containing only values which are not optional.none()`),
+ MemberOverload("optional_unwrapOpt", []*Type{listOptionalTypeV}, listTypeV,
+ OverloadExamples(`[optional.of(1), optional.none()].unwrapOpt() // [1]`),
+ UnaryBinding(optUnwrap))))
}
+
return opts
}
@@ -386,60 +595,82 @@ func (lib *optionalLib) ProgramOptions() []ProgramOption {
}
}
-func optMap(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
+// Version returns the current version of the library.
+func (lib *optionalLib) Version() uint32 {
+ return lib.version
+}
+
+func optMap(meh MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *Error) {
varIdent := args[0]
varName := ""
- switch varIdent.GetExprKind().(type) {
- case *exprpb.Expr_IdentExpr:
- varName = varIdent.GetIdentExpr().GetName()
+ switch varIdent.Kind() {
+ case ast.IdentKind:
+ varName = varIdent.AsIdent()
default:
- return nil, meh.NewError(varIdent.GetId(), "optMap() variable name must be a simple identifier")
+ return nil, meh.NewError(varIdent.ID(), "optMap() variable name must be a simple identifier")
}
mapExpr := args[1]
- return meh.GlobalCall(
+ return meh.NewCall(
operators.Conditional,
- meh.ReceiverCall(hasValueFunc, target),
- meh.GlobalCall(optionalOfFunc,
- meh.Fold(
- unusedIterVar,
+ meh.NewMemberCall(hasValueFunc, target),
+ meh.NewCall(optionalOfFunc,
+ meh.NewComprehension(
meh.NewList(),
+ unusedIterVar,
varName,
- meh.ReceiverCall(valueFunc, target),
- meh.LiteralBool(false),
- meh.Ident(varName),
+ meh.NewMemberCall(valueFunc, meh.Copy(target)),
+ meh.NewLiteral(types.False),
+ meh.NewIdent(varName),
mapExpr,
),
),
- meh.GlobalCall(optionalNoneFunc),
+ meh.NewCall(optionalNoneFunc),
), nil
}
-func optFlatMap(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
+func optFlatMap(meh MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *Error) {
varIdent := args[0]
varName := ""
- switch varIdent.GetExprKind().(type) {
- case *exprpb.Expr_IdentExpr:
- varName = varIdent.GetIdentExpr().GetName()
+ switch varIdent.Kind() {
+ case ast.IdentKind:
+ varName = varIdent.AsIdent()
default:
- return nil, meh.NewError(varIdent.GetId(), "optFlatMap() variable name must be a simple identifier")
+ return nil, meh.NewError(varIdent.ID(), "optFlatMap() variable name must be a simple identifier")
}
mapExpr := args[1]
- return meh.GlobalCall(
+ return meh.NewCall(
operators.Conditional,
- meh.ReceiverCall(hasValueFunc, target),
- meh.Fold(
- unusedIterVar,
+ meh.NewMemberCall(hasValueFunc, target),
+ meh.NewComprehension(
meh.NewList(),
+ unusedIterVar,
varName,
- meh.ReceiverCall(valueFunc, target),
- meh.LiteralBool(false),
- meh.Ident(varName),
+ meh.NewMemberCall(valueFunc, meh.Copy(target)),
+ meh.NewLiteral(types.False),
+ meh.NewIdent(varName),
mapExpr,
),
- meh.GlobalCall(optionalNoneFunc),
+ meh.NewCall(optionalNoneFunc),
), nil
}
+func optUnwrap(value ref.Val) ref.Val {
+ list := value.(traits.Lister)
+ var unwrappedList []ref.Val
+ iter := list.Iterator()
+ for iter.HasNext() == types.True {
+ val := iter.Next()
+ opt, isOpt := val.(*types.Optional)
+ if !isOpt {
+ return types.WrapErr(fmt.Errorf("value %v is not optional", val))
+ }
+ if opt.HasValue() {
+ unwrappedList = append(unwrappedList, opt.GetValue())
+ }
+ }
+ return types.DefaultTypeAdapter.NativeToValue(unwrappedList)
+}
+
func enableOptionalSyntax() EnvOption {
return func(e *Env) (*Env, error) {
e.prsrOpts = append(e.prsrOpts, parser.EnableOptionalSyntax(true))
@@ -447,6 +678,12 @@ func enableOptionalSyntax() EnvOption {
}
}
+// EnableErrorOnBadPresenceTest enables error generation when a presence test or optional field
+// selection is performed on a primitive type.
+func EnableErrorOnBadPresenceTest(value bool) EnvOption {
+ return features(featureEnableErrorOnBadPresenceTest, value)
+}
+
func decorateOptionalOr(i interpreter.Interpretable) (interpreter.Interpretable, error) {
call, ok := i.(interpreter.InterpretableCall)
if !ok {
@@ -536,250 +773,99 @@ func (opt *evalOptionalOrValue) Eval(ctx interpreter.Activation) ref.Val {
return opt.rhs.Eval(ctx)
}
-type timeUTCLibrary struct{}
+type timeLegacyLibrary struct{}
-func (timeUTCLibrary) CompileOptions() []EnvOption {
+func (timeLegacyLibrary) CompileOptions() []EnvOption {
return timeOverloadDeclarations
}
-func (timeUTCLibrary) ProgramOptions() []ProgramOption {
+func (timeLegacyLibrary) ProgramOptions() []ProgramOption {
return []ProgramOption{}
}
// Declarations and functions which enable using UTC on time.Time inputs when the timezone is unspecified
// in the CEL expression.
var (
- utcTZ = types.String("UTC")
-
timeOverloadDeclarations = []EnvOption{
- Function(overloads.TimeGetHours,
- MemberOverload(overloads.DurationToHours, []*Type{DurationType}, IntType,
- UnaryBinding(types.DurationGetHours))),
- Function(overloads.TimeGetMinutes,
- MemberOverload(overloads.DurationToMinutes, []*Type{DurationType}, IntType,
- UnaryBinding(types.DurationGetMinutes))),
- Function(overloads.TimeGetSeconds,
- MemberOverload(overloads.DurationToSeconds, []*Type{DurationType}, IntType,
- UnaryBinding(types.DurationGetSeconds))),
- Function(overloads.TimeGetMilliseconds,
- MemberOverload(overloads.DurationToMilliseconds, []*Type{DurationType}, IntType,
- UnaryBinding(types.DurationGetMilliseconds))),
Function(overloads.TimeGetFullYear,
MemberOverload(overloads.TimestampToYear, []*Type{TimestampType}, IntType,
UnaryBinding(func(ts ref.Val) ref.Val {
- return timestampGetFullYear(ts, utcTZ)
+ t := ts.(types.Timestamp)
+ return t.Receive(overloads.TimeGetFullYear, overloads.TimestampToYear, []ref.Val{})
}),
),
- MemberOverload(overloads.TimestampToYearWithTz, []*Type{TimestampType, StringType}, IntType,
- BinaryBinding(timestampGetFullYear),
- ),
),
Function(overloads.TimeGetMonth,
MemberOverload(overloads.TimestampToMonth, []*Type{TimestampType}, IntType,
UnaryBinding(func(ts ref.Val) ref.Val {
- return timestampGetMonth(ts, utcTZ)
+ t := ts.(types.Timestamp)
+ return t.Receive(overloads.TimeGetMonth, overloads.TimestampToMonth, []ref.Val{})
}),
),
- MemberOverload(overloads.TimestampToMonthWithTz, []*Type{TimestampType, StringType}, IntType,
- BinaryBinding(timestampGetMonth),
- ),
),
Function(overloads.TimeGetDayOfYear,
MemberOverload(overloads.TimestampToDayOfYear, []*Type{TimestampType}, IntType,
UnaryBinding(func(ts ref.Val) ref.Val {
- return timestampGetDayOfYear(ts, utcTZ)
- }),
- ),
- MemberOverload(overloads.TimestampToDayOfYearWithTz, []*Type{TimestampType, StringType}, IntType,
- BinaryBinding(func(ts, tz ref.Val) ref.Val {
- return timestampGetDayOfYear(ts, tz)
+ t := ts.(types.Timestamp)
+ return t.Receive(overloads.TimeGetDayOfYear, overloads.TimestampToDayOfYear, []ref.Val{})
}),
),
),
Function(overloads.TimeGetDayOfMonth,
MemberOverload(overloads.TimestampToDayOfMonthZeroBased, []*Type{TimestampType}, IntType,
UnaryBinding(func(ts ref.Val) ref.Val {
- return timestampGetDayOfMonthZeroBased(ts, utcTZ)
+ t := ts.(types.Timestamp)
+ return t.Receive(overloads.TimeGetDayOfMonth, overloads.TimestampToDayOfMonthZeroBased, []ref.Val{})
}),
),
- MemberOverload(overloads.TimestampToDayOfMonthZeroBasedWithTz, []*Type{TimestampType, StringType}, IntType,
- BinaryBinding(timestampGetDayOfMonthZeroBased),
- ),
),
Function(overloads.TimeGetDate,
MemberOverload(overloads.TimestampToDayOfMonthOneBased, []*Type{TimestampType}, IntType,
UnaryBinding(func(ts ref.Val) ref.Val {
- return timestampGetDayOfMonthOneBased(ts, utcTZ)
+ t := ts.(types.Timestamp)
+ return t.Receive(overloads.TimeGetDate, overloads.TimestampToDayOfMonthOneBased, []ref.Val{})
}),
),
- MemberOverload(overloads.TimestampToDayOfMonthOneBasedWithTz, []*Type{TimestampType, StringType}, IntType,
- BinaryBinding(timestampGetDayOfMonthOneBased),
- ),
),
Function(overloads.TimeGetDayOfWeek,
MemberOverload(overloads.TimestampToDayOfWeek, []*Type{TimestampType}, IntType,
UnaryBinding(func(ts ref.Val) ref.Val {
- return timestampGetDayOfWeek(ts, utcTZ)
+ t := ts.(types.Timestamp)
+ return t.Receive(overloads.TimeGetDayOfWeek, overloads.TimestampToDayOfWeek, []ref.Val{})
}),
),
- MemberOverload(overloads.TimestampToDayOfWeekWithTz, []*Type{TimestampType, StringType}, IntType,
- BinaryBinding(timestampGetDayOfWeek),
- ),
),
Function(overloads.TimeGetHours,
MemberOverload(overloads.TimestampToHours, []*Type{TimestampType}, IntType,
UnaryBinding(func(ts ref.Val) ref.Val {
- return timestampGetHours(ts, utcTZ)
+ t := ts.(types.Timestamp)
+ return t.Receive(overloads.TimeGetHours, overloads.TimestampToHours, []ref.Val{})
}),
),
- MemberOverload(overloads.TimestampToHoursWithTz, []*Type{TimestampType, StringType}, IntType,
- BinaryBinding(timestampGetHours),
- ),
),
Function(overloads.TimeGetMinutes,
MemberOverload(overloads.TimestampToMinutes, []*Type{TimestampType}, IntType,
UnaryBinding(func(ts ref.Val) ref.Val {
- return timestampGetMinutes(ts, utcTZ)
+ t := ts.(types.Timestamp)
+ return t.Receive(overloads.TimeGetMinutes, overloads.TimestampToMinutes, []ref.Val{})
}),
),
- MemberOverload(overloads.TimestampToMinutesWithTz, []*Type{TimestampType, StringType}, IntType,
- BinaryBinding(timestampGetMinutes),
- ),
),
Function(overloads.TimeGetSeconds,
MemberOverload(overloads.TimestampToSeconds, []*Type{TimestampType}, IntType,
UnaryBinding(func(ts ref.Val) ref.Val {
- return timestampGetSeconds(ts, utcTZ)
+ t := ts.(types.Timestamp)
+ return t.Receive(overloads.TimeGetSeconds, overloads.TimestampToSeconds, []ref.Val{})
}),
),
- MemberOverload(overloads.TimestampToSecondsWithTz, []*Type{TimestampType, StringType}, IntType,
- BinaryBinding(timestampGetSeconds),
- ),
),
Function(overloads.TimeGetMilliseconds,
MemberOverload(overloads.TimestampToMilliseconds, []*Type{TimestampType}, IntType,
UnaryBinding(func(ts ref.Val) ref.Val {
- return timestampGetMilliseconds(ts, utcTZ)
+ t := ts.(types.Timestamp)
+ return t.Receive(overloads.TimeGetMilliseconds, overloads.TimestampToMilliseconds, []ref.Val{})
}),
),
- MemberOverload(overloads.TimestampToMillisecondsWithTz, []*Type{TimestampType, StringType}, IntType,
- BinaryBinding(timestampGetMilliseconds),
- ),
),
}
)
-
-func timestampGetFullYear(ts, tz ref.Val) ref.Val {
- t, err := inTimeZone(ts, tz)
- if err != nil {
- return types.NewErr(err.Error())
- }
- return types.Int(t.Year())
-}
-
-func timestampGetMonth(ts, tz ref.Val) ref.Val {
- t, err := inTimeZone(ts, tz)
- if err != nil {
- return types.NewErr(err.Error())
- }
- // CEL spec indicates that the month should be 0-based, but the Time value
- // for Month() is 1-based.
- return types.Int(t.Month() - 1)
-}
-
-func timestampGetDayOfYear(ts, tz ref.Val) ref.Val {
- t, err := inTimeZone(ts, tz)
- if err != nil {
- return types.NewErr(err.Error())
- }
- return types.Int(t.YearDay() - 1)
-}
-
-func timestampGetDayOfMonthZeroBased(ts, tz ref.Val) ref.Val {
- t, err := inTimeZone(ts, tz)
- if err != nil {
- return types.NewErr(err.Error())
- }
- return types.Int(t.Day() - 1)
-}
-
-func timestampGetDayOfMonthOneBased(ts, tz ref.Val) ref.Val {
- t, err := inTimeZone(ts, tz)
- if err != nil {
- return types.NewErr(err.Error())
- }
- return types.Int(t.Day())
-}
-
-func timestampGetDayOfWeek(ts, tz ref.Val) ref.Val {
- t, err := inTimeZone(ts, tz)
- if err != nil {
- return types.NewErr(err.Error())
- }
- return types.Int(t.Weekday())
-}
-
-func timestampGetHours(ts, tz ref.Val) ref.Val {
- t, err := inTimeZone(ts, tz)
- if err != nil {
- return types.NewErr(err.Error())
- }
- return types.Int(t.Hour())
-}
-
-func timestampGetMinutes(ts, tz ref.Val) ref.Val {
- t, err := inTimeZone(ts, tz)
- if err != nil {
- return types.NewErr(err.Error())
- }
- return types.Int(t.Minute())
-}
-
-func timestampGetSeconds(ts, tz ref.Val) ref.Val {
- t, err := inTimeZone(ts, tz)
- if err != nil {
- return types.NewErr(err.Error())
- }
- return types.Int(t.Second())
-}
-
-func timestampGetMilliseconds(ts, tz ref.Val) ref.Val {
- t, err := inTimeZone(ts, tz)
- if err != nil {
- return types.NewErr(err.Error())
- }
- return types.Int(t.Nanosecond() / 1000000)
-}
-
-func inTimeZone(ts, tz ref.Val) (time.Time, error) {
- t := ts.(types.Timestamp)
- val := string(tz.(types.String))
- ind := strings.Index(val, ":")
- if ind == -1 {
- loc, err := time.LoadLocation(val)
- if err != nil {
- return time.Time{}, err
- }
- return t.In(loc), nil
- }
-
- // If the input is not the name of a timezone (for example, 'US/Central'), it should be a numerical offset from UTC
- // in the format ^(+|-)(0[0-9]|1[0-4]):[0-5][0-9]$. The numerical input is parsed in terms of hours and minutes.
- hr, err := strconv.Atoi(string(val[0:ind]))
- if err != nil {
- return time.Time{}, err
- }
- min, err := strconv.Atoi(string(val[ind+1:]))
- if err != nil {
- return time.Time{}, err
- }
- var offset int
- if string(val[0]) == "-" {
- offset = hr*60 - min
- } else {
- offset = hr*60 + min
- }
- secondsEastOfUTC := int((time.Duration(offset) * time.Minute).Seconds())
- timezone := time.FixedZone("", secondsEastOfUTC)
- return t.In(timezone), nil
-}
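A short sketch exercising the version 2 optional-library additions above (first, last, and unwrapOpt); the expression and expected result are illustrative:

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/common/types"
)

func main() {
	env, err := cel.NewEnv(cel.OptionalTypes(cel.OptionalTypesVersion(2)))
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`[optional.of(42), optional.none()].unwrapOpt()`)
	if iss != nil && iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(cel.NoVars())
	if err != nil {
		panic(err)
	}
	// The unwrapped list should contain only the present value, 42.
	want := types.DefaultTypeAdapter.NativeToValue([]int64{42})
	fmt.Println(out.Equal(want)) // true
}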
diff --git a/vendor/github.com/google/cel-go/cel/macro.go b/vendor/github.com/google/cel-go/cel/macro.go
index 1eb414c8b..3d3c5be1b 100644
--- a/vendor/github.com/google/cel-go/cel/macro.go
+++ b/vendor/github.com/google/cel-go/cel/macro.go
@@ -15,6 +15,11 @@
package cel
import (
+ "fmt"
+
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/types"
"github.com/google/cel-go/parser"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
@@ -26,7 +31,14 @@ import (
// a Macro should be created per arg-count or as a var arg macro.
type Macro = parser.Macro
-// MacroExpander converts a call and its associated arguments into a new CEL abstract syntax tree.
+// MacroFactory defines an expansion function which converts a call and its arguments to a cel.Expr value.
+type MacroFactory = parser.MacroExpander
+
+// MacroExprFactory assists with the creation of Expr values in a manner which is consistent with
+// the internal semantics and id generation behaviors of the parser and checker libraries.
+type MacroExprFactory = parser.ExprHelper
+
+// MacroExpander converts a call and its associated arguments into a protobuf Expr representation.
//
// If the MacroExpander determines within the implementation that an expansion is not needed it may return
// a nil Expr value to indicate a non-match. However, if an expansion is to be performed, but the arguments
@@ -36,48 +48,211 @@ type Macro = parser.Macro
// and produces as output an Expr ast node.
//
// Note: when the Macro.IsReceiverStyle() method returns true, the target argument will be nil.
-type MacroExpander = parser.MacroExpander
+type MacroExpander func(eh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error)
// MacroExprHelper exposes helper methods for creating new expressions within a CEL abstract syntax tree.
-type MacroExprHelper = parser.ExprHelper
+// ExprHelper assists with the manipulation of proto-based Expr values in a manner which is
+// consistent with the source position and expression id generation code leveraged by both
+// the parser and type-checker.
+type MacroExprHelper interface {
+ // Copy the input expression with a brand new set of identifiers.
+ Copy(*exprpb.Expr) *exprpb.Expr
+
+ // LiteralBool creates an Expr value for a bool literal.
+ LiteralBool(value bool) *exprpb.Expr
+
+ // LiteralBytes creates an Expr value for a byte literal.
+ LiteralBytes(value []byte) *exprpb.Expr
+
+ // LiteralDouble creates an Expr value for double literal.
+ LiteralDouble(value float64) *exprpb.Expr
+
+ // LiteralInt creates an Expr value for an int literal.
+ LiteralInt(value int64) *exprpb.Expr
+
+ // LiteralString creates an Expr value for a string literal.
+ LiteralString(value string) *exprpb.Expr
+
+ // LiteralUint creates an Expr value for a uint literal.
+ LiteralUint(value uint64) *exprpb.Expr
+
+ // NewList creates a CreateList instruction where the list is comprised of the optional set
+ // of elements provided as arguments.
+ NewList(elems ...*exprpb.Expr) *exprpb.Expr
+
+ // NewMap creates a CreateStruct instruction for a map where the map is comprised of the
+ // optional set of key, value entries.
+ NewMap(entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr
+
+ // NewMapEntry creates a Map Entry for the key, value pair.
+ NewMapEntry(key *exprpb.Expr, val *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry
+
+ // NewObject creates a CreateStruct instruction for an object with a given type name and
+ // optional set of field initializers.
+ NewObject(typeName string, fieldInits ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr
+
+ // NewObjectFieldInit creates a new Object field initializer from the field name and value.
+ NewObjectFieldInit(field string, init *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry
+
+ // Fold creates a fold comprehension instruction.
+ //
+ // - iterVar is the iteration variable name.
+ // - iterRange represents the expression that resolves to a list or map where the elements or
+ // keys (respectively) will be iterated over.
+ // - accuVar is the accumulation variable name, typically parser.AccumulatorName.
+ // - accuInit is the initial expression whose value will be set for the accuVar prior to
+ // folding.
+ // - condition is the expression to test to determine whether to continue folding.
+ // - step is the expression to evaluate at the conclusion of a single fold iteration.
+ // - result is the computation to evaluate at the conclusion of the fold.
+ //
+ // The accuVar should not shadow variable names that you would like to reference within the
+ // environment in the step and condition expressions. Presently, the name __result__ is commonly
+ // used by built-in macros but this may change in the future.
+ Fold(iterVar string,
+ iterRange *exprpb.Expr,
+ accuVar string,
+ accuInit *exprpb.Expr,
+ condition *exprpb.Expr,
+ step *exprpb.Expr,
+ result *exprpb.Expr) *exprpb.Expr
+
+ // Ident creates an identifier Expr value.
+ Ident(name string) *exprpb.Expr
+
+ // AccuIdent returns an accumulator identifier for use with comprehension results.
+ AccuIdent() *exprpb.Expr
+
+ // GlobalCall creates a function call Expr value for a global (free) function.
+ GlobalCall(function string, args ...*exprpb.Expr) *exprpb.Expr
+
+ // ReceiverCall creates a function call Expr value for a receiver-style function.
+ ReceiverCall(function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr
+
+ // PresenceTest creates a Select TestOnly Expr value for modelling has() semantics.
+ PresenceTest(operand *exprpb.Expr, field string) *exprpb.Expr
+
+ // Select create a field traversal Expr value.
+ Select(operand *exprpb.Expr, field string) *exprpb.Expr
+
+ // OffsetLocation returns the Location of the expression identifier.
+ OffsetLocation(exprID int64) common.Location
+
+ // NewError associates an error message with a given expression id.
+ NewError(exprID int64, message string) *Error
+}
+
+// MacroOpt defines a functional option for configuring macro behavior.
+type MacroOpt = parser.MacroOpt
+
+// MacroDocs configures a list of strings into a multiline description for the macro.
+func MacroDocs(docs ...string) MacroOpt {
+ return parser.MacroDocs(docs...)
+}
+
+// MacroExamples configures a list of examples, either as a string or common.MultilineString,
+// into an example set to be provided with the macro Documentation() call.
+func MacroExamples(examples ...string) MacroOpt {
+ return parser.MacroExamples(examples...)
+}
+
+// GlobalMacro creates a Macro for a global function with the specified arg count.
+func GlobalMacro(function string, argCount int, factory MacroFactory, opts ...MacroOpt) Macro {
+ return parser.NewGlobalMacro(function, argCount, factory, opts...)
+}
+
+// ReceiverMacro creates a Macro for a receiver function matching the specified arg count.
+func ReceiverMacro(function string, argCount int, factory MacroFactory, opts ...MacroOpt) Macro {
+ return parser.NewReceiverMacro(function, argCount, factory, opts...)
+}
+
+// GlobalVarArgMacro creates a Macro for a global function with a variable arg count.
+func GlobalVarArgMacro(function string, factory MacroFactory, opts ...MacroOpt) Macro {
+ return parser.NewGlobalVarArgMacro(function, factory, opts...)
+}
+
+// ReceiverVarArgMacro creates a Macro for a receiver function matching a variable arg count.
+func ReceiverVarArgMacro(function string, factory MacroFactory, opts ...MacroOpt) Macro {
+ return parser.NewReceiverVarArgMacro(function, factory, opts...)
+}
// NewGlobalMacro creates a Macro for a global function with the specified arg count.
+//
+// Deprecated: use GlobalMacro
func NewGlobalMacro(function string, argCount int, expander MacroExpander) Macro {
- return parser.NewGlobalMacro(function, argCount, expander)
+ expand := adaptingExpander{expander}
+ return parser.NewGlobalMacro(function, argCount, expand.Expander)
}
// NewReceiverMacro creates a Macro for a receiver function matching the specified arg count.
+//
+// Deprecated: use ReceiverMacro
func NewReceiverMacro(function string, argCount int, expander MacroExpander) Macro {
- return parser.NewReceiverMacro(function, argCount, expander)
+ expand := adaptingExpander{expander}
+ return parser.NewReceiverMacro(function, argCount, expand.Expander)
}
// NewGlobalVarArgMacro creates a Macro for a global function with a variable arg count.
+//
+// Deprecated: use GlobalVarArgMacro
func NewGlobalVarArgMacro(function string, expander MacroExpander) Macro {
- return parser.NewGlobalVarArgMacro(function, expander)
+ expand := adaptingExpander{expander}
+ return parser.NewGlobalVarArgMacro(function, expand.Expander)
}
// NewReceiverVarArgMacro creates a Macro for a receiver function matching a variable arg count.
+//
+// Deprecated: use ReceiverVarArgMacro
func NewReceiverVarArgMacro(function string, expander MacroExpander) Macro {
- return parser.NewReceiverVarArgMacro(function, expander)
+ expand := adaptingExpander{expander}
+ return parser.NewReceiverVarArgMacro(function, expand.Expander)
}
// HasMacroExpander expands the input call arguments into a presence test, e.g. has(<operand>.field)
func HasMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
- return parser.MakeHas(meh, target, args)
+ ph, err := toParserHelper(meh)
+ if err != nil {
+ return nil, err
+ }
+ arg, err := adaptToExpr(args[0])
+ if err != nil {
+ return nil, err
+ }
+ if arg.Kind() == ast.SelectKind {
+ s := arg.AsSelect()
+ return adaptToProto(ph.NewPresenceTest(s.Operand(), s.FieldName()))
+ }
+ return nil, ph.NewError(arg.ID(), "invalid argument to has() macro")
}
// ExistsMacroExpander expands the input call arguments into a comprehension that returns true if any of the
// elements in the range match the predicate expressions:
// <iterRange>.exists(<iterVar>, <predicate>)
func ExistsMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
- return parser.MakeExists(meh, target, args)
+ ph, err := toParserHelper(meh)
+ if err != nil {
+ return nil, err
+ }
+ out, err := parser.MakeExists(ph, mustAdaptToExpr(target), mustAdaptToExprs(args))
+ if err != nil {
+ return nil, err
+ }
+ return adaptToProto(out)
}
// ExistsOneMacroExpander expands the input call arguments into a comprehension that returns true if exactly
// one of the elements in the range match the predicate expressions:
// <iterRange>.exists_one(<iterVar>, <predicate>)
func ExistsOneMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
- return parser.MakeExistsOne(meh, target, args)
+ ph, err := toParserHelper(meh)
+ if err != nil {
+ return nil, err
+ }
+ out, err := parser.MakeExistsOne(ph, mustAdaptToExpr(target), mustAdaptToExprs(args))
+ if err != nil {
+ return nil, err
+ }
+ return adaptToProto(out)
}
// MapMacroExpander expands the input call arguments into a comprehension that transforms each element in the
@@ -91,14 +266,30 @@ func ExistsOneMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*ex
// In the second form only iterVar values which return true when provided to the predicate expression
// are transformed.
func MapMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
- return parser.MakeMap(meh, target, args)
+ ph, err := toParserHelper(meh)
+ if err != nil {
+ return nil, err
+ }
+ out, err := parser.MakeMap(ph, mustAdaptToExpr(target), mustAdaptToExprs(args))
+ if err != nil {
+ return nil, err
+ }
+ return adaptToProto(out)
}
// FilterMacroExpander expands the input call arguments into a comprehension which produces a list which contains
// only elements which match the provided predicate expression:
// <iterRange>.filter(<iterVar>, <predicate>)
func FilterMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
- return parser.MakeFilter(meh, target, args)
+ ph, err := toParserHelper(meh)
+ if err != nil {
+ return nil, err
+ }
+ out, err := parser.MakeFilter(ph, mustAdaptToExpr(target), mustAdaptToExprs(args))
+ if err != nil {
+ return nil, err
+ }
+ return adaptToProto(out)
}
var (
@@ -142,3 +333,258 @@ var (
// NoMacros provides an alias to an empty list of macros
NoMacros = []Macro{}
)
+
+type adaptingExpander struct {
+ legacyExpander MacroExpander
+}
+
+func (adapt *adaptingExpander) Expander(eh parser.ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+ var legacyTarget *exprpb.Expr = nil
+ var err *Error = nil
+ if target != nil {
+ legacyTarget, err = adaptToProto(target)
+ if err != nil {
+ return nil, err
+ }
+ }
+ legacyArgs := make([]*exprpb.Expr, len(args))
+ for i, arg := range args {
+ legacyArgs[i], err = adaptToProto(arg)
+ if err != nil {
+ return nil, err
+ }
+ }
+ ah := &adaptingHelper{modernHelper: eh}
+ legacyExpr, err := adapt.legacyExpander(ah, legacyTarget, legacyArgs)
+ if err != nil {
+ return nil, err
+ }
+ ex, err := adaptToExpr(legacyExpr)
+ if err != nil {
+ return nil, err
+ }
+ return ex, nil
+}
+
+func wrapErr(id int64, message string, err error) *common.Error {
+ return &common.Error{
+ Location: common.NoLocation,
+ Message: fmt.Sprintf("%s: %v", message, err),
+ ExprID: id,
+ }
+}
+
+type adaptingHelper struct {
+ modernHelper parser.ExprHelper
+}
+
+// Copy the input expression with a brand new set of identifiers.
+func (ah *adaptingHelper) Copy(e *exprpb.Expr) *exprpb.Expr {
+ return mustAdaptToProto(ah.modernHelper.Copy(mustAdaptToExpr(e)))
+}
+
+// LiteralBool creates an Expr value for a bool literal.
+func (ah *adaptingHelper) LiteralBool(value bool) *exprpb.Expr {
+ return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Bool(value)))
+}
+
+// LiteralBytes creates an Expr value for a byte literal.
+func (ah *adaptingHelper) LiteralBytes(value []byte) *exprpb.Expr {
+ return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Bytes(value)))
+}
+
+// LiteralDouble creates an Expr value for double literal.
+func (ah *adaptingHelper) LiteralDouble(value float64) *exprpb.Expr {
+ return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Double(value)))
+}
+
+// LiteralInt creates an Expr value for an int literal.
+func (ah *adaptingHelper) LiteralInt(value int64) *exprpb.Expr {
+ return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Int(value)))
+}
+
+// LiteralString creates an Expr value for a string literal.
+func (ah *adaptingHelper) LiteralString(value string) *exprpb.Expr {
+ return mustAdaptToProto(ah.modernHelper.NewLiteral(types.String(value)))
+}
+
+// LiteralUint creates an Expr value for a uint literal.
+func (ah *adaptingHelper) LiteralUint(value uint64) *exprpb.Expr {
+ return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Uint(value)))
+}
+
+// NewList creates a CreateList instruction where the list is comprised of the optional set
+// of elements provided as arguments.
+func (ah *adaptingHelper) NewList(elems ...*exprpb.Expr) *exprpb.Expr {
+ return mustAdaptToProto(ah.modernHelper.NewList(mustAdaptToExprs(elems)...))
+}
+
+// NewMap creates a CreateStruct instruction for a map where the map is comprised of the
+// optional set of key, value entries.
+func (ah *adaptingHelper) NewMap(entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr {
+ adaptedEntries := make([]ast.EntryExpr, len(entries))
+ for i, e := range entries {
+ adaptedEntries[i] = mustAdaptToEntryExpr(e)
+ }
+ return mustAdaptToProto(ah.modernHelper.NewMap(adaptedEntries...))
+}
+
+// NewMapEntry creates a Map Entry for the key, value pair.
+func (ah *adaptingHelper) NewMapEntry(key *exprpb.Expr, val *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry {
+ return mustAdaptToProtoEntry(
+ ah.modernHelper.NewMapEntry(mustAdaptToExpr(key), mustAdaptToExpr(val), optional))
+}
+
+// NewObject creates a CreateStruct instruction for an object with a given type name and
+// optional set of field initializers.
+func (ah *adaptingHelper) NewObject(typeName string, fieldInits ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr {
+ adaptedEntries := make([]ast.EntryExpr, len(fieldInits))
+ for i, e := range fieldInits {
+ adaptedEntries[i] = mustAdaptToEntryExpr(e)
+ }
+ return mustAdaptToProto(ah.modernHelper.NewStruct(typeName, adaptedEntries...))
+}
+
+// NewObjectFieldInit creates a new Object field initializer from the field name and value.
+func (ah *adaptingHelper) NewObjectFieldInit(field string, init *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry {
+ return mustAdaptToProtoEntry(
+ ah.modernHelper.NewStructField(field, mustAdaptToExpr(init), optional))
+}
+
+// Fold creates a fold comprehension instruction.
+//
+// - iterVar is the iteration variable name.
+// - iterRange represents the expression that resolves to a list or map where the elements or
+// keys (respectively) will be iterated over.
+// - accuVar is the accumulation variable name, typically parser.AccumulatorName.
+// - accuInit is the initial expression whose value will be set for the accuVar prior to
+// folding.
+// - condition is the expression to test to determine whether to continue folding.
+// - step is the expression to evaluate at the conclusion of a single fold iteration.
+// - result is the computation to evaluate at the conclusion of the fold.
+//
+// The accuVar should not shadow variable names that you would like to reference within the
+// environment in the step and condition expressions. Presently, the name __result__ is commonly
+// used by built-in macros but this may change in the future.
+func (ah *adaptingHelper) Fold(iterVar string,
+ iterRange *exprpb.Expr,
+ accuVar string,
+ accuInit *exprpb.Expr,
+ condition *exprpb.Expr,
+ step *exprpb.Expr,
+ result *exprpb.Expr) *exprpb.Expr {
+ return mustAdaptToProto(
+ ah.modernHelper.NewComprehension(
+ mustAdaptToExpr(iterRange),
+ iterVar,
+ accuVar,
+ mustAdaptToExpr(accuInit),
+ mustAdaptToExpr(condition),
+ mustAdaptToExpr(step),
+ mustAdaptToExpr(result),
+ ),
+ )
+}
+
+// Ident creates an identifier Expr value.
+func (ah *adaptingHelper) Ident(name string) *exprpb.Expr {
+ return mustAdaptToProto(ah.modernHelper.NewIdent(name))
+}
+
+// AccuIdent returns an accumulator identifier for use with comprehension results.
+func (ah *adaptingHelper) AccuIdent() *exprpb.Expr {
+ return mustAdaptToProto(ah.modernHelper.NewAccuIdent())
+}
+
+// GlobalCall creates a function call Expr value for a global (free) function.
+func (ah *adaptingHelper) GlobalCall(function string, args ...*exprpb.Expr) *exprpb.Expr {
+ return mustAdaptToProto(ah.modernHelper.NewCall(function, mustAdaptToExprs(args)...))
+}
+
+// ReceiverCall creates a function call Expr value for a receiver-style function.
+func (ah *adaptingHelper) ReceiverCall(function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr {
+ return mustAdaptToProto(
+ ah.modernHelper.NewMemberCall(function, mustAdaptToExpr(target), mustAdaptToExprs(args)...))
+}
+
+// PresenceTest creates a Select TestOnly Expr value for modelling has() semantics.
+func (ah *adaptingHelper) PresenceTest(operand *exprpb.Expr, field string) *exprpb.Expr {
+ op := mustAdaptToExpr(operand)
+ return mustAdaptToProto(ah.modernHelper.NewPresenceTest(op, field))
+}
+
+// Selects creates a field traversal Expr value.
+func (ah *adaptingHelper) Select(operand *exprpb.Expr, field string) *exprpb.Expr {
+ op := mustAdaptToExpr(operand)
+ return mustAdaptToProto(ah.modernHelper.NewSelect(op, field))
+}
+
+// OffsetLocation returns the Location of the expression identifier.
+func (ah *adaptingHelper) OffsetLocation(exprID int64) common.Location {
+ return ah.modernHelper.OffsetLocation(exprID)
+}
+
+// NewError associates an error message with a given expression id.
+func (ah *adaptingHelper) NewError(exprID int64, message string) *Error {
+ return ah.modernHelper.NewError(exprID, message)
+}
+
+func mustAdaptToExprs(exprs []*exprpb.Expr) []ast.Expr {
+ adapted := make([]ast.Expr, len(exprs))
+ for i, e := range exprs {
+ adapted[i] = mustAdaptToExpr(e)
+ }
+ return adapted
+}
+
+func mustAdaptToExpr(e *exprpb.Expr) ast.Expr {
+ out, _ := adaptToExpr(e)
+ return out
+}
+
+func adaptToExpr(e *exprpb.Expr) (ast.Expr, *Error) {
+ if e == nil {
+ return nil, nil
+ }
+ out, err := ast.ProtoToExpr(e)
+ if err != nil {
+ return nil, wrapErr(e.GetId(), "proto conversion failure", err)
+ }
+ return out, nil
+}
+
+func mustAdaptToEntryExpr(e *exprpb.Expr_CreateStruct_Entry) ast.EntryExpr {
+ out, _ := ast.ProtoToEntryExpr(e)
+ return out
+}
+
+func mustAdaptToProto(e ast.Expr) *exprpb.Expr {
+ out, _ := adaptToProto(e)
+ return out
+}
+
+func adaptToProto(e ast.Expr) (*exprpb.Expr, *Error) {
+ if e == nil {
+ return nil, nil
+ }
+ out, err := ast.ExprToProto(e)
+ if err != nil {
+ return nil, wrapErr(e.ID(), "expr conversion failure", err)
+ }
+ return out, nil
+}
+
+func mustAdaptToProtoEntry(e ast.EntryExpr) *exprpb.Expr_CreateStruct_Entry {
+ out, _ := ast.EntryExprToProto(e)
+ return out
+}
+
+func toParserHelper(meh MacroExprHelper) (parser.ExprHelper, *Error) {
+ ah, ok := meh.(*adaptingHelper)
+ if !ok {
+ return nil, common.NewError(0,
+ fmt.Sprintf("unsupported macro helper: %v (%T)", meh, meh),
+ common.NoLocation)
+ }
+ return ah.modernHelper, nil
+}
diff --git a/vendor/github.com/google/cel-go/cel/optimizer.go b/vendor/github.com/google/cel-go/cel/optimizer.go
new file mode 100644
index 000000000..9a2a97a64
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/optimizer.go
@@ -0,0 +1,535 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "sort"
+
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// StaticOptimizer contains a sequence of ASTOptimizer instances which will be applied in order.
+//
+// The static optimizer normalizes expression ids and re-runs type-checking between optimization
+// passes to ensure that the final optimized output is a valid expression with metadata consistent
+// with what would have been generated from a parsed and checked expression.
+//
+// Note: source position information is best-effort and likely wrong, but optimized expressions
+// should be suitable for calls to parser.Unparse.
+type StaticOptimizer struct {
+ optimizers []ASTOptimizer
+}
+
+// NewStaticOptimizer creates a StaticOptimizer with a sequence of ASTOptimizer's to be applied
+// to a checked expression.
+func NewStaticOptimizer(optimizers ...ASTOptimizer) *StaticOptimizer {
+ return &StaticOptimizer{
+ optimizers: optimizers,
+ }
+}
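+
+// A usage sketch (assumes 'env' is a *Env and 'myPass' is some ASTOptimizer
+// implementation; both names are placeholders):
+//
+//	checked, iss := env.Compile(`a + a`)
+//	if iss.Err() != nil { /* handle issues */ }
+//	optimized, iss := NewStaticOptimizer(myPass{}).Optimize(env, checked)
+//	if iss.Err() != nil { /* handle issues */ }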
+
+// Optimize applies a sequence of optimizations to an Ast within a given environment.
+//
+// If issues are encountered, the Issues.Err() return value will be non-nil.
+func (opt *StaticOptimizer) Optimize(env *Env, a *Ast) (*Ast, *Issues) {
+ // Make a copy of the AST to be optimized.
+ optimized := ast.Copy(a.NativeRep())
+ ids := newIDGenerator(ast.MaxID(a.NativeRep()))
+
+ // Create the optimizer context, could be pooled in the future.
+ issues := NewIssues(common.NewErrors(a.Source()))
+ baseFac := ast.NewExprFactory()
+ exprFac := &optimizerExprFactory{
+ idGenerator: ids,
+ fac: baseFac,
+ sourceInfo: optimized.SourceInfo(),
+ }
+ ctx := &OptimizerContext{
+ optimizerExprFactory: exprFac,
+ Env: env,
+ Issues: issues,
+ }
+
+ // Apply the optimizations sequentially.
+ for _, o := range opt.optimizers {
+ optimized = o.Optimize(ctx, optimized)
+ if issues.Err() != nil {
+ return nil, issues
+ }
+ // Normalize expression id metadata including coordination with macro call metadata.
+ freshIDGen := newIDGenerator(0)
+ info := optimized.SourceInfo()
+ expr := optimized.Expr()
+ normalizeIDs(freshIDGen.renumberStable, expr, info)
+ cleanupMacroRefs(expr, info)
+
+ // Recheck the updated expression for any possible type-agreement or validation errors.
+ parsed := &Ast{
+ source: a.Source(),
+ impl: ast.NewAST(expr, info)}
+ checked, iss := ctx.Check(parsed)
+ if iss.Err() != nil {
+ return nil, iss
+ }
+ optimized = checked.NativeRep()
+ }
+
+ // Return the optimized result.
+ return &Ast{
+ source: a.Source(),
+ impl: optimized,
+ }, nil
+}
+
+// normalizeIDs ensures that the metadata present with an AST is reset in a manner such
+// that the ids within the expression correspond to the ids within macros.
+func normalizeIDs(idGen ast.IDGenerator, optimized ast.Expr, info *ast.SourceInfo) {
+ optimized.RenumberIDs(idGen)
+ if len(info.MacroCalls()) == 0 {
+ return
+ }
+
+ // Sort the macro ids to make sure that the renumbering of macro-specific variables
+ // is stable across normalization calls.
+ sortedMacroIDs := []int64{}
+ for id := range info.MacroCalls() {
+ sortedMacroIDs = append(sortedMacroIDs, id)
+ }
+ sort.Slice(sortedMacroIDs, func(i, j int) bool { return sortedMacroIDs[i] < sortedMacroIDs[j] })
+
+ // First, update the macro call ids themselves.
+ callIDMap := map[int64]int64{}
+ for _, id := range sortedMacroIDs {
+ callIDMap[id] = idGen(id)
+ }
+ // Then update the macro call definitions which refer to these ids, but
+ // ensure that the updates don't collide and remove macro entries which haven't
+ // been visited / updated yet.
+ type macroUpdate struct {
+ id int64
+ call ast.Expr
+ }
+ macroUpdates := []macroUpdate{}
+ for _, oldID := range sortedMacroIDs {
+ newID := callIDMap[oldID]
+ call, found := info.GetMacroCall(oldID)
+ if !found {
+ continue
+ }
+ call.RenumberIDs(idGen)
+ macroUpdates = append(macroUpdates, macroUpdate{id: newID, call: call})
+ info.ClearMacroCall(oldID)
+ }
+ for _, u := range macroUpdates {
+ info.SetMacroCall(u.id, u.call)
+ }
+}
+
+func cleanupMacroRefs(expr ast.Expr, info *ast.SourceInfo) {
+ if len(info.MacroCalls()) == 0 {
+ return
+ }
+
+ // Sanitize the macro call references once the optimized expression has been computed
+ // and the ids normalized between the expression and the macros.
+ exprRefMap := make(map[int64]struct{})
+ ast.PostOrderVisit(expr, ast.NewExprVisitor(func(e ast.Expr) {
+ if e.ID() == 0 {
+ return
+ }
+ exprRefMap[e.ID()] = struct{}{}
+ }))
+ // Update the macro call id references to ensure that macro pointers are
+ // updated consistently across macros.
+ for _, call := range info.MacroCalls() {
+ ast.PostOrderVisit(call, ast.NewExprVisitor(func(e ast.Expr) {
+ if e.ID() == 0 {
+ return
+ }
+ exprRefMap[e.ID()] = struct{}{}
+ }))
+ }
+ for id := range info.MacroCalls() {
+ if _, found := exprRefMap[id]; !found {
+ info.ClearMacroCall(id)
+ }
+ }
+}
+
+// newIDGenerator ensures that new ids are only created the first time they are encountered.
+func newIDGenerator(seed int64) *idGenerator {
+ return &idGenerator{
+ idMap: make(map[int64]int64),
+ seed: seed,
+ }
+}
+
+type idGenerator struct {
+ idMap map[int64]int64
+ seed int64
+}
+
+func (gen *idGenerator) nextID() int64 {
+ gen.seed++
+ return gen.seed
+}
+
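+// renumberStable maps each previously seen id to the same fresh id, so the
+// renumbering is deterministic. For example, with a zero seed, renumberStable(7)
+// returns 1 on the first and every subsequent call, renumberStable(3) then
+// returns 2, and id 0 passes through unchanged.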
+func (gen *idGenerator) renumberStable(id int64) int64 {
+ if id == 0 {
+ return 0
+ }
+ if newID, found := gen.idMap[id]; found {
+ return newID
+ }
+ nextID := gen.nextID()
+ gen.idMap[id] = nextID
+ return nextID
+}
+
+// OptimizerContext embeds Env and Issues instances to make it easy to type-check and evaluate
+// subexpressions and report any errors encountered along the way. The context also embeds the
+// optimizerExprFactory which can be used to generate new sub-expressions with expression ids
+// consistent with the expectations of a parsed expression.
+type OptimizerContext struct {
+ *Env
+ *optimizerExprFactory
+ *Issues
+}
+
+// ExtendEnv augments the context's environment with the additional options.
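+//
+// For example, an optimizer that introduces a synthetic variable before the
+// re-check pass might call ctx.ExtendEnv(Variable("tmp", DynType)) (the
+// variable name is illustrative).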
+func (opt *OptimizerContext) ExtendEnv(opts ...EnvOption) error {
+ e, err := opt.Env.Extend(opts...)
+ if err != nil {
+ return err
+ }
+ opt.Env = e
+ return nil
+}
+
+// ASTOptimizer applies an optimization over an AST and returns the optimized result.
+type ASTOptimizer interface {
+ // Optimize optimizes a type-checked AST within an Environment and accumulates any issues.
+ Optimize(*OptimizerContext, *ast.AST) *ast.AST
+}
+
+type optimizerExprFactory struct {
+ *idGenerator
+ fac ast.ExprFactory
+ sourceInfo *ast.SourceInfo
+}
+
+// NewAST creates an AST from the current expression using the tracked source info which
+// is modified and managed by the OptimizerContext.
+func (opt *optimizerExprFactory) NewAST(expr ast.Expr) *ast.AST {
+ return ast.NewAST(expr, opt.sourceInfo)
+}
+
+// CopyAST creates a renumbered copy of `Expr` and `SourceInfo` values of the input AST, where the
+// renumbering uses the same scheme as the core optimizer logic ensuring there are no collisions
+// between copies.
+//
+// Use this method before attempting to merge the expression from one AST into another.
+func (opt *optimizerExprFactory) CopyAST(a *ast.AST) (ast.Expr, *ast.SourceInfo) {
+ idGen := newIDGenerator(opt.nextID())
+ defer func() { opt.seed = idGen.nextID() }()
+ copyExpr := opt.fac.CopyExpr(a.Expr())
+ copyInfo := ast.CopySourceInfo(a.SourceInfo())
+ normalizeIDs(idGen.renumberStable, copyExpr, copyInfo)
+ return copyExpr, copyInfo
+}
+
+// CopyASTAndMetadata copies the input AST and propagates the macro metadata into the AST being
+// optimized.
+func (opt *optimizerExprFactory) CopyASTAndMetadata(a *ast.AST) ast.Expr {
+ copyExpr, copyInfo := opt.CopyAST(a)
+ for macroID, call := range copyInfo.MacroCalls() {
+ opt.SetMacroCall(macroID, call)
+ }
+ return copyExpr
+}
+
+// ClearMacroCall clears the macro at the given expression id.
+func (opt *optimizerExprFactory) ClearMacroCall(id int64) {
+ opt.sourceInfo.ClearMacroCall(id)
+}
+
+// SetMacroCall sets the macro call metadata for the given macro id within the tracked source info
+// metadata.
+func (opt *optimizerExprFactory) SetMacroCall(id int64, expr ast.Expr) {
+ opt.sourceInfo.SetMacroCall(id, expr)
+}
+
+// MacroCalls returns the map of macro calls currently in the context.
+func (opt *optimizerExprFactory) MacroCalls() map[int64]ast.Expr {
+ return opt.sourceInfo.MacroCalls()
+}
+
+// NewBindMacro creates an AST expression representing the expanded bind() macro, and a macro expression
+// representing the unexpanded call signature to be inserted into the source info macro call metadata.
+func (opt *optimizerExprFactory) NewBindMacro(macroID int64, varName string, varInit, remaining ast.Expr) (astExpr, macroExpr ast.Expr) {
+ varID := opt.nextID()
+ remainingID := opt.nextID()
+ remaining = opt.fac.CopyExpr(remaining)
+ remaining.RenumberIDs(func(id int64) int64 {
+ if id == macroID {
+ return remainingID
+ }
+ return id
+ })
+ if call, exists := opt.sourceInfo.GetMacroCall(macroID); exists {
+ opt.SetMacroCall(remainingID, opt.fac.CopyExpr(call))
+ }
+
+ astExpr = opt.fac.NewComprehension(macroID,
+ opt.fac.NewList(opt.nextID(), []ast.Expr{}, []int32{}),
+ "#unused",
+ varName,
+ opt.fac.CopyExpr(varInit),
+ opt.fac.NewLiteral(opt.nextID(), types.False),
+ opt.fac.NewIdent(varID, varName),
+ remaining)
+
+ macroExpr = opt.fac.NewMemberCall(0, "bind",
+ opt.fac.NewIdent(opt.nextID(), "cel"),
+ opt.fac.NewIdent(varID, varName),
+ opt.fac.CopyExpr(varInit),
+ opt.fac.CopyExpr(remaining))
+ opt.sanitizeMacro(macroID, macroExpr)
+ return
+}
+
+// NewCall creates a global function call invocation expression.
+//
+// Example:
+//
+// countByField(list, fieldName)
+// - function: countByField
+// - args: [list, fieldName]
+func (opt *optimizerExprFactory) NewCall(function string, args ...ast.Expr) ast.Expr {
+ return opt.fac.NewCall(opt.nextID(), function, args...)
+}
+
+// NewMemberCall creates a member function call invocation expression where 'target' is the receiver of the call.
+//
+// Example:
+//
+// list.countByField(fieldName)
+// - function: countByField
+// - target: list
+// - args: [fieldName]
+func (opt *optimizerExprFactory) NewMemberCall(function string, target ast.Expr, args ...ast.Expr) ast.Expr {
+ return opt.fac.NewMemberCall(opt.nextID(), function, target, args...)
+}
+
+// NewIdent creates a new identifier expression.
+//
+// Examples:
+//
+// - simple_var_name
+// - qualified.subpackage.var_name
+func (opt *optimizerExprFactory) NewIdent(name string) ast.Expr {
+ return opt.fac.NewIdent(opt.nextID(), name)
+}
+
+// NewLiteral creates a new literal expression value.
+//
+// The range of valid values for a literal generated during optimization is different than for expressions
+// generated via parsing / type-checking, as the ref.Val may be _any_ CEL value so long as the value can
+// be converted back to a literal-like form.
+func (opt *optimizerExprFactory) NewLiteral(value ref.Val) ast.Expr {
+ return opt.fac.NewLiteral(opt.nextID(), value)
+}
+
+// NewList creates a list expression with a set of optional indices.
+//
+// Examples:
+//
+// [a, b]
+// - elems: [a, b]
+// - optIndices: []
+//
+// [a, ?b, ?c]
+// - elems: [a, b, c]
+// - optIndices: [1, 2]
+func (opt *optimizerExprFactory) NewList(elems []ast.Expr, optIndices []int32) ast.Expr {
+ return opt.fac.NewList(opt.nextID(), elems, optIndices)
+}
+
+// NewMap creates a map from a set of entry expressions which contain a key and value expression.
+func (opt *optimizerExprFactory) NewMap(entries []ast.EntryExpr) ast.Expr {
+ return opt.fac.NewMap(opt.nextID(), entries)
+}
+
+// NewMapEntry creates a map entry with a key and value expression and a flag to indicate whether the
+// entry is optional.
+//
+// Examples:
+//
+// {a: b}
+// - key: a
+// - value: b
+// - optional: false
+//
+// {?a: ?b}
+// - key: a
+// - value: b
+// - optional: true
+func (opt *optimizerExprFactory) NewMapEntry(key, value ast.Expr, isOptional bool) ast.EntryExpr {
+ return opt.fac.NewMapEntry(opt.nextID(), key, value, isOptional)
+}
+
+// NewHasMacro generates a test-only select expression to be included within an AST and an unexpanded
+// has() macro call signature to be inserted into the source info macro call metadata.
+func (opt *optimizerExprFactory) NewHasMacro(macroID int64, s ast.Expr) (astExpr, macroExpr ast.Expr) {
+ sel := s.AsSelect()
+ astExpr = opt.fac.NewPresenceTest(macroID, sel.Operand(), sel.FieldName())
+ macroExpr = opt.fac.NewCall(0, "has",
+ opt.NewSelect(opt.fac.CopyExpr(sel.Operand()), sel.FieldName()))
+ opt.sanitizeMacro(macroID, macroExpr)
+ return
+}
+
+// NewSelect creates a select expression where a field value is selected from an operand.
+//
+// Example:
+//
+// msg.field_name
+// - operand: msg
+// - field: field_name
+func (opt *optimizerExprFactory) NewSelect(operand ast.Expr, field string) ast.Expr {
+ return opt.fac.NewSelect(opt.nextID(), operand, field)
+}
+
+// NewStruct creates a new typed struct value with a set of field initializations.
+//
+// Example:
+//
+// pkg.TypeName{field: value}
+// - typeName: pkg.TypeName
+// - fields: [{field: value}]
+func (opt *optimizerExprFactory) NewStruct(typeName string, fields []ast.EntryExpr) ast.Expr {
+ return opt.fac.NewStruct(opt.nextID(), typeName, fields)
+}
+
+// NewStructField creates a struct field initialization.
+//
+// Examples:
+//
+// {count: 3u}
+// - field: count
+// - value: 3u
+// - optional: false
+//
+// {?count: x}
+// - field: count
+// - value: x
+// - optional: true
+func (opt *optimizerExprFactory) NewStructField(field string, value ast.Expr, isOptional bool) ast.EntryExpr {
+ return opt.fac.NewStructField(opt.nextID(), field, value, isOptional)
+}
+
+// UpdateExpr updates the target expression with the updated content while preserving macro metadata.
+//
+// There are four scenarios during the update to consider:
+// 1. target is not macro, updated is not macro
+// 2. target is macro, updated is not macro
+// 3. target is macro, updated is macro
+// 4. target is not macro, updated is macro
+//
+// When the target is a macro already, it may either be updated to a new macro function
+// body if the update is also a macro, or its macro entry may be removed altogether
+// if the update is not a macro.
+//
+// When the update is a macro, then the target references within other macros must be
+// updated to point to the new updated macro. Otherwise, other macros which pointed to
+// the target body must be replaced with copies of the updated expression body.
+func (opt *optimizerExprFactory) UpdateExpr(target, updated ast.Expr) {
+ // Update the expression
+ target.SetKindCase(updated)
+
+ // Early return if there are no macros present, as the source info reflects the
+ // macro set from the target and updated expressions.
+ if len(opt.sourceInfo.MacroCalls()) == 0 {
+ return
+ }
+ // Determine whether the target expression was a macro.
+ _, targetIsMacro := opt.sourceInfo.GetMacroCall(target.ID())
+
+ // Determine whether the updated expression was a macro.
+ updatedMacro, updatedIsMacro := opt.sourceInfo.GetMacroCall(updated.ID())
+
+ if updatedIsMacro {
+ // If the updated call was a macro, then updated id maps to target id,
+ // and the updated macro moves into the target id slot.
+ opt.sourceInfo.ClearMacroCall(updated.ID())
+ opt.sourceInfo.SetMacroCall(target.ID(), updatedMacro)
+ } else if targetIsMacro {
+ // Otherwise if the target expr was a macro, but is no longer, clear
+ // the macro reference.
+ opt.sourceInfo.ClearMacroCall(target.ID())
+ }
+
+ // Punch holes in the updated value where macro references exist.
+ macroExpr := opt.fac.CopyExpr(target)
+ macroRefVisitor := ast.NewExprVisitor(func(e ast.Expr) {
+ if _, exists := opt.sourceInfo.GetMacroCall(e.ID()); exists {
+ e.SetKindCase(nil)
+ }
+ })
+ ast.PostOrderVisit(macroExpr, macroRefVisitor)
+
+ // Update any references to the expression within a macro
+ macroVisitor := ast.NewExprVisitor(func(call ast.Expr) {
+ // Update the target expression to point to the macro expression which
+ // will be empty if the updated expression was a macro.
+ if call.ID() == target.ID() {
+ call.SetKindCase(opt.fac.CopyExpr(macroExpr))
+ }
+ // Update the macro call expression if it refers to the updated expression
+ // id which has since been remapped to the target id.
+ if call.ID() == updated.ID() {
+ // Either ensure the expression is a macro reference or is populated with
+ // the relevant sub-expression if the updated expr was not a macro.
+ if updatedIsMacro {
+ call.SetKindCase(nil)
+ } else {
+ call.SetKindCase(opt.fac.CopyExpr(macroExpr))
+ }
+ // Since SetKindCase does not renumber the id, ensure the references to
+ // the old 'updated' id are mapped to the target id.
+ call.RenumberIDs(func(id int64) int64 {
+ if id == updated.ID() {
+ return target.ID()
+ }
+ return id
+ })
+ }
+ })
+ for _, call := range opt.sourceInfo.MacroCalls() {
+ ast.PostOrderVisit(call, macroVisitor)
+ }
+}
+
+func (opt *optimizerExprFactory) sanitizeMacro(macroID int64, macroExpr ast.Expr) {
+ macroRefVisitor := ast.NewExprVisitor(func(e ast.Expr) {
+ if _, exists := opt.sourceInfo.GetMacroCall(e.ID()); exists && e.ID() != macroID {
+ e.SetKindCase(nil)
+ }
+ })
+ ast.PostOrderVisit(macroExpr, macroRefVisitor)
+}
diff --git a/vendor/github.com/google/cel-go/cel/options.go b/vendor/github.com/google/cel-go/cel/options.go
index 05867730d..fee67323c 100644
--- a/vendor/github.com/google/cel-go/cel/options.go
+++ b/vendor/github.com/google/cel-go/cel/options.go
@@ -15,6 +15,7 @@
package cel
import (
+ "errors"
"fmt"
"google.golang.org/protobuf/proto"
@@ -25,6 +26,8 @@ import (
"github.com/google/cel-go/checker"
"github.com/google/cel-go/common/containers"
+ "github.com/google/cel-go/common/decls"
+ "github.com/google/cel-go/common/env"
"github.com/google/cel-go/common/functions"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/pb"
@@ -61,8 +64,35 @@ const (
// compressing the logic graph to a single call when multiple like-operator
// expressions occur: e.g. a && b && c && d -> call(_&&_, [a, b, c, d])
featureVariadicLogicalASTs
+
+ // Enable error generation when a presence test or optional field selection is
+ // performed on a primitive type.
+ featureEnableErrorOnBadPresenceTest
+
+ // Enable escape syntax for field identifiers (`).
+ featureIdentEscapeSyntax
)
+var featureIDsToNames = map[int]string{
+ featureEnableMacroCallTracking: "cel.feature.macro_call_tracking",
+ featureCrossTypeNumericComparisons: "cel.feature.cross_type_numeric_comparisons",
+ featureIdentEscapeSyntax: "cel.feature.backtick_escape_syntax",
+}
+
+func featureNameByID(id int) (string, bool) {
+ name, found := featureIDsToNames[id]
+ return name, found
+}
+
+func featureIDByName(name string) (int, bool) {
+ for id, n := range featureIDsToNames {
+ if n == name {
+ return id, true
+ }
+ }
+ return 0, false
+}
+
// EnvOption is a functional interface for configuring the environment.
type EnvOption func(e *Env) (*Env, error)
@@ -105,6 +135,8 @@ func CustomTypeProvider(provider any) EnvOption {
// Note: Declarations will by default be appended to the pre-existing declaration set configured
// for the environment. The NewEnv call builds on top of the standard CEL declarations. For a
// purely custom set of declarations use NewCustomEnv.
+//
+// Deprecated: use FunctionDecls and VariableDecls or FromConfig instead.
func Declarations(decls ...*exprpb.Decl) EnvOption {
declOpts := []EnvOption{}
var err error
@@ -243,6 +275,13 @@ func Abbrevs(qualifiedNames ...string) EnvOption {
}
}
+// customTypeRegistry is an internal-only interface containing the minimum methods required to support
+// custom types. It is a subset of methods from ref.TypeRegistry.
+type customTypeRegistry interface {
+ RegisterDescriptor(protoreflect.FileDescriptor) error
+ RegisterType(...ref.Type) error
+}
+
// Types adds one or more type declarations to the environment, allowing for construction of
// type-literals whose definitions are included in the common expression built-in set.
//
@@ -255,12 +294,7 @@ func Abbrevs(qualifiedNames ...string) EnvOption {
// Note: This option must be specified after the CustomTypeProvider option when used together.
func Types(addTypes ...any) EnvOption {
return func(e *Env) (*Env, error) {
- var reg ref.TypeRegistry
- var isReg bool
- reg, isReg = e.provider.(*types.Registry)
- if !isReg {
- reg, isReg = e.provider.(ref.TypeRegistry)
- }
+ reg, isReg := e.provider.(customTypeRegistry)
if !isReg {
return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider)
}
@@ -297,7 +331,7 @@ func Types(addTypes ...any) EnvOption {
// extension or by re-using the same EnvOption with another NewEnv() call.
func TypeDescs(descs ...any) EnvOption {
return func(e *Env) (*Env, error) {
- reg, isReg := e.provider.(ref.TypeRegistry)
+ reg, isReg := e.provider.(customTypeRegistry)
if !isReg {
return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider)
}
@@ -345,7 +379,7 @@ func TypeDescs(descs ...any) EnvOption {
}
}
-func registerFileSet(reg ref.TypeRegistry, fileSet *descpb.FileDescriptorSet) error {
+func registerFileSet(reg customTypeRegistry, fileSet *descpb.FileDescriptorSet) error {
files, err := protodesc.NewFiles(fileSet)
if err != nil {
return fmt.Errorf("protodesc.NewFiles(%v) failed: %v", fileSet, err)
@@ -353,7 +387,7 @@ func registerFileSet(reg ref.TypeRegistry, fileSet *descpb.FileDescriptorSet) er
return registerFiles(reg, files)
}
-func registerFiles(reg ref.TypeRegistry, files *protoregistry.Files) error {
+func registerFiles(reg customTypeRegistry, files *protoregistry.Files) error {
var err error
files.RangeFiles(func(fd protoreflect.FileDescriptor) bool {
err = reg.RegisterDescriptor(fd)
@@ -370,7 +404,7 @@ type ProgramOption func(p *prog) (*prog, error)
// InterpretableDecorators can be used to inspect, alter, or replace the Program plan.
func CustomDecorator(dec interpreter.InterpretableDecorator) ProgramOption {
return func(p *prog) (*prog, error) {
- p.decorators = append(p.decorators, dec)
+ p.plannerOptions = append(p.plannerOptions, interpreter.CustomDecorator(dec))
return p, nil
}
}
@@ -392,10 +426,10 @@ func Functions(funcs ...*functions.Overload) ProgramOption {
// variables with the same name provided to the Eval() call. If Globals is used in a Library with
// a Lib EnvOption, vars may shadow variables provided by previously added libraries.
//
-// The vars value may either be an `interpreter.Activation` instance or a `map[string]any`.
+// The vars value may either be a `cel.Activation` instance or a `map[string]any`.
func Globals(vars any) ProgramOption {
return func(p *prog) (*prog, error) {
- defaultVars, err := interpreter.NewActivation(vars)
+ defaultVars, err := NewActivation(vars)
if err != nil {
return nil, err
}
@@ -417,6 +451,174 @@ func OptimizeRegex(regexOptimizations ...*interpreter.RegexOptimization) Program
}
}
+// ConfigOptionFactory declares a signature which accepts a configuration element, e.g. env.Extension
+// and optionally produces an EnvOption in response.
+//
+// If there are multiple ConfigOptionFactory values which could apply to the same configuration node
+// the first one that returns an EnvOption and a `true` response will be used, and the config node
+// will not be passed along to any other option factory.
+//
+// Only the *env.Extension type is provided at this time, but validators, optimizers, and other tuning
+// parameters may be supported in the future.
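+//
+// An illustrative factory (the extension name and library are placeholders):
+//
+//	func myExtFactory(node any) (EnvOption, bool) {
+//		if ext, ok := node.(*env.Extension); ok && ext.Name == "my.ext" {
+//			return Lib(myExtLib{}), true
+//		}
+//		return nil, false
+//	}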
+type ConfigOptionFactory func(any) (EnvOption, bool)
+
+// FromConfig produces and applies a set of EnvOption values derived from an env.Config object.
+//
+// For configuration elements which refer to features outside of the `cel` package, an optional set of
+// ConfigOptionFactory values may be passed in to support the conversion from static configuration to
+// configured cel.Env value.
+//
+// Note: disabling the standard library will clear the EnvOptions values previously set for the
+// environment with the exception of propagating types and adapters over to the new environment.
+//
+// Note: to support custom types referenced in the configuration file, you must ensure that one of
+// the following options appears before the FromConfig option: Types, TypeDescs, or CustomTypeProvider,
+// as the type provider configured at the time when the config is processed is the one used to derive
+// type references from the configuration.
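+//
+// A minimal sketch (assumes 'cfg' is a *env.Config decoded from a configuration
+// file elsewhere):
+//
+//	e, err := NewCustomEnv(FromConfig(cfg))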
+func FromConfig(config *env.Config, optFactories ...ConfigOptionFactory) EnvOption {
+ return func(e *Env) (*Env, error) {
+ if err := config.Validate(); err != nil {
+ return nil, err
+ }
+ opts, err := configToEnvOptions(config, e.CELTypeProvider(), optFactories)
+ if err != nil {
+ return nil, err
+ }
+ for _, o := range opts {
+ e, err = o(e)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return e, nil
+ }
+}
+
+// configToEnvOptions generates a set of EnvOption values (or error) based on a config, a type provider,
+// and an optional set of environment options.
+func configToEnvOptions(config *env.Config, provider types.Provider, optFactories []ConfigOptionFactory) ([]EnvOption, error) {
+ envOpts := []EnvOption{}
+ // Configure the standard lib subset.
+ if config.StdLib != nil {
+ envOpts = append(envOpts, func(e *Env) (*Env, error) {
+ if e.HasLibrary("cel.lib.std") {
+ return nil, errors.New("invalid subset of stdlib: create a custom env")
+ }
+ return e, nil
+ })
+ if !config.StdLib.Disabled {
+ envOpts = append(envOpts, StdLib(StdLibSubset(config.StdLib)))
+ }
+ } else {
+ envOpts = append(envOpts, StdLib())
+ }
+
+ // Configure the container
+ if config.Container != "" {
+ envOpts = append(envOpts, Container(config.Container))
+ }
+
+ // Configure abbreviations
+ for _, imp := range config.Imports {
+ envOpts = append(envOpts, Abbrevs(imp.Name))
+ }
+
+ // Configure the context variable declaration
+ if config.ContextVariable != nil {
+ typeName := config.ContextVariable.TypeName
+ if _, found := provider.FindStructType(typeName); !found {
+ return nil, fmt.Errorf("invalid context proto type: %q", typeName)
+ }
+ // Attempt to instantiate the proto in order to reflect on its descriptor
+ msg := provider.NewValue(typeName, map[string]ref.Val{})
+ pbMsg, ok := msg.Value().(proto.Message)
+ if !ok {
+ return nil, fmt.Errorf("unsupported context type: %T", msg.Value())
+ }
+ envOpts = append(envOpts, DeclareContextProto(pbMsg.ProtoReflect().Descriptor()))
+ }
+
+ // Configure variables
+ if len(config.Variables) != 0 {
+ vars := make([]*decls.VariableDecl, 0, len(config.Variables))
+ for _, v := range config.Variables {
+ vDef, err := v.AsCELVariable(provider)
+ if err != nil {
+ return nil, err
+ }
+ vars = append(vars, vDef)
+ }
+ envOpts = append(envOpts, VariableDecls(vars...))
+ }
+
+ // Configure functions
+ if len(config.Functions) != 0 {
+ funcs := make([]*decls.FunctionDecl, 0, len(config.Functions))
+ for _, f := range config.Functions {
+ fnDef, err := f.AsCELFunction(provider)
+ if err != nil {
+ return nil, err
+ }
+ funcs = append(funcs, fnDef)
+ }
+ envOpts = append(envOpts, FunctionDecls(funcs...))
+ }
+
+ // Configure features
+ for _, feat := range config.Features {
+ // Note, if a feature is not found, it is skipped as it is possible the feature
+ // is not intended to be supported publicly. In the future, a refinement to
+ // this strategy to report unrecognized features and validators should probably
+ // be covered as a standard ConfigOptionFactory.
+ if id, found := featureIDByName(feat.Name); found {
+ envOpts = append(envOpts, features(id, feat.Enabled))
+ }
+ }
+
+ // Configure validators
+ for _, val := range config.Validators {
+ if fac, found := astValidatorFactories[val.Name]; found {
+ envOpts = append(envOpts, func(e *Env) (*Env, error) {
+ validator, err := fac(val)
+ if err != nil {
+ return nil, fmt.Errorf("%w", err)
+ }
+ return ASTValidators(validator)(e)
+ })
+ } else if opt, handled := handleExtendedConfigOption(val, optFactories); handled {
+ envOpts = append(envOpts, opt)
+ }
+ // we don't error when the validator isn't found as it may be part
+ // of an extension library and enabled implicitly.
+ }
+
+ // Configure extensions
+ for _, ext := range config.Extensions {
+ // version number has been validated by the call to `Validate`
+ ver, _ := ext.VersionNumber()
+ if ext.Name == "optional" {
+ envOpts = append(envOpts, OptionalTypes(OptionalTypesVersion(ver)))
+ } else {
+ opt, handled := handleExtendedConfigOption(ext, optFactories)
+ if !handled {
+ return nil, fmt.Errorf("unrecognized extension: %s", ext.Name)
+ }
+ envOpts = append(envOpts, opt)
+ }
+ }
+
+ return envOpts, nil
+}
+
+func handleExtendedConfigOption(conf any, optFactories []ConfigOptionFactory) (EnvOption, bool) {
+ for _, optFac := range optFactories {
+ if opt, useOption := optFac(conf); useOption {
+ return opt, true
+ }
+ }
+ return nil, false
+}
+
// EvalOption indicates an evaluation option that may affect the evaluation behavior or information
// in the output result.
type EvalOption int
@@ -448,6 +650,8 @@ const (
OptTrackCost EvalOption = 1 << iota
// OptCheckStringFormat enables compile-time checking of string.format calls for syntax/cardinality.
+ //
+ // Deprecated: use ext.StringsValidateFormatCalls() as this option is now a no-op.
OptCheckStringFormat EvalOption = 1 << iota
)
@@ -523,7 +727,7 @@ func fieldToCELType(field protoreflect.FieldDescriptor) (*Type, error) {
return nil, fmt.Errorf("field %s type %s not implemented", field.FullName(), field.Kind().String())
}
-func fieldToVariable(field protoreflect.FieldDescriptor) (EnvOption, error) {
+func fieldToVariable(field protoreflect.FieldDescriptor) (*decls.VariableDecl, error) {
name := string(field.Name())
if field.IsMap() {
mapKey := field.MapKey()
@@ -536,20 +740,20 @@ func fieldToVariable(field protoreflect.FieldDescriptor) (EnvOption, error) {
if err != nil {
return nil, err
}
- return Variable(name, MapType(keyType, valueType)), nil
+ return decls.NewVariable(name, MapType(keyType, valueType)), nil
}
if field.IsList() {
elemType, err := fieldToCELType(field)
if err != nil {
return nil, err
}
- return Variable(name, ListType(elemType)), nil
+ return decls.NewVariable(name, ListType(elemType)), nil
}
celType, err := fieldToCELType(field)
if err != nil {
return nil, err
}
- return Variable(name, celType), nil
+ return decls.NewVariable(name, celType), nil
}
// DeclareContextProto returns an option to extend CEL environment with declarations from the given context proto.
@@ -557,17 +761,25 @@ func fieldToVariable(field protoreflect.FieldDescriptor) (EnvOption, error) {
// https://github.com/google/cel-spec/blob/master/doc/langdef.md#evaluation-environment
func DeclareContextProto(descriptor protoreflect.MessageDescriptor) EnvOption {
return func(e *Env) (*Env, error) {
+ if e.contextProto != nil {
+ return nil, fmt.Errorf("context proto already declared as %q, got %q",
+ e.contextProto.FullName(), descriptor.FullName())
+ }
+ e.contextProto = descriptor
fields := descriptor.Fields()
+ vars := make([]*decls.VariableDecl, 0, fields.Len())
for i := 0; i < fields.Len(); i++ {
field := fields.Get(i)
variable, err := fieldToVariable(field)
if err != nil {
return nil, err
}
- e, err = variable(e)
- if err != nil {
- return nil, err
- }
+ vars = append(vars, variable)
+ }
+ var err error
+ e, err = VariableDecls(vars...)(e)
+ if err != nil {
+ return nil, err
}
return Types(dynamicpb.NewMessage(descriptor))(e)
}
@@ -577,7 +789,7 @@ func DeclareContextProto(descriptor protoreflect.MessageDescriptor) EnvOption {
//
// Consider using with `DeclareContextProto` to simplify variable type declarations and publishing when using
// protocol buffers.
-func ContextProtoVars(ctx proto.Message) (interpreter.Activation, error) {
+func ContextProtoVars(ctx proto.Message) (Activation, error) {
if ctx == nil || !ctx.ProtoReflect().IsValid() {
return interpreter.EmptyActivation(), nil
}
@@ -601,7 +813,7 @@ func ContextProtoVars(ctx proto.Message) (interpreter.Activation, error) {
}
vars[field.TextName()] = fieldVal
}
- return interpreter.NewActivation(vars)
+ return NewActivation(vars)
}
// EnableMacroCallTracking ensures that call expressions which are replaced by macros
@@ -610,6 +822,12 @@ func EnableMacroCallTracking() EnvOption {
return features(featureEnableMacroCallTracking, true)
}
+// EnableIdentifierEscapeSyntax enables identifier escaping (`) syntax for
+// fields.
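+//
+// For example, with escaping enabled an expression may select a field whose name
+// is not a valid CEL identifier, e.g. msg.`foo-bar` (field name illustrative).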
+func EnableIdentifierEscapeSyntax() EnvOption {
+ return features(featureIdentEscapeSyntax, true)
+}
+
// CrossTypeNumericComparisons makes it possible to compare across numeric types, e.g. double < int
func CrossTypeNumericComparisons(enabled bool) EnvOption {
return features(featureCrossTypeNumericComparisons, enabled)
@@ -647,6 +865,15 @@ func ParserExpressionSizeLimit(limit int) EnvOption {
}
}
+// EnableHiddenAccumulatorName sets the parser to use the identifier '@result' for accumulators
+// which is not normally accessible from CEL source.
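+// The hidden name avoids collisions with user-declared identifiers, since the
+// historical accumulator name '__result__' is an ordinary identifier that user
+// expressions could shadow.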
+func EnableHiddenAccumulatorName(enabled bool) EnvOption {
+ return func(e *Env) (*Env, error) {
+ e.prsrOpts = append(e.prsrOpts, parser.EnableHiddenAccumulatorName(enabled))
+ return e, nil
+ }
+}
+
func maybeInteropProvider(provider any) (types.Provider, error) {
switch p := provider.(type) {
case types.Provider:
diff --git a/vendor/github.com/google/cel-go/cel/program.go b/vendor/github.com/google/cel-go/cel/program.go
index 2dd72f750..24f41a4a7 100644
--- a/vendor/github.com/google/cel-go/cel/program.go
+++ b/vendor/github.com/google/cel-go/cel/program.go
@@ -19,7 +19,7 @@ import (
"fmt"
"sync"
- celast "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/ast"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/interpreter"
@@ -29,7 +29,7 @@ import (
type Program interface {
// Eval returns the result of an evaluation of the Ast and environment against the input vars.
//
- // The vars value may either be an `interpreter.Activation` or a `map[string]any`.
+ // The vars value may either be an `Activation` or a `map[string]any`.
//
// If the `OptTrackState`, `OptTrackCost` or `OptExhaustiveEval` flags are used, the `details` response will
// be non-nil. Given this caveat on `details`, the return state from evaluation will be:
@@ -47,14 +47,39 @@ type Program interface {
// to support cancellation and timeouts. This method must be used in conjunction with the
// InterruptCheckFrequency() option for cancellation interrupts to impact evaluation.
//
- // The vars value may either be an `interpreter.Activation` or `map[string]any`.
+ // The vars value may either be an `Activation` or `map[string]any`.
//
// The output contract for `ContextEval` is otherwise identical to the `Eval` method.
ContextEval(context.Context, any) (ref.Val, *EvalDetails, error)
}
+// Activation used to resolve identifiers by name and references by id.
+//
+// An Activation is the primary mechanism by which a caller supplies input into a CEL program.
+type Activation = interpreter.Activation
+
+// NewActivation returns an activation based on a map-based binding where the map keys are
+// expected to be qualified names used with ResolveName calls.
+//
+// The input `bindings` may either be of type `Activation` or `map[string]any`.
+//
+// Lazy bindings may be supplied within the map-based input in either of the following forms:
+// - func() any
+// - func() ref.Val
+//
+// The output of the lazy binding will overwrite the variable reference in the internal map.
+//
+// Values which are not represented as ref.Val types on input may be adapted to a ref.Val using
+// the types.Adapter configured in the environment.
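+//
+// For example (binding names and the lazy loader are illustrative):
+//
+//	act, err := NewActivation(map[string]any{
+//		"name":     "world",
+//		"attempts": func() any { return loadAttempts() },
+//	})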
+func NewActivation(bindings any) (Activation, error) {
+ return interpreter.NewActivation(bindings)
+}
+
+// PartialActivation extends the Activation interface with a set of unknown AttributePatterns.
+type PartialActivation = interpreter.PartialActivation
+
// NoVars returns an empty Activation.
-func NoVars() interpreter.Activation {
+func NoVars() Activation {
return interpreter.EmptyActivation()
}
@@ -64,10 +89,9 @@ func NoVars() interpreter.Activation {
// This method relies on manually configured sets of missing attribute patterns. For a method which
// infers the missing variables from the input and the configured environment, use Env.PartialVars().
//
-// The `vars` value may either be an interpreter.Activation or any valid input to the
-// interpreter.NewActivation call.
+// The `vars` value may either be an Activation or any valid input to the NewActivation call.
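+//
+// For example, to treat the 'age' attribute of variable 'user' as unknown
+// (names illustrative):
+//
+//	vars, err := PartialVars(map[string]any{},
+//		AttributePattern("user").QualString("age"))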
func PartialVars(vars any,
- unknowns ...*interpreter.AttributePattern) (interpreter.PartialActivation, error) {
+ unknowns ...*AttributePatternType) (PartialActivation, error) {
return interpreter.NewPartialActivation(vars, unknowns...)
}
@@ -84,12 +108,15 @@ func PartialVars(vars any,
// fully qualified variable name may be `ns.app.a`, `ns.a`, or `a` per the CEL namespace resolution
// rules. Pick the fully qualified variable name that makes sense within the container as the
// AttributePattern `varName` argument.
+func AttributePattern(varName string) *AttributePatternType {
+ return interpreter.NewAttributePattern(varName)
+}
+
+// AttributePatternType represents a top-level variable with an optional set of qualifier patterns.
//
// See the interpreter.AttributePattern and interpreter.AttributeQualifierPattern for more info
// about how to create and manipulate AttributePattern values.
-func AttributePattern(varName string) *interpreter.AttributePattern {
- return interpreter.NewAttributePattern(varName)
-}
+type AttributePatternType = interpreter.AttributePattern
// EvalDetails holds additional information observed during the Eval() call.
type EvalDetails struct {
@@ -100,6 +127,9 @@ type EvalDetails struct {
// State of the evaluation, non-nil if the OptTrackState or OptExhaustiveEval is specified
// within EvalOptions.
func (ed *EvalDetails) State() interpreter.EvalState {
+ if ed == nil {
+ return interpreter.NewEvalState()
+ }
return ed.state
}
@@ -117,52 +147,39 @@ func (ed *EvalDetails) ActualCost() *uint64 {
type prog struct {
*Env
evalOpts EvalOption
- defaultVars interpreter.Activation
+ defaultVars Activation
dispatcher interpreter.Dispatcher
interpreter interpreter.Interpreter
interruptCheckFrequency uint
// Intermediate state used to configure the InterpretableDecorator set provided
// to the initInterpretable call.
- decorators []interpreter.InterpretableDecorator
+ plannerOptions []interpreter.PlannerOption
regexOptimizations []*interpreter.RegexOptimization
// Interpretable configured from an Ast and aggregate decorator set based on program options.
interpretable interpreter.Interpretable
+ observable *interpreter.ObservableInterpretable
callCostEstimator interpreter.ActualCostEstimator
costOptions []interpreter.CostTrackerOption
costLimit *uint64
}
-func (p *prog) clone() *prog {
- costOptsCopy := make([]interpreter.CostTrackerOption, len(p.costOptions))
- copy(costOptsCopy, p.costOptions)
-
- return &prog{
- Env: p.Env,
- evalOpts: p.evalOpts,
- defaultVars: p.defaultVars,
- dispatcher: p.dispatcher,
- interpreter: p.interpreter,
- interruptCheckFrequency: p.interruptCheckFrequency,
- }
-}
-
// newProgram creates a program instance with an environment, an ast, and an optional list of
// ProgramOption values.
//
// If the program cannot be configured the prog will be nil, with a non-nil error response.
-func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) {
+func newProgram(e *Env, a *ast.AST, opts []ProgramOption) (Program, error) {
// Build the dispatcher, interpreter, and default program value.
disp := interpreter.NewDispatcher()
// Ensure the default attribute factory is set after the adapter and provider are
// configured.
p := &prog{
- Env: e,
- decorators: []interpreter.InterpretableDecorator{},
- dispatcher: disp,
- costOptions: []interpreter.CostTrackerOption{},
+ Env: e,
+ plannerOptions: []interpreter.PlannerOption{},
+ dispatcher: disp,
+ costOptions: []interpreter.CostTrackerOption{},
}
// Configure the program via the ProgramOption values.
@@ -188,128 +205,83 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) {
// Set the attribute factory after the options have been set.
var attrFactory interpreter.AttributeFactory
+ attrFactorOpts := []interpreter.AttrFactoryOption{
+ interpreter.EnableErrorOnBadPresenceTest(p.HasFeature(featureEnableErrorOnBadPresenceTest)),
+ }
if p.evalOpts&OptPartialEval == OptPartialEval {
- attrFactory = interpreter.NewPartialAttributeFactory(e.Container, e.adapter, e.provider)
+ attrFactory = interpreter.NewPartialAttributeFactory(e.Container, e.adapter, e.provider, attrFactorOpts...)
} else {
- attrFactory = interpreter.NewAttributeFactory(e.Container, e.adapter, e.provider)
+ attrFactory = interpreter.NewAttributeFactory(e.Container, e.adapter, e.provider, attrFactorOpts...)
}
interp := interpreter.NewInterpreter(disp, e.Container, e.provider, e.adapter, attrFactory)
p.interpreter = interp
// Translate the EvalOption flags into InterpretableDecorator instances.
- decorators := make([]interpreter.InterpretableDecorator, len(p.decorators))
- copy(decorators, p.decorators)
+ plannerOptions := make([]interpreter.PlannerOption, len(p.plannerOptions))
+ copy(plannerOptions, p.plannerOptions)
// Enable interrupt checking if there's a non-zero check frequency
if p.interruptCheckFrequency > 0 {
- decorators = append(decorators, interpreter.InterruptableEval())
+ plannerOptions = append(plannerOptions, interpreter.InterruptableEval())
}
// Enable constant folding first.
if p.evalOpts&OptOptimize == OptOptimize {
- decorators = append(decorators, interpreter.Optimize())
+ plannerOptions = append(plannerOptions, interpreter.Optimize())
p.regexOptimizations = append(p.regexOptimizations, interpreter.MatchesRegexOptimization)
}
// Enable regex compilation of constants immediately after folding constants.
if len(p.regexOptimizations) > 0 {
- decorators = append(decorators, interpreter.CompileRegexConstants(p.regexOptimizations...))
- }
- // Enable compile-time checking of syntax/cardinality for string.format calls.
- if p.evalOpts&OptCheckStringFormat == OptCheckStringFormat {
- var isValidType func(id int64, validTypes ...ref.Type) (bool, error)
- if ast.IsChecked() {
- isValidType = func(id int64, validTypes ...ref.Type) (bool, error) {
- t := ast.typeMap[id]
- if t.Kind() == DynKind {
- return true, nil
- }
- for _, vt := range validTypes {
- k, err := typeValueToKind(vt)
- if err != nil {
- return false, err
- }
- if t.Kind() == k {
- return true, nil
- }
- }
- return false, nil
- }
- } else {
- // if the AST isn't type-checked, short-circuit validation
- isValidType = func(id int64, validTypes ...ref.Type) (bool, error) {
- return true, nil
- }
- }
- decorators = append(decorators, interpreter.InterpolateFormattedString(isValidType))
+ plannerOptions = append(plannerOptions, interpreter.CompileRegexConstants(p.regexOptimizations...))
}
// Enable exhaustive eval, state tracking and cost tracking last since they require a factory.
if p.evalOpts&(OptExhaustiveEval|OptTrackState|OptTrackCost) != 0 {
- factory := func(state interpreter.EvalState, costTracker *interpreter.CostTracker) (Program, error) {
- costTracker.Estimator = p.callCostEstimator
- costTracker.Limit = p.costLimit
- for _, costOpt := range p.costOptions {
- err := costOpt(costTracker)
- if err != nil {
- return nil, err
- }
- }
- // Limit capacity to guarantee a reallocation when calling 'append(decs, ...)' below. This
- // prevents the underlying memory from being shared between factory function calls causing
- // undesired mutations.
- decs := decorators[:len(decorators):len(decorators)]
- var observers []interpreter.EvalObserver
-
- if p.evalOpts&(OptExhaustiveEval|OptTrackState) != 0 {
- // EvalStateObserver is required for OptExhaustiveEval.
- observers = append(observers, interpreter.EvalStateObserver(state))
- }
- if p.evalOpts&OptTrackCost == OptTrackCost {
- observers = append(observers, interpreter.CostObserver(costTracker))
- }
-
- // Enable exhaustive eval over a basic observer since it offers a superset of features.
- if p.evalOpts&OptExhaustiveEval == OptExhaustiveEval {
- decs = append(decs, interpreter.ExhaustiveEval(), interpreter.Observe(observers...))
- } else if len(observers) > 0 {
- decs = append(decs, interpreter.Observe(observers...))
- }
-
- return p.clone().initInterpretable(ast, decs)
+ costOptCount := len(p.costOptions)
+ if p.costLimit != nil {
+ costOptCount++
}
- return newProgGen(factory)
- }
- return p.initInterpretable(ast, decorators)
-}
-
-func (p *prog) initInterpretable(ast *Ast, decs []interpreter.InterpretableDecorator) (*prog, error) {
- // Unchecked programs do not contain type and reference information and may be slower to execute.
- if !ast.IsChecked() {
- interpretable, err :=
- p.interpreter.NewUncheckedInterpretable(ast.Expr(), decs...)
- if err != nil {
- return nil, err
+ costOpts := make([]interpreter.CostTrackerOption, 0, costOptCount)
+ costOpts = append(costOpts, p.costOptions...)
+ if p.costLimit != nil {
+ costOpts = append(costOpts, interpreter.CostTrackerLimit(*p.costLimit))
+ }
+ trackerFactory := func() (*interpreter.CostTracker, error) {
+ return interpreter.NewCostTracker(p.callCostEstimator, costOpts...)
+ }
+ var observers []interpreter.PlannerOption
+ if p.evalOpts&(OptExhaustiveEval|OptTrackState) != 0 {
+ // EvalStateObserver is required for OptExhaustiveEval.
+ observers = append(observers, interpreter.EvalStateObserver())
+ }
+ if p.evalOpts&OptTrackCost == OptTrackCost {
+ observers = append(observers, interpreter.CostObserver(interpreter.CostTrackerFactory(trackerFactory)))
+ }
+ // Enable exhaustive eval over a basic observer since it offers a superset of features.
+ if p.evalOpts&OptExhaustiveEval == OptExhaustiveEval {
+ plannerOptions = append(plannerOptions,
+ append([]interpreter.PlannerOption{interpreter.ExhaustiveEval()}, observers...)...)
+ } else if len(observers) > 0 {
+ plannerOptions = append(plannerOptions, observers...)
}
- p.interpretable = interpretable
- return p, nil
}
+ return p.initInterpretable(a, plannerOptions)
+}
- // When the AST has been checked it contains metadata that can be used to speed up program execution.
- checked := &celast.CheckedAST{
- Expr: ast.Expr(),
- SourceInfo: ast.SourceInfo(),
- TypeMap: ast.typeMap,
- ReferenceMap: ast.refMap,
- }
- interpretable, err := p.interpreter.NewInterpretable(checked, decs...)
+func (p *prog) initInterpretable(a *ast.AST, plannerOptions []interpreter.PlannerOption) (*prog, error) {
+ // When the AST has been checked it contains metadata that can be used to speed up program execution.
+ interpretable, err := p.interpreter.NewInterpretable(a, plannerOptions...)
if err != nil {
return nil, err
}
p.interpretable = interpretable
+ if oi, ok := interpretable.(*interpreter.ObservableInterpretable); ok {
+ p.observable = oi
+ }
return p, nil
}
// Eval implements the Program interface method.
-func (p *prog) Eval(input any) (v ref.Val, det *EvalDetails, err error) {
+func (p *prog) Eval(input any) (out ref.Val, det *EvalDetails, err error) {
// Configure error recovery for unexpected panics during evaluation. Note, the use of named
// return values makes it possible to modify the error response during the recovery
// function.
@@ -324,9 +296,9 @@ func (p *prog) Eval(input any) (v ref.Val, det *EvalDetails, err error) {
}
}()
// Build a hierarchical activation if there are default vars set.
- var vars interpreter.Activation
+ var vars Activation
switch v := input.(type) {
- case interpreter.Activation:
+ case Activation:
vars = v
case map[string]any:
vars = activationPool.Setup(v)
@@ -337,12 +309,24 @@ func (p *prog) Eval(input any) (v ref.Val, det *EvalDetails, err error) {
if p.defaultVars != nil {
vars = interpreter.NewHierarchicalActivation(p.defaultVars, vars)
}
- v = p.interpretable.Eval(vars)
+ if p.observable != nil {
+ det = &EvalDetails{}
+ out = p.observable.ObserveEval(vars, func(observed any) {
+ switch o := observed.(type) {
+ case interpreter.EvalState:
+ det.state = o
+ case *interpreter.CostTracker:
+ det.costTracker = o
+ }
+ })
+ } else {
+ out = p.interpretable.Eval(vars)
+ }
// The output of an internal Eval may have a value (`v`) that is a types.Err. This step
// translates the CEL value to a Go error response. This interface does not quite match the
// RPC signature which allows for multiple errors to be returned, but should be sufficient.
- if types.IsError(v) {
- err = v.(*types.Err)
+ if types.IsError(out) {
+ err = out.(*types.Err)
}
return
}
@@ -354,9 +338,9 @@ func (p *prog) ContextEval(ctx context.Context, input any) (ref.Val, *EvalDetail
}
// Configure the input, making sure to wrap Activation inputs in the special ctxActivation which
// exposes the #interrupted variable and manages rate-limited checks of the ctx.Done() state.
- var vars interpreter.Activation
+ var vars Activation
switch v := input.(type) {
- case interpreter.Activation:
+ case Activation:
vars = ctxActivationPool.Setup(v, ctx.Done(), p.interruptCheckFrequency)
defer ctxActivationPool.Put(vars)
case map[string]any:
@@ -370,90 +354,8 @@ func (p *prog) ContextEval(ctx context.Context, input any) (ref.Val, *EvalDetail
return p.Eval(vars)
}
-// progFactory is a helper alias for marking a program creation factory function.
-type progFactory func(interpreter.EvalState, *interpreter.CostTracker) (Program, error)
-
-// progGen holds a reference to a progFactory instance and implements the Program interface.
-type progGen struct {
- factory progFactory
-}
-
-// newProgGen tests the factory object by calling it once and returns a factory-based Program if
-// the test is successful.
-func newProgGen(factory progFactory) (Program, error) {
- // Test the factory to make sure that configuration errors are spotted at config
- tracker, err := interpreter.NewCostTracker(nil)
- if err != nil {
- return nil, err
- }
- _, err = factory(interpreter.NewEvalState(), tracker)
- if err != nil {
- return nil, err
- }
- return &progGen{factory: factory}, nil
-}
-
-// Eval implements the Program interface method.
-func (gen *progGen) Eval(input any) (ref.Val, *EvalDetails, error) {
- // The factory based Eval() differs from the standard evaluation model in that it generates a
- // new EvalState instance for each call to ensure that unique evaluations yield unique stateful
- // results.
- state := interpreter.NewEvalState()
- costTracker, err := interpreter.NewCostTracker(nil)
- if err != nil {
- return nil, nil, err
- }
- det := &EvalDetails{state: state, costTracker: costTracker}
-
- // Generate a new instance of the interpretable using the factory configured during the call to
- // newProgram(). It is incredibly unlikely that the factory call will generate an error given
- // the factory test performed within the Program() call.
- p, err := gen.factory(state, costTracker)
- if err != nil {
- return nil, det, err
- }
-
- // Evaluate the input, returning the result and the 'state' within EvalDetails.
- v, _, err := p.Eval(input)
- if err != nil {
- return v, det, err
- }
- return v, det, nil
-}
-
-// ContextEval implements the Program interface method.
-func (gen *progGen) ContextEval(ctx context.Context, input any) (ref.Val, *EvalDetails, error) {
- if ctx == nil {
- return nil, nil, fmt.Errorf("context can not be nil")
- }
- // The factory based Eval() differs from the standard evaluation model in that it generates a
- // new EvalState instance for each call to ensure that unique evaluations yield unique stateful
- // results.
- state := interpreter.NewEvalState()
- costTracker, err := interpreter.NewCostTracker(nil)
- if err != nil {
- return nil, nil, err
- }
- det := &EvalDetails{state: state, costTracker: costTracker}
-
- // Generate a new instance of the interpretable using the factory configured during the call to
- // newProgram(). It is incredibly unlikely that the factory call will generate an error given
- // the factory test performed within the Program() call.
- p, err := gen.factory(state, costTracker)
- if err != nil {
- return nil, det, err
- }
-
- // Evaluate the input, returning the result and the 'state' within EvalDetails.
- v, _, err := p.ContextEval(ctx, input)
- if err != nil {
- return v, det, err
- }
- return v, det, nil
-}
-
type ctxEvalActivation struct {
- parent interpreter.Activation
+ parent Activation
interrupt <-chan struct{}
interruptCheckCount uint
interruptCheckFrequency uint
@@ -477,10 +379,15 @@ func (a *ctxEvalActivation) ResolveName(name string) (any, bool) {
return a.parent.ResolveName(name)
}
-func (a *ctxEvalActivation) Parent() interpreter.Activation {
+func (a *ctxEvalActivation) Parent() Activation {
return a.parent
}
+func (a *ctxEvalActivation) AsPartialActivation() (interpreter.PartialActivation, bool) {
+ pa, ok := a.parent.(interpreter.PartialActivation)
+ return pa, ok
+}
+
func newCtxEvalActivationPool() *ctxEvalActivationPool {
return &ctxEvalActivationPool{
Pool: sync.Pool{
@@ -496,7 +403,7 @@ type ctxEvalActivationPool struct {
}
// Setup initializes a pooled Activation with the ability to check for context.Context cancellation
-func (p *ctxEvalActivationPool) Setup(vars interpreter.Activation, done <-chan struct{}, interruptCheckRate uint) *ctxEvalActivation {
+func (p *ctxEvalActivationPool) Setup(vars Activation, done <-chan struct{}, interruptCheckRate uint) *ctxEvalActivation {
a := p.Pool.Get().(*ctxEvalActivation)
a.parent = vars
a.interrupt = done
@@ -545,8 +452,8 @@ func (a *evalActivation) ResolveName(name string) (any, bool) {
}
}
-// Parent implements the interpreter.Activation interface
-func (a *evalActivation) Parent() interpreter.Activation {
+// Parent implements the Activation interface
+func (a *evalActivation) Parent() Activation {
return nil
}
@@ -580,8 +487,6 @@ func (p *evalActivationPool) Put(value any) {
}
var (
- emptyEvalState = interpreter.NewEvalState()
-
// activationPool is an internally managed pool of Activation values that wrap map[string]any inputs
activationPool = newEvalActivationPool()
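
Note on the progGen removal above: EvalDetails are now produced through the ObservableInterpretable's ObserveEval callback instead of a per-call factory, while the public Program interface is unchanged. A minimal caller-side sketch under that assumption (the environment, variable, and expression are illustrative, not from this repository):

    package main

    import (
    	"fmt"

    	"github.com/google/cel-go/cel"
    )

    func main() {
    	env, err := cel.NewEnv(cel.Variable("x", cel.IntType))
    	if err != nil {
    		panic(err)
    	}
    	astVal, iss := env.Compile("x + 1")
    	if iss.Err() != nil {
    		panic(iss.Err())
    	}
    	// OptTrackCost selects the observable planner path added above;
    	// Eval then fills EvalDetails via the ObserveEval callback.
    	prg, err := env.Program(astVal, cel.EvalOptions(cel.OptTrackCost))
    	if err != nil {
    		panic(err)
    	}
    	out, det, err := prg.Eval(map[string]any{"x": int64(41)})
    	if err != nil {
    		panic(err)
    	}
    	if cost := det.ActualCost(); cost != nil {
    		fmt.Printf("result=%v cost=%d\n", out, *cost)
    	}
    }
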
diff --git a/vendor/github.com/google/cel-go/cel/prompt.go b/vendor/github.com/google/cel-go/cel/prompt.go
new file mode 100644
index 000000000..929a26f91
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/prompt.go
@@ -0,0 +1,155 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ _ "embed"
+ "sort"
+ "strings"
+ "text/template"
+
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/overloads"
+)
+
+//go:embed templates/authoring.tmpl
+var authoringPrompt string
+
+// AuthoringPrompt creates a prompt template from a CEL environment for the purpose of AI-assisted authoring.
+func AuthoringPrompt(env *Env) (*Prompt, error) {
+ funcMap := template.FuncMap{
+ "split": func(str string) []string { return strings.Split(str, "\n") },
+ }
+ tmpl := template.New("cel").Funcs(funcMap)
+ tmpl, err := tmpl.Parse(authoringPrompt)
+ if err != nil {
+ return nil, err
+ }
+ return &Prompt{
+ Persona: defaultPersona,
+ FormatRules: defaultFormatRules,
+ GeneralUsage: defaultGeneralUsage,
+ tmpl: tmpl,
+ env: env,
+ }, nil
+}
+
+// Prompt represents the core components of an LLM prompt based on a CEL environment.
+//
+// All fields of the prompt may be overwritten or modified, with support for rendering
+// the prompt to a human-readable string.
+type Prompt struct {
+ // Persona indicates something about the kind of user making the request
+ Persona string
+
+ // FormatRules indicate how the LLM should generate its output
+ FormatRules string
+
+ // GeneralUsage specifies additional context on how CEL should be used.
+ GeneralUsage string
+
+ // tmpl is the text template base-configuration for rendering text.
+ tmpl *template.Template
+
+ // env reference used to collect variables, functions, and macros available to the prompt.
+ env *Env
+}
+
+type promptInst struct {
+ *Prompt
+
+ Variables []*common.Doc
+ Macros []*common.Doc
+ Functions []*common.Doc
+ UserPrompt string
+}
+
+// Render renders the user prompt with the associated context from the prompt template
+// for use with LLM generators.
+func (p *Prompt) Render(userPrompt string) string {
+ var buffer strings.Builder
+ vars := make([]*common.Doc, len(p.env.Variables()))
+ for i, v := range p.env.Variables() {
+ vars[i] = v.Documentation()
+ }
+ sort.SliceStable(vars, func(i, j int) bool {
+ return vars[i].Name < vars[j].Name
+ })
+ macs := make([]*common.Doc, len(p.env.Macros()))
+ for i, m := range p.env.Macros() {
+ macs[i] = m.(common.Documentor).Documentation()
+ }
+ funcs := make([]*common.Doc, 0, len(p.env.Functions()))
+ for _, f := range p.env.Functions() {
+ if _, hidden := hiddenFunctions[f.Name()]; hidden {
+ continue
+ }
+ funcs = append(funcs, f.Documentation())
+ }
+ sort.SliceStable(funcs, func(i, j int) bool {
+ return funcs[i].Name < funcs[j].Name
+ })
+ inst := &promptInst{
+ Prompt: p,
+ Variables: vars,
+ Macros: macs,
+ Functions: funcs,
+ UserPrompt: userPrompt}
+ p.tmpl.Execute(&buffer, inst)
+ return buffer.String()
+}
+
+const (
+ defaultPersona = `You are a software engineer with expertise in networking and application security
+authoring boolean Common Expression Language (CEL) expressions to ensure firewall,
+networking, authentication, and data access is only permitted when all conditions
+are satisfied.`
+
+ defaultFormatRules = `Output your response as a CEL expression.
+
+Write the expression with the comment on the first line and the expression on the
+subsequent lines. Format the expression using 80-character line limits commonly
+found in C++ or Java code.`
+
+ defaultGeneralUsage = `CEL supports Protocol Buffer and JSON types, as well as simple types and aggregate types.
+
+Simple types include bool, bytes, double, int, string, and uint:
+
+* double literals must always include a decimal point: 1.0, 3.5, -2.2
+* uint literals must be positive values suffixed with a 'u': 42u
+* byte literals are strings prefixed with a 'b': b'1235'
+* string literals can use either single quotes or double quotes: 'hello', "world"
+* string literals can also be treated as raw strings that do not require any
+ escaping within the string by using the 'R' prefix: R"""quote: "hi" """
+
+Aggregate types include list and map:
+
+* list literals consist of zero or more values between brackets: "['a', 'b', 'c']"
+* map literals consist of colon-separated key-value pairs within braces: "{'key1': 1, 'key2': 2}"
+* Only int, uint, string, and bool types are valid map keys.
+* Maps containing HTTP headers must always use lower-cased string keys.
+
+Comments start with two forward slashes followed by text and a newline.
+)
+
+var (
+ hiddenFunctions = map[string]bool{
+ overloads.DeprecatedIn: true,
+ operators.OldIn: true,
+ operators.OldNotStrictlyFalse: true,
+ operators.NotStrictlyFalse: true,
+ }
+)
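
A short usage sketch for the AuthoringPrompt API introduced above; the environment contents and the user prompt string are illustrative assumptions:

    package main

    import (
    	"fmt"

    	"github.com/google/cel-go/cel"
    )

    func main() {
    	env, err := cel.NewEnv(cel.Variable("request.path", cel.StringType))
    	if err != nil {
    		panic(err)
    	}
    	prompt, err := cel.AuthoringPrompt(env)
    	if err != nil {
    		panic(err)
    	}
    	// Render folds the environment's variables, macros, and functions
    	// into the embedded authoring template along with the user request.
    	fmt.Println(prompt.Render("Only allow requests under /healthz."))
    }
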
diff --git a/vendor/github.com/google/cel-go/cel/templates/authoring.tmpl b/vendor/github.com/google/cel-go/cel/templates/authoring.tmpl
new file mode 100644
index 000000000..d6b3da5c6
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/templates/authoring.tmpl
@@ -0,0 +1,56 @@
+{{define "variable"}}{{.Name}} is a {{.Type}}
+{{- end -}}
+
+{{define "macro" -}}
+{{.Name}} macro{{if .Description}} - {{range split .Description}}{{.}} {{end}}
+{{end}}
+{{range .Children}}{{range split .Description}} {{.}}
+{{end}}
+{{- end -}}
+{{- end -}}
+
+{{define "overload" -}}
+{{if .Children}}{{range .Children}}{{range split .Description}} {{.}}
+{{end}}
+{{- end -}}
+{{else}} {{.Signature}}
+{{end}}
+{{- end -}}
+
+{{define "function" -}}
+{{.Name}}{{if .Description}} - {{range split .Description}}{{.}} {{end}}
+{{end}}
+{{range .Children}}{{template "overload" .}}{{end}}
+{{- end -}}
+
+{{.Persona}}
+
+{{.FormatRules}}
+
+{{if or .Variables .Macros .Functions -}}
+Only use the following variables, macros, and functions in expressions.
+{{if .Variables}}
+Variables:
+
+{{range .Variables}}* {{template "variable" .}}
+{{end -}}
+
+{{end -}}
+{{if .Macros}}
+Macros:
+
+{{range .Macros}}* {{template "macro" .}}
+{{end -}}
+
+{{end -}}
+{{if .Functions}}
+Functions:
+
+{{range .Functions}}* {{template "function" .}}
+{{end -}}
+
+{{end -}}
+{{- end -}}
+{{.GeneralUsage}}
+
+{{.UserPrompt}}
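
For orientation, rendering this template against an environment with a single string variable request.path would produce approximately the following, with the default persona, format rules, and general usage prose elided here as angle-bracket placeholders (a sketch, not captured output; exact whitespace depends on the trim markers above):

    <persona>

    <format rules>

    Only use the following variables, macros, and functions in expressions.

    Variables:

    * request.path is a string

    Functions:

    * ...one entry per visible function and its overload signatures...

    <general usage>

    <user prompt>
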
diff --git a/vendor/github.com/google/cel-go/cel/validator.go b/vendor/github.com/google/cel-go/cel/validator.go
index 78b311381..5f06b2dd5 100644
--- a/vendor/github.com/google/cel-go/cel/validator.go
+++ b/vendor/github.com/google/cel-go/cel/validator.go
@@ -20,13 +20,16 @@ import (
"regexp"
"github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/env"
"github.com/google/cel-go/common/overloads"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
const (
- homogeneousValidatorName = "cel.lib.std.validate.types.homogeneous"
+ durationValidatorName = "cel.validator.duration"
+ regexValidatorName = "cel.validator.matches"
+ timestampValidatorName = "cel.validator.timestamp"
+ homogeneousValidatorName = "cel.validator.homogeneous_literals"
+ nestingLimitValidatorName = "cel.validator.comprehension_nesting_limit"
// HomogeneousAggregateLiteralExemptFunctions is the ValidatorConfig key used to configure
// the set of function names which are exempt from homogeneous type checks. The expected type
@@ -38,6 +41,35 @@ const (
HomogeneousAggregateLiteralExemptFunctions = homogeneousValidatorName + ".exempt"
)
+var (
+ astValidatorFactories = map[string]ASTValidatorFactory{
+ nestingLimitValidatorName: func(val *env.Validator) (ASTValidator, error) {
+ if limit, found := val.ConfigValue("limit"); found {
+ if val, isInt := limit.(int); isInt {
+ return ValidateComprehensionNestingLimit(val), nil
+ }
+ return nil, fmt.Errorf("invalid validator: %s unsupported limit type: %v", nestingLimitValidatorName, limit)
+ }
+ return nil, fmt.Errorf("invalid validator: %s missing limit", nestingLimitValidatorName)
+ },
+ durationValidatorName: func(*env.Validator) (ASTValidator, error) {
+ return ValidateDurationLiterals(), nil
+ },
+ regexValidatorName: func(*env.Validator) (ASTValidator, error) {
+ return ValidateRegexLiterals(), nil
+ },
+ timestampValidatorName: func(*env.Validator) (ASTValidator, error) {
+ return ValidateTimestampLiterals(), nil
+ },
+ homogeneousValidatorName: func(*env.Validator) (ASTValidator, error) {
+ return ValidateHomogeneousAggregateLiterals(), nil
+ },
+ }
+)
+
+// ASTValidatorFactory creates an ASTValidator as configured by the input env.Validator.
+type ASTValidatorFactory func(*env.Validator) (ASTValidator, error)
+
// ASTValidators configures a set of ASTValidator instances into the target environment.
//
// Validators are applied in the order in which they are specified and are treated as singletons.
@@ -69,7 +101,19 @@ type ASTValidator interface {
//
// See individual validators for more information on their configuration keys and configuration
// properties.
- Validate(*Env, ValidatorConfig, *ast.CheckedAST, *Issues)
+ Validate(*Env, ValidatorConfig, *ast.AST, *Issues)
+}
+
+// ConfigurableASTValidator supports conversion of an object to an `env.Validator` instance used for
+// YAML serialization.
+type ConfigurableASTValidator interface {
+ // ToConfig converts the internal configuration of an ASTValidator into an env.Validator instance
+ // which minimally must include the validator name, but may also include a map[string]any config
+ // object to be serialized to YAML. The string keys represent the configuration parameter name,
+ // and the any value must mirror the internally supported type associated with the config key.
+ //
+ // Note: only primitive CEL types are supported by CEL validators at this time.
+ ToConfig() *env.Validator
}
// ValidatorConfig provides an accessor method for querying validator configuration state.
@@ -180,7 +224,7 @@ func ValidateComprehensionNestingLimit(limit int) ASTValidator {
return nestingLimitValidator{limit: limit}
}
-type argChecker func(env *Env, call, arg ast.NavigableExpr) error
+type argChecker func(env *Env, call, arg ast.Expr) error
func newFormatValidator(funcName string, argNum int, check argChecker) formatValidator {
return formatValidator{
@@ -198,13 +242,18 @@ type formatValidator struct {
// Name returns the unique name of this function format validator.
func (v formatValidator) Name() string {
- return fmt.Sprintf("cel.lib.std.validate.functions.%s", v.funcName)
+ return fmt.Sprintf("cel.validator.%s", v.funcName)
+}
+
+// ToConfig converts the ASTValidator to an env.Validator specifying the validator name.
+func (v formatValidator) ToConfig() *env.Validator {
+ return env.NewValidator(v.Name())
}
// Validate searches the AST for uses of a given function name with a constant argument and performs a check
// on whether the argument is a valid literal value.
-func (v formatValidator) Validate(e *Env, _ ValidatorConfig, a *ast.CheckedAST, iss *Issues) {
- root := ast.NavigateCheckedAST(a)
+func (v formatValidator) Validate(e *Env, _ ValidatorConfig, a *ast.AST, iss *Issues) {
+ root := ast.NavigateAST(a)
funcCalls := ast.MatchDescendants(root, ast.FunctionMatcher(v.funcName))
for _, call := range funcCalls {
callArgs := call.AsCall().Args()
@@ -221,8 +270,8 @@ func (v formatValidator) Validate(e *Env, _ ValidatorConfig, a *ast.CheckedAST,
}
}
-func evalCall(env *Env, call, arg ast.NavigableExpr) error {
- ast := ParsedExprToAst(&exprpb.ParsedExpr{Expr: call.ToExpr()})
+func evalCall(env *Env, call, arg ast.Expr) error {
+ ast := &Ast{impl: ast.NewAST(call, ast.NewSourceInfo(nil))}
prg, err := env.Program(ast)
if err != nil {
return err
@@ -231,7 +280,7 @@ func evalCall(env *Env, call, arg ast.NavigableExpr) error {
return err
}
-func compileRegex(_ *Env, _, arg ast.NavigableExpr) error {
+func compileRegex(_ *Env, _, arg ast.Expr) error {
pattern := arg.AsLiteral().Value().(string)
_, err := regexp.Compile(pattern)
return err
@@ -244,25 +293,19 @@ func (homogeneousAggregateLiteralValidator) Name() string {
return homogeneousValidatorName
}
-// Configure implements the ASTValidatorConfigurer interface and currently sets the list of standard
-// and exempt functions from homogeneous aggregate literal checks.
-//
-// TODO: Move this call into the string.format() ASTValidator once ported.
-func (homogeneousAggregateLiteralValidator) Configure(c MutableValidatorConfig) error {
- emptyList := []string{}
- exemptFunctions := c.GetOrDefault(HomogeneousAggregateLiteralExemptFunctions, emptyList).([]string)
- exemptFunctions = append(exemptFunctions, "format")
- return c.Set(HomogeneousAggregateLiteralExemptFunctions, exemptFunctions)
+// ToConfig converts the ASTValidator to an env.Validator specifying the validator name.
+func (v homogeneousAggregateLiteralValidator) ToConfig() *env.Validator {
+ return env.NewValidator(v.Name())
}
// Validate validates that all lists and map literals have homogeneous types, i.e. don't contain dyn types.
//
// This validator makes an exception for list and map literals which occur at any level of nesting within
// string format calls.
-func (v homogeneousAggregateLiteralValidator) Validate(_ *Env, c ValidatorConfig, a *ast.CheckedAST, iss *Issues) {
+func (v homogeneousAggregateLiteralValidator) Validate(_ *Env, c ValidatorConfig, a *ast.AST, iss *Issues) {
var exemptedFunctions []string
exemptedFunctions = c.GetOrDefault(HomogeneousAggregateLiteralExemptFunctions, exemptedFunctions).([]string)
- root := ast.NavigateCheckedAST(a)
+ root := ast.NavigateAST(a)
listExprs := ast.MatchDescendants(root, ast.KindMatcher(ast.ListKind))
for _, listExpr := range listExprs {
if inExemptFunction(listExpr, exemptedFunctions) {
@@ -273,7 +316,7 @@ func (v homogeneousAggregateLiteralValidator) Validate(_ *Env, c ValidatorConfig
optIndices := l.OptionalIndices()
var elemType *Type
for i, e := range elements {
- et := e.Type()
+ et := a.GetType(e.ID())
if isOptionalIndex(i, optIndices) {
et = et.Parameters()[0]
}
@@ -296,9 +339,10 @@ func (v homogeneousAggregateLiteralValidator) Validate(_ *Env, c ValidatorConfig
entries := m.Entries()
var keyType, valType *Type
for _, e := range entries {
- key, val := e.Key(), e.Value()
- kt, vt := key.Type(), val.Type()
- if e.IsOptional() {
+ mapEntry := e.AsMapEntry()
+ key, val := mapEntry.Key(), mapEntry.Value()
+ kt, vt := a.GetType(key.ID()), a.GetType(val.ID())
+ if mapEntry.IsOptional() {
vt = vt.Parameters()[0]
}
if keyType == nil && valType == nil {
@@ -316,7 +360,8 @@ func (v homogeneousAggregateLiteralValidator) Validate(_ *Env, c ValidatorConfig
}
func inExemptFunction(e ast.NavigableExpr, exemptFunctions []string) bool {
- if parent, found := e.Parent(); found {
+ parent, found := e.Parent()
+ for found {
if parent.Kind() == ast.CallKind {
fnName := parent.AsCall().FunctionName()
for _, exempt := range exemptFunctions {
@@ -325,9 +370,7 @@ func inExemptFunction(e ast.NavigableExpr, exemptFunctions []string) bool {
}
}
}
- if parent.Kind() == ast.ListKind || parent.Kind() == ast.MapKind {
- return inExemptFunction(parent, exemptFunctions)
- }
+ parent, found = parent.Parent()
}
return false
}
@@ -349,12 +392,20 @@ type nestingLimitValidator struct {
limit int
}
+// Name returns the name of the nesting limit validator.
func (v nestingLimitValidator) Name() string {
- return "cel.lib.std.validate.comprehension_nesting_limit"
+ return nestingLimitValidatorName
+}
+
+// ToConfig converts the ASTValidator to an env.Validator specifying the validator name and the nesting limit
+// as an integer value: {"limit": int}
+func (v nestingLimitValidator) ToConfig() *env.Validator {
+ return env.NewValidator(v.Name()).SetConfig(map[string]any{"limit": v.limit})
}
-func (v nestingLimitValidator) Validate(e *Env, _ ValidatorConfig, a *ast.CheckedAST, iss *Issues) {
- root := ast.NavigateCheckedAST(a)
+// Validate implements the ASTValidator interface method.
+func (v nestingLimitValidator) Validate(e *Env, _ ValidatorConfig, a *ast.AST, iss *Issues) {
+ root := ast.NavigateAST(a)
comprehensions := ast.MatchDescendants(root, ast.KindMatcher(ast.ComprehensionKind))
if len(comprehensions) <= v.limit {
return
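
A configuration sketch tying the renamed validators together; only option names that appear in or are referenced by this diff are used, and the expression is illustrative:

    package main

    import (
    	"fmt"

    	"github.com/google/cel-go/cel"
    )

    func main() {
    	env, err := cel.NewEnv(
    		cel.ASTValidators(
    			cel.ValidateDurationLiterals(),
    			cel.ValidateRegexLiterals(),
    			cel.ValidateTimestampLiterals(),
    			cel.ValidateComprehensionNestingLimit(1),
    		),
    	)
    	if err != nil {
    		panic(err)
    	}
    	// With a limit of 1, the doubly nested comprehension below should be
    	// rejected at compile time by cel.validator.comprehension_nesting_limit.
    	_, iss := env.Compile("[1, 2].all(i, [3, 4].all(j, i < j))")
    	fmt.Println(iss.Err())
    }
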
diff --git a/vendor/github.com/google/cel-go/checker/BUILD.bazel b/vendor/github.com/google/cel-go/checker/BUILD.bazel
index 0459d3523..678b412a9 100644
--- a/vendor/github.com/google/cel-go/checker/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/checker/BUILD.bazel
@@ -16,7 +16,6 @@ go_library(
"options.go",
"printer.go",
"scopes.go",
- "standard.go",
"types.go",
],
importpath = "github.com/google/cel-go/checker",
@@ -60,7 +59,6 @@ go_test(
"//test:go_default_library",
"//test/proto2pb:go_default_library",
"//test/proto3pb:go_default_library",
- "@com_github_antlr_antlr4_runtime_go_antlr_v4//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)
diff --git a/vendor/github.com/google/cel-go/checker/checker.go b/vendor/github.com/google/cel-go/checker/checker.go
index 720e4fa96..0057c16cc 100644
--- a/vendor/github.com/google/cel-go/checker/checker.go
+++ b/vendor/github.com/google/cel-go/checker/checker.go
@@ -18,6 +18,7 @@ package checker
import (
"fmt"
+ "reflect"
"github.com/google/cel-go/common"
"github.com/google/cel-go/common/ast"
@@ -25,139 +26,98 @@ import (
"github.com/google/cel-go/common/decls"
"github.com/google/cel-go/common/operators"
"github.com/google/cel-go/common/types"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ "github.com/google/cel-go/common/types/ref"
)
type checker struct {
+ *ast.AST
+ ast.ExprFactory
env *Env
errors *typeErrors
mappings *mapping
freeTypeVarCounter int
- sourceInfo *exprpb.SourceInfo
- types map[int64]*types.Type
- references map[int64]*ast.ReferenceInfo
}
// Check performs type checking, giving a typed AST.
-// The input is a ParsedExpr proto and an env which encapsulates
-// type binding of variables, declarations of built-in functions,
-// descriptions of protocol buffers, and a registry for errors.
-// Returns a CheckedExpr proto, which might not be usable if
-// there are errors in the error registry.
-func Check(parsedExpr *exprpb.ParsedExpr, source common.Source, env *Env) (*ast.CheckedAST, *common.Errors) {
+//
+// The input is a parsed AST and an env which encapsulates type binding of variables,
+// declarations of built-in functions, descriptions of protocol buffers, and a registry for
+// errors.
+//
+// Returns a type-checked AST, which might not be usable if there are errors in the error
+// registry.
+func Check(parsed *ast.AST, source common.Source, env *Env) (*ast.AST, *common.Errors) {
errs := common.NewErrors(source)
+ typeMap := make(map[int64]*types.Type)
+ refMap := make(map[int64]*ast.ReferenceInfo)
c := checker{
+ AST: ast.NewCheckedAST(parsed, typeMap, refMap),
+ ExprFactory: ast.NewExprFactory(),
env: env,
errors: &typeErrors{errs: errs},
mappings: newMapping(),
freeTypeVarCounter: 0,
- sourceInfo: parsedExpr.GetSourceInfo(),
- types: make(map[int64]*types.Type),
- references: make(map[int64]*ast.ReferenceInfo),
}
- c.check(parsedExpr.GetExpr())
+ c.check(c.Expr())
- // Walk over the final type map substituting any type parameters either by their bound value or
- // by DYN.
- m := make(map[int64]*types.Type)
- for id, t := range c.types {
- m[id] = substitute(c.mappings, t, true)
+ // Walk over the final type map substituting any type parameters either by their bound value
+ // or by DYN.
+ for id, t := range c.TypeMap() {
+ c.SetType(id, substitute(c.mappings, t, true))
}
-
- return &ast.CheckedAST{
- Expr: parsedExpr.GetExpr(),
- SourceInfo: parsedExpr.GetSourceInfo(),
- TypeMap: m,
- ReferenceMap: c.references,
- }, errs
+ return c.AST, errs
}
-func (c *checker) check(e *exprpb.Expr) {
+func (c *checker) check(e ast.Expr) {
if e == nil {
return
}
- switch e.GetExprKind().(type) {
- case *exprpb.Expr_ConstExpr:
- literal := e.GetConstExpr()
- switch literal.GetConstantKind().(type) {
- case *exprpb.Constant_BoolValue:
- c.checkBoolLiteral(e)
- case *exprpb.Constant_BytesValue:
- c.checkBytesLiteral(e)
- case *exprpb.Constant_DoubleValue:
- c.checkDoubleLiteral(e)
- case *exprpb.Constant_Int64Value:
- c.checkInt64Literal(e)
- case *exprpb.Constant_NullValue:
- c.checkNullLiteral(e)
- case *exprpb.Constant_StringValue:
- c.checkStringLiteral(e)
- case *exprpb.Constant_Uint64Value:
- c.checkUint64Literal(e)
+ switch e.Kind() {
+ case ast.LiteralKind:
+ literal := ref.Val(e.AsLiteral())
+ switch literal.Type() {
+ case types.BoolType, types.BytesType, types.DoubleType, types.IntType,
+ types.NullType, types.StringType, types.UintType:
+ c.setType(e, literal.Type().(*types.Type))
+ default:
+ c.errors.unexpectedASTType(e.ID(), c.location(e), "literal", literal.Type().TypeName())
}
- case *exprpb.Expr_IdentExpr:
+ case ast.IdentKind:
c.checkIdent(e)
- case *exprpb.Expr_SelectExpr:
+ case ast.SelectKind:
c.checkSelect(e)
- case *exprpb.Expr_CallExpr:
+ case ast.CallKind:
c.checkCall(e)
- case *exprpb.Expr_ListExpr:
+ case ast.ListKind:
c.checkCreateList(e)
- case *exprpb.Expr_StructExpr:
+ case ast.MapKind:
+ c.checkCreateMap(e)
+ case ast.StructKind:
c.checkCreateStruct(e)
- case *exprpb.Expr_ComprehensionExpr:
+ case ast.ComprehensionKind:
c.checkComprehension(e)
default:
- c.errors.unexpectedASTType(e.GetId(), c.location(e), e)
+ c.errors.unexpectedASTType(e.ID(), c.location(e), "unspecified", reflect.TypeOf(e).Name())
}
}
-func (c *checker) checkInt64Literal(e *exprpb.Expr) {
- c.setType(e, types.IntType)
-}
-
-func (c *checker) checkUint64Literal(e *exprpb.Expr) {
- c.setType(e, types.UintType)
-}
-
-func (c *checker) checkStringLiteral(e *exprpb.Expr) {
- c.setType(e, types.StringType)
-}
-
-func (c *checker) checkBytesLiteral(e *exprpb.Expr) {
- c.setType(e, types.BytesType)
-}
-
-func (c *checker) checkDoubleLiteral(e *exprpb.Expr) {
- c.setType(e, types.DoubleType)
-}
-
-func (c *checker) checkBoolLiteral(e *exprpb.Expr) {
- c.setType(e, types.BoolType)
-}
-
-func (c *checker) checkNullLiteral(e *exprpb.Expr) {
- c.setType(e, types.NullType)
-}
-
-func (c *checker) checkIdent(e *exprpb.Expr) {
- identExpr := e.GetIdentExpr()
+func (c *checker) checkIdent(e ast.Expr) {
+ identName := e.AsIdent()
// Check to see if the identifier is declared.
- if ident := c.env.LookupIdent(identExpr.GetName()); ident != nil {
+ if ident := c.env.LookupIdent(identName); ident != nil {
c.setType(e, ident.Type())
c.setReference(e, ast.NewIdentReference(ident.Name(), ident.Value()))
// Overwrite the identifier with its fully qualified name.
- identExpr.Name = ident.Name()
+ e.SetKindCase(c.NewIdent(e.ID(), ident.Name()))
return
}
c.setType(e, types.ErrorType)
- c.errors.undeclaredReference(e.GetId(), c.location(e), c.env.container.Name(), identExpr.GetName())
+ c.errors.undeclaredReference(e.ID(), c.location(e), c.env.container.Name(), identName)
}
-func (c *checker) checkSelect(e *exprpb.Expr) {
- sel := e.GetSelectExpr()
+func (c *checker) checkSelect(e ast.Expr) {
+ sel := e.AsSelect()
// Before traversing down the tree, try to interpret as qualified name.
qname, found := containers.ToQualifiedName(e)
if found {
@@ -170,31 +130,37 @@ func (c *checker) checkSelect(e *exprpb.Expr) {
// variable name.
c.setType(e, ident.Type())
c.setReference(e, ast.NewIdentReference(ident.Name(), ident.Value()))
- identName := ident.Name()
- e.ExprKind = &exprpb.Expr_IdentExpr{
- IdentExpr: &exprpb.Expr_Ident{
- Name: identName,
- },
- }
+ e.SetKindCase(c.NewIdent(e.ID(), ident.Name()))
return
}
}
- resultType := c.checkSelectField(e, sel.GetOperand(), sel.GetField(), false)
- if sel.TestOnly {
+ resultType := c.checkSelectField(e, sel.Operand(), sel.FieldName(), false)
+ if sel.IsTestOnly() {
resultType = types.BoolType
}
c.setType(e, substitute(c.mappings, resultType, false))
}
-func (c *checker) checkOptSelect(e *exprpb.Expr) {
+func (c *checker) checkOptSelect(e ast.Expr) {
// Collect metadata related to the opt select call packaged by the parser.
- call := e.GetCallExpr()
- operand := call.GetArgs()[0]
- field := call.GetArgs()[1]
+ call := e.AsCall()
+ if len(call.Args()) != 2 || call.IsMemberFunction() {
+ t := ""
+ if call.IsMemberFunction() {
+ t = " member call with"
+ }
+ c.errors.notAnOptionalFieldSelectionCall(e.ID(), c.location(e),
+ fmt.Sprintf(
+ "incorrect signature.%s argument count: %d", t, len(call.Args())))
+ return
+ }
+
+ operand := call.Args()[0]
+ field := call.Args()[1]
fieldName, isString := maybeUnwrapString(field)
if !isString {
- c.errors.notAnOptionalFieldSelection(field.GetId(), c.location(field), field)
+ c.errors.notAnOptionalFieldSelection(field.ID(), c.location(field), field)
return
}
@@ -204,7 +170,7 @@ func (c *checker) checkOptSelect(e *exprpb.Expr) {
c.setReference(e, ast.NewFunctionReference("select_optional_field"))
}
-func (c *checker) checkSelectField(e, operand *exprpb.Expr, field string, optional bool) *types.Type {
+func (c *checker) checkSelectField(e, operand ast.Expr, field string, optional bool) *types.Type {
// Interpret as field selection, first traversing down the operand.
c.check(operand)
operandType := substitute(c.mappings, c.getType(operand), false)
@@ -222,7 +188,7 @@ func (c *checker) checkSelectField(e, operand *exprpb.Expr, field string, option
// Objects yield their field type declaration as the selection result type, but only if
// the field is defined.
messageType := targetType
- if fieldType, found := c.lookupFieldType(e.GetId(), messageType.TypeName(), field); found {
+ if fieldType, found := c.lookupFieldType(e.ID(), messageType.TypeName(), field); found {
resultType = fieldType
}
case types.TypeParamKind:
@@ -236,7 +202,7 @@ func (c *checker) checkSelectField(e, operand *exprpb.Expr, field string, option
// Dynamic / error values are treated as DYN type. Errors are handled this way as well
// in order to allow forward progress on the check.
if !isDynOrError(targetType) {
- c.errors.typeDoesNotSupportFieldSelection(e.GetId(), c.location(e), targetType)
+ c.errors.typeDoesNotSupportFieldSelection(e.ID(), c.location(e), targetType)
}
resultType = types.DynType
}
@@ -248,35 +214,34 @@ func (c *checker) checkSelectField(e, operand *exprpb.Expr, field string, option
return resultType
}
-func (c *checker) checkCall(e *exprpb.Expr) {
+func (c *checker) checkCall(e ast.Expr) {
// Note: similar logic exists within the `interpreter/planner.go`. If making changes here
// please consider the impact on planner.go and consolidate implementations or mirror code
// as appropriate.
- call := e.GetCallExpr()
- fnName := call.GetFunction()
+ call := e.AsCall()
+ fnName := call.FunctionName()
if fnName == operators.OptSelect {
c.checkOptSelect(e)
return
}
- args := call.GetArgs()
+ args := call.Args()
// Traverse arguments.
for _, arg := range args {
c.check(arg)
}
- target := call.GetTarget()
// Regular static call with simple name.
- if target == nil {
+ if !call.IsMemberFunction() {
// Check for the existence of the function.
fn := c.env.LookupFunction(fnName)
if fn == nil {
- c.errors.undeclaredReference(e.GetId(), c.location(e), c.env.container.Name(), fnName)
+ c.errors.undeclaredReference(e.ID(), c.location(e), c.env.container.Name(), fnName)
c.setType(e, types.ErrorType)
return
}
// Overwrite the function name with its fully qualified resolved name.
- call.Function = fn.Name()
+ e.SetKindCase(c.NewCall(e.ID(), fn.Name(), args...))
// Check to see whether the overload resolves.
c.resolveOverloadOrError(e, fn, nil, args)
return
@@ -287,6 +252,7 @@ func (c *checker) checkCall(e *exprpb.Expr) {
// target a.b.
//
// Check whether the target is a namespaced function name.
+ target := call.Target()
qualifiedPrefix, maybeQualified := containers.ToQualifiedName(target)
if maybeQualified {
maybeQualifiedName := qualifiedPrefix + "." + fnName
@@ -295,15 +261,14 @@ func (c *checker) checkCall(e *exprpb.Expr) {
// The function name is namespaced and so preserving the target operand would
// be an inaccurate representation of the desired evaluation behavior.
// Overwrite with fully-qualified resolved function name sans receiver target.
- call.Target = nil
- call.Function = fn.Name()
+ e.SetKindCase(c.NewCall(e.ID(), fn.Name(), args...))
c.resolveOverloadOrError(e, fn, nil, args)
return
}
}
// Regular instance call.
- c.check(call.Target)
+ c.check(target)
fn := c.env.LookupFunction(fnName)
// Function found, attempt overload resolution.
if fn != nil {
@@ -312,11 +277,11 @@ func (c *checker) checkCall(e *exprpb.Expr) {
}
// Function name not declared, record error.
c.setType(e, types.ErrorType)
- c.errors.undeclaredReference(e.GetId(), c.location(e), c.env.container.Name(), fnName)
+ c.errors.undeclaredReference(e.ID(), c.location(e), c.env.container.Name(), fnName)
}
func (c *checker) resolveOverloadOrError(
- e *exprpb.Expr, fn *decls.FunctionDecl, target *exprpb.Expr, args []*exprpb.Expr) {
+ e ast.Expr, fn *decls.FunctionDecl, target ast.Expr, args []ast.Expr) {
// Attempt to resolve the overload.
resolution := c.resolveOverload(e, fn, target, args)
// No such overload, error noted in the resolveOverload call, type recorded here.
@@ -330,7 +295,7 @@ func (c *checker) resolveOverloadOrError(
}
func (c *checker) resolveOverload(
- call *exprpb.Expr, fn *decls.FunctionDecl, target *exprpb.Expr, args []*exprpb.Expr) *overloadResolution {
+ call ast.Expr, fn *decls.FunctionDecl, target ast.Expr, args []ast.Expr) *overloadResolution {
var argTypes []*types.Type
if target != nil {
@@ -362,8 +327,8 @@ func (c *checker) resolveOverload(
for i, argType := range argTypes {
if !c.isAssignable(argType, types.BoolType) {
c.errors.typeMismatch(
- args[i].GetId(),
- c.locationByID(args[i].GetId()),
+ args[i].ID(),
+ c.locationByID(args[i].ID()),
types.BoolType,
argType)
resultType = types.ErrorType
@@ -408,29 +373,29 @@ func (c *checker) resolveOverload(
for i, argType := range argTypes {
argTypes[i] = substitute(c.mappings, argType, true)
}
- c.errors.noMatchingOverload(call.GetId(), c.location(call), fn.Name(), argTypes, target != nil)
+ c.errors.noMatchingOverload(call.ID(), c.location(call), fn.Name(), argTypes, target != nil)
return nil
}
return newResolution(checkedRef, resultType)
}
-func (c *checker) checkCreateList(e *exprpb.Expr) {
- create := e.GetListExpr()
+func (c *checker) checkCreateList(e ast.Expr) {
+ create := e.AsList()
var elemsType *types.Type
- optionalIndices := create.GetOptionalIndices()
+ optionalIndices := create.OptionalIndices()
optionals := make(map[int32]bool, len(optionalIndices))
for _, optInd := range optionalIndices {
optionals[optInd] = true
}
- for i, e := range create.GetElements() {
+ for i, e := range create.Elements() {
c.check(e)
elemType := c.getType(e)
if optionals[int32(i)] {
var isOptional bool
elemType, isOptional = maybeUnwrapOptional(elemType)
if !isOptional && !isDyn(elemType) {
- c.errors.typeMismatch(e.GetId(), c.location(e), types.NewOptionalType(elemType), elemType)
+ c.errors.typeMismatch(e.ID(), c.location(e), types.NewOptionalType(elemType), elemType)
}
}
elemsType = c.joinTypes(e, elemsType, elemType)
@@ -442,32 +407,24 @@ func (c *checker) checkCreateList(e *exprpb.Expr) {
c.setType(e, types.NewListType(elemsType))
}
-func (c *checker) checkCreateStruct(e *exprpb.Expr) {
- str := e.GetStructExpr()
- if str.GetMessageName() != "" {
- c.checkCreateMessage(e)
- } else {
- c.checkCreateMap(e)
- }
-}
-
-func (c *checker) checkCreateMap(e *exprpb.Expr) {
- mapVal := e.GetStructExpr()
+func (c *checker) checkCreateMap(e ast.Expr) {
+ mapVal := e.AsMap()
var mapKeyType *types.Type
var mapValueType *types.Type
- for _, ent := range mapVal.GetEntries() {
- key := ent.GetMapKey()
+ for _, e := range mapVal.Entries() {
+ entry := e.AsMapEntry()
+ key := entry.Key()
c.check(key)
mapKeyType = c.joinTypes(key, mapKeyType, c.getType(key))
- val := ent.GetValue()
+ val := entry.Value()
c.check(val)
valType := c.getType(val)
- if ent.GetOptionalEntry() {
+ if entry.IsOptional() {
var isOptional bool
valType, isOptional = maybeUnwrapOptional(valType)
if !isOptional && !isDyn(valType) {
- c.errors.typeMismatch(val.GetId(), c.location(val), types.NewOptionalType(valType), valType)
+ c.errors.typeMismatch(val.ID(), c.location(val), types.NewOptionalType(valType), valType)
}
}
mapValueType = c.joinTypes(val, mapValueType, valType)
@@ -480,25 +437,28 @@ func (c *checker) checkCreateMap(e *exprpb.Expr) {
c.setType(e, types.NewMapType(mapKeyType, mapValueType))
}
-func (c *checker) checkCreateMessage(e *exprpb.Expr) {
- msgVal := e.GetStructExpr()
+func (c *checker) checkCreateStruct(e ast.Expr) {
+ msgVal := e.AsStruct()
// Determine the type of the message.
resultType := types.ErrorType
- ident := c.env.LookupIdent(msgVal.GetMessageName())
+ ident := c.env.LookupIdent(msgVal.TypeName())
if ident == nil {
c.errors.undeclaredReference(
- e.GetId(), c.location(e), c.env.container.Name(), msgVal.GetMessageName())
+ e.ID(), c.location(e), c.env.container.Name(), msgVal.TypeName())
c.setType(e, types.ErrorType)
return
}
// Ensure the type name is fully qualified in the AST.
typeName := ident.Name()
- msgVal.MessageName = typeName
- c.setReference(e, ast.NewIdentReference(ident.Name(), nil))
+ if msgVal.TypeName() != typeName {
+ e.SetKindCase(c.NewStruct(e.ID(), typeName, msgVal.Fields()))
+ msgVal = e.AsStruct()
+ }
+ c.setReference(e, ast.NewIdentReference(typeName, nil))
identKind := ident.Type().Kind()
if identKind != types.ErrorKind {
if identKind != types.TypeKind {
- c.errors.notAType(e.GetId(), c.location(e), ident.Type().DeclaredTypeName())
+ c.errors.notAType(e.ID(), c.location(e), ident.Type().DeclaredTypeName())
} else {
resultType = ident.Type().Parameters()[0]
// Backwards compatibility test between well-known types and message types
@@ -509,7 +469,7 @@ func (c *checker) checkCreateMessage(e *exprpb.Expr) {
} else if resultType.Kind() == types.StructKind {
typeName = resultType.DeclaredTypeName()
} else {
- c.errors.notAMessageType(e.GetId(), c.location(e), resultType.DeclaredTypeName())
+ c.errors.notAMessageType(e.ID(), c.location(e), resultType.DeclaredTypeName())
resultType = types.ErrorType
}
}
@@ -517,45 +477,62 @@ func (c *checker) checkCreateMessage(e *exprpb.Expr) {
c.setType(e, resultType)
// Check the field initializers.
- for _, ent := range msgVal.GetEntries() {
- field := ent.GetFieldKey()
- value := ent.GetValue()
+ for _, f := range msgVal.Fields() {
+ field := f.AsStructField()
+ fieldName := field.Name()
+ value := field.Value()
c.check(value)
fieldType := types.ErrorType
- ft, found := c.lookupFieldType(ent.GetId(), typeName, field)
+ ft, found := c.lookupFieldType(f.ID(), typeName, fieldName)
if found {
fieldType = ft
}
valType := c.getType(value)
- if ent.GetOptionalEntry() {
+ if field.IsOptional() {
var isOptional bool
valType, isOptional = maybeUnwrapOptional(valType)
if !isOptional && !isDyn(valType) {
- c.errors.typeMismatch(value.GetId(), c.location(value), types.NewOptionalType(valType), valType)
+ c.errors.typeMismatch(value.ID(), c.location(value), types.NewOptionalType(valType), valType)
}
}
if !c.isAssignable(fieldType, valType) {
- c.errors.fieldTypeMismatch(ent.GetId(), c.locationByID(ent.GetId()), field, fieldType, valType)
+ c.errors.fieldTypeMismatch(f.ID(), c.locationByID(f.ID()), fieldName, fieldType, valType)
}
}
}
-func (c *checker) checkComprehension(e *exprpb.Expr) {
- comp := e.GetComprehensionExpr()
- c.check(comp.GetIterRange())
- c.check(comp.GetAccuInit())
- accuType := c.getType(comp.GetAccuInit())
- rangeType := substitute(c.mappings, c.getType(comp.GetIterRange()), false)
- var varType *types.Type
+func (c *checker) checkComprehension(e ast.Expr) {
+ comp := e.AsComprehension()
+ c.check(comp.IterRange())
+ c.check(comp.AccuInit())
+ rangeType := substitute(c.mappings, c.getType(comp.IterRange()), false)
+ // Create a scope for the comprehension since it has a local accumulation variable.
+ // This scope will contain the accumulation variable used to compute the result.
+ accuType := c.getType(comp.AccuInit())
+ c.env = c.env.enterScope()
+ c.env.AddIdents(decls.NewVariable(comp.AccuVar(), accuType))
+
+ var varType, var2Type *types.Type
switch rangeType.Kind() {
case types.ListKind:
+ // varType represents the list element type for one-variable comprehensions.
varType = rangeType.Parameters()[0]
+ if comp.HasIterVar2() {
+ // varType represents the list index (int) for two-variable comprehensions,
+ // and var2Type represents the list element type.
+ var2Type = varType
+ varType = types.IntType
+ }
case types.MapKind:
- // Ranges over the keys.
+ // varType represents the map entry key for all comprehension types.
varType = rangeType.Parameters()[0]
+ if comp.HasIterVar2() {
+ // var2Type represents the map entry value for two-variable comprehensions.
+ var2Type = rangeType.Parameters()[1]
+ }
case types.DynKind, types.ErrorKind, types.TypeParamKind:
// Set the range type to DYN to prevent assignment to a potentially incorrect type
// at a later point in type-checking. The isAssignable call will update the type
@@ -563,33 +540,38 @@ func (c *checker) checkComprehension(e *exprpb.Expr) {
c.isAssignable(types.DynType, rangeType)
// Set the range iteration variable to type DYN as well.
varType = types.DynType
+ if comp.HasIterVar2() {
+ var2Type = types.DynType
+ }
default:
- c.errors.notAComprehensionRange(comp.GetIterRange().GetId(), c.location(comp.GetIterRange()), rangeType)
+ c.errors.notAComprehensionRange(comp.IterRange().ID(), c.location(comp.IterRange()), rangeType)
varType = types.ErrorType
+ if comp.HasIterVar2() {
+ var2Type = types.ErrorType
+ }
}
- // Create a scope for the comprehension since it has a local accumulation variable.
- // This scope will contain the accumulation variable used to compute the result.
- c.env = c.env.enterScope()
- c.env.AddIdents(decls.NewVariable(comp.GetAccuVar(), accuType))
// Create a block scope for the loop.
c.env = c.env.enterScope()
- c.env.AddIdents(decls.NewVariable(comp.GetIterVar(), varType))
+ c.env.AddIdents(decls.NewVariable(comp.IterVar(), varType))
+ if comp.HasIterVar2() {
+ c.env.AddIdents(decls.NewVariable(comp.IterVar2(), var2Type))
+ }
// Check the variable references in the condition and step.
- c.check(comp.GetLoopCondition())
- c.assertType(comp.GetLoopCondition(), types.BoolType)
- c.check(comp.GetLoopStep())
- c.assertType(comp.GetLoopStep(), accuType)
+ c.check(comp.LoopCondition())
+ c.assertType(comp.LoopCondition(), types.BoolType)
+ c.check(comp.LoopStep())
+ c.assertType(comp.LoopStep(), accuType)
// Exit the loop's block scope before checking the result.
c.env = c.env.exitScope()
- c.check(comp.GetResult())
+ c.check(comp.Result())
// Exit the comprehension scope.
c.env = c.env.exitScope()
- c.setType(e, substitute(c.mappings, c.getType(comp.GetResult()), false))
+ c.setType(e, substitute(c.mappings, c.getType(comp.Result()), false))
}
// Checks compatibility of joined types, and returns the most general common type.
-func (c *checker) joinTypes(e *exprpb.Expr, previous, current *types.Type) *types.Type {
+func (c *checker) joinTypes(e ast.Expr, previous, current *types.Type) *types.Type {
if previous == nil {
return current
}
@@ -599,7 +581,7 @@ func (c *checker) joinTypes(e *exprpb.Expr, previous, current *types.Type) *type
if c.dynAggregateLiteralElementTypesEnabled() {
return types.DynType
}
- c.errors.typeMismatch(e.GetId(), c.location(e), previous, current)
+ c.errors.typeMismatch(e.ID(), c.location(e), previous, current)
return types.ErrorType
}
@@ -633,41 +615,41 @@ func (c *checker) isAssignableList(l1, l2 []*types.Type) bool {
return false
}
-func maybeUnwrapString(e *exprpb.Expr) (string, bool) {
- switch e.GetExprKind().(type) {
- case *exprpb.Expr_ConstExpr:
- literal := e.GetConstExpr()
- switch literal.GetConstantKind().(type) {
- case *exprpb.Constant_StringValue:
- return literal.GetStringValue(), true
+func maybeUnwrapString(e ast.Expr) (string, bool) {
+ switch e.Kind() {
+ case ast.LiteralKind:
+ literal := e.AsLiteral()
+ switch v := literal.(type) {
+ case types.String:
+ return string(v), true
}
}
return "", false
}
-func (c *checker) setType(e *exprpb.Expr, t *types.Type) {
- if old, found := c.types[e.GetId()]; found && !old.IsExactType(t) {
- c.errors.incompatibleType(e.GetId(), c.location(e), e, old, t)
+func (c *checker) setType(e ast.Expr, t *types.Type) {
+ if old, found := c.TypeMap()[e.ID()]; found && !old.IsExactType(t) {
+ c.errors.incompatibleType(e.ID(), c.location(e), e, old, t)
return
}
- c.types[e.GetId()] = t
+ c.SetType(e.ID(), t)
}
-func (c *checker) getType(e *exprpb.Expr) *types.Type {
- return c.types[e.GetId()]
+func (c *checker) getType(e ast.Expr) *types.Type {
+ return c.TypeMap()[e.ID()]
}
-func (c *checker) setReference(e *exprpb.Expr, r *ast.ReferenceInfo) {
- if old, found := c.references[e.GetId()]; found && !old.Equals(r) {
- c.errors.referenceRedefinition(e.GetId(), c.location(e), e, old, r)
+func (c *checker) setReference(e ast.Expr, r *ast.ReferenceInfo) {
+ if old, found := c.ReferenceMap()[e.ID()]; found && !old.Equals(r) {
+ c.errors.referenceRedefinition(e.ID(), c.location(e), e, old, r)
return
}
- c.references[e.GetId()] = r
+ c.SetReference(e.ID(), r)
}
-func (c *checker) assertType(e *exprpb.Expr, t *types.Type) {
+func (c *checker) assertType(e ast.Expr, t *types.Type) {
if !c.isAssignable(t, c.getType(e)) {
- c.errors.typeMismatch(e.GetId(), c.location(e), t, c.getType(e))
+ c.errors.typeMismatch(e.ID(), c.location(e), t, c.getType(e))
}
}
@@ -683,26 +665,12 @@ func newResolution(r *ast.ReferenceInfo, t *types.Type) *overloadResolution {
}
}
-func (c *checker) location(e *exprpb.Expr) common.Location {
- return c.locationByID(e.GetId())
+func (c *checker) location(e ast.Expr) common.Location {
+ return c.locationByID(e.ID())
}
func (c *checker) locationByID(id int64) common.Location {
- positions := c.sourceInfo.GetPositions()
- var line = 1
- if offset, found := positions[id]; found {
- col := int(offset)
- for _, lineOffset := range c.sourceInfo.GetLineOffsets() {
- if lineOffset < offset {
- line++
- col = int(offset - lineOffset)
- } else {
- break
- }
- }
- return common.NewLocation(line, col)
- }
- return common.NoLocation
+ return c.SourceInfo().GetStartLocation(id)
}
func (c *checker) lookupFieldType(exprID int64, structType, fieldName string) (*types.Type, bool) {
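
The checkComprehension changes above add typing for two-variable comprehensions: over a list the variables are the int index and the element, over a map the key and the value. A sketch of what that enables, assuming the two-variable comprehension macros from cel-go's ext package (an assumption; this diff only vendors the checker support):

    package main

    import (
    	"fmt"

    	"github.com/google/cel-go/cel"
    	"github.com/google/cel-go/ext"
    )

    func main() {
    	env, err := cel.NewEnv(
    		ext.TwoVarComprehensions(),
    		cel.Variable("m", cel.MapType(cel.StringType, cel.IntType)),
    	)
    	if err != nil {
    		panic(err)
    	}
    	// The checker binds k to the map key type (string) and v to the
    	// value type (int) per the HasIterVar2 branch above.
    	_, iss := env.Compile("m.all(k, v, k != '' && v >= 0)")
    	fmt.Println(iss.Err()) // <nil> when the types line up
    }
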
diff --git a/vendor/github.com/google/cel-go/checker/cost.go b/vendor/github.com/google/cel-go/checker/cost.go
index fd3f73505..5bc6318ed 100644
--- a/vendor/github.com/google/cel-go/checker/cost.go
+++ b/vendor/github.com/google/cel-go/checker/cost.go
@@ -22,23 +22,26 @@ import (
"github.com/google/cel-go/common/overloads"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/parser"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
// WARNING: Any changes to cost calculations in this file require a corresponding change in interpreter/runtimecost.go
// CostEstimator estimates the sizes of variable length input data and the costs of functions.
type CostEstimator interface {
- // EstimateSize returns a SizeEstimate for the given AstNode, or nil if
- // the estimator has no estimate to provide. The size is equivalent to the result of the CEL `size()` function:
- // length of strings and bytes, number of map entries or number of list items.
- // EstimateSize is only called for AstNodes where
- // CEL does not know the size; EstimateSize is not called for values defined inline in CEL where the size
- // is already obvious to CEL.
+ // EstimateSize returns a SizeEstimate for the given AstNode, or nil if the estimator has no
+ // estimate to provide.
+ //
+ // The size is equivalent to the result of the CEL `size()` function:
+ // * Number of unicode characters in a string
+ // * Number of bytes in a sequence
+ // * Number of map entries or number of list items.
+ //
+ // EstimateSize is only called for AstNodes where CEL does not know the size; EstimateSize is not
+ // called for values defined inline in CEL where the size is already obvious to CEL.
EstimateSize(element AstNode) *SizeEstimate
- // EstimateCallCost returns the estimated cost of an invocation, or nil if
- // the estimator has no estimate to provide.
+
+ // EstimateCallCost returns the estimated cost of an invocation, or nil if the estimator has no
+ // estimate to provide.
EstimateCallCost(function, overloadID string, target *AstNode, args []AstNode) *CallEstimate
}
@@ -46,6 +49,7 @@ type CostEstimator interface {
// The ResultSize should only be provided if the call results in a map, list, string or bytes.
type CallEstimate struct {
CostEstimate
+
ResultSize *SizeEstimate
}
@@ -55,10 +59,13 @@ type AstNode interface {
// represent type directly reachable from the provided type declarations.
// The first path element is a variable. All subsequent path elements are one of: field name, '@items', '@keys', '@values'.
Path() []string
+
// Type returns the deduced type of the AstNode.
Type() *types.Type
+
// Expr returns the expression of the AstNode.
- Expr() *exprpb.Expr
+ Expr() ast.Expr
+
// ComputedSize returns a size estimate of the AstNode derived from information available in the CEL expression.
// For constants and inline list and map declarations, the exact size is returned. For concatenated list, strings
// and bytes, the size is derived from the size estimates of the operands. nil is returned if there is no
@@ -69,7 +76,7 @@ type AstNode interface {
type astNode struct {
path []string
t *types.Type
- expr *exprpb.Expr
+ expr ast.Expr
derivedSize *SizeEstimate
}
@@ -81,43 +88,12 @@ func (e astNode) Type() *types.Type {
return e.t
}
-func (e astNode) Expr() *exprpb.Expr {
+func (e astNode) Expr() ast.Expr {
return e.expr
}
func (e astNode) ComputedSize() *SizeEstimate {
- if e.derivedSize != nil {
- return e.derivedSize
- }
- var v uint64
- switch ek := e.expr.GetExprKind().(type) {
- case *exprpb.Expr_ConstExpr:
- switch ck := ek.ConstExpr.GetConstantKind().(type) {
- case *exprpb.Constant_StringValue:
- // converting to runes here is an O(n) operation, but
- // this is consistent with how size is computed at runtime,
- // and how the language definition defines string size
- v = uint64(len([]rune(ck.StringValue)))
- case *exprpb.Constant_BytesValue:
- v = uint64(len(ck.BytesValue))
- case *exprpb.Constant_BoolValue, *exprpb.Constant_DoubleValue, *exprpb.Constant_DurationValue,
- *exprpb.Constant_Int64Value, *exprpb.Constant_TimestampValue, *exprpb.Constant_Uint64Value,
- *exprpb.Constant_NullValue:
- v = uint64(1)
- default:
- return nil
- }
- case *exprpb.Expr_ListExpr:
- v = uint64(len(ek.ListExpr.GetElements()))
- case *exprpb.Expr_StructExpr:
- if ek.StructExpr.GetMessageName() == "" {
- v = uint64(len(ek.StructExpr.GetEntries()))
- }
- default:
- return nil
- }
-
- return &SizeEstimate{Min: v, Max: v}
+ return e.derivedSize
}
// SizeEstimate represents an estimated size of a variable length string, bytes, map or list.
@@ -125,6 +101,16 @@ type SizeEstimate struct {
Min, Max uint64
}
+// UnknownSizeEstimate returns a size estimate ranging from 0 to math.MaxUint64.
+func UnknownSizeEstimate() SizeEstimate {
+ return unknownSizeEstimate
+}
+
+// FixedSizeEstimate returns a size estimate with a fixed min and max range.
+func FixedSizeEstimate(size uint64) SizeEstimate {
+ return SizeEstimate{Min: size, Max: size}
+}
+
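+// For example (illustrative only), the two constructors differ only in the
+// range they report:
+//
+//	FixedSizeEstimate(3)   // SizeEstimate{Min: 3, Max: 3}
+//	UnknownSizeEstimate()  // SizeEstimate{Min: 0, Max: math.MaxUint64}
+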
// Add adds to another SizeEstimate and returns the sum.
// If add would result in an uint64 overflow, the result is math.MaxUint64.
func (se SizeEstimate) Add(sizeEstimate SizeEstimate) SizeEstimate {
@@ -179,12 +165,22 @@ type CostEstimate struct {
Min, Max uint64
}
+// UnknownCostEstimate returns a cost with an unknown impact.
+func UnknownCostEstimate() CostEstimate {
+ return unknownCostEstimate
+}
+
+// FixedCostEstimate returns a cost with a fixed min and max range.
+func FixedCostEstimate(cost uint64) CostEstimate {
+ return CostEstimate{Min: cost, Max: cost}
+}
+
// Add adds the costs and returns the sum.
// If add would result in an uint64 overflow for the min or max, the value is set to math.MaxUint64.
func (ce CostEstimate) Add(cost CostEstimate) CostEstimate {
return CostEstimate{
- addUint64NoOverflow(ce.Min, cost.Min),
- addUint64NoOverflow(ce.Max, cost.Max),
+ Min: addUint64NoOverflow(ce.Min, cost.Min),
+ Max: addUint64NoOverflow(ce.Max, cost.Max),
}
}
@@ -192,8 +188,8 @@ func (ce CostEstimate) Add(cost CostEstimate) CostEstimate {
// If multiply would result in an uint64 overflow, the result is math.MaxUint64.
func (ce CostEstimate) Multiply(cost CostEstimate) CostEstimate {
return CostEstimate{
- multiplyUint64NoOverflow(ce.Min, cost.Min),
- multiplyUint64NoOverflow(ce.Max, cost.Max),
+ Min: multiplyUint64NoOverflow(ce.Min, cost.Min),
+ Max: multiplyUint64NoOverflow(ce.Max, cost.Max),
}
}
@@ -201,8 +197,8 @@ func (ce CostEstimate) Multiply(cost CostEstimate) CostEstimate {
// nearest integer of the result, rounded up.
func (ce CostEstimate) MultiplyByCostFactor(costPerUnit float64) CostEstimate {
return CostEstimate{
- multiplyByCostFactor(ce.Min, costPerUnit),
- multiplyByCostFactor(ce.Max, costPerUnit),
+ Min: multiplyByCostFactor(ce.Min, costPerUnit),
+ Max: multiplyByCostFactor(ce.Max, costPerUnit),
}
}
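+
+// As an illustration of the saturating arithmetic above (values are examples
+// only), overflow clamps to math.MaxUint64 instead of wrapping:
+//
+//	a := FixedCostEstimate(math.MaxUint64)
+//	b := FixedCostEstimate(2)
+//	sum := a.Add(b) // CostEstimate{Min: math.MaxUint64, Max: math.MaxUint64}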
@@ -249,49 +245,6 @@ func multiplyByCostFactor(x uint64, y float64) uint64 {
return uint64(ceil)
}
-var (
- selectAndIdentCost = CostEstimate{Min: common.SelectAndIdentCost, Max: common.SelectAndIdentCost}
- constCost = CostEstimate{Min: common.ConstCost, Max: common.ConstCost}
-
- createListBaseCost = CostEstimate{Min: common.ListCreateBaseCost, Max: common.ListCreateBaseCost}
- createMapBaseCost = CostEstimate{Min: common.MapCreateBaseCost, Max: common.MapCreateBaseCost}
- createMessageBaseCost = CostEstimate{Min: common.StructCreateBaseCost, Max: common.StructCreateBaseCost}
-)
-
-type coster struct {
- // exprPath maps from Expr Id to field path.
- exprPath map[int64][]string
- // iterRanges tracks the iterRange of each iterVar.
- iterRanges iterRangeScopes
- // computedSizes tracks the computed sizes of call results.
- computedSizes map[int64]SizeEstimate
- checkedAST *ast.CheckedAST
- estimator CostEstimator
- overloadEstimators map[string]FunctionEstimator
- // presenceTestCost will either be a zero or one based on whether has() macros count against cost computations.
- presenceTestCost CostEstimate
-}
-
-// Use a stack of iterVar -> iterRange Expr Ids to handle shadowed variable names.
-type iterRangeScopes map[string][]int64
-
-func (vs iterRangeScopes) push(varName string, expr *exprpb.Expr) {
- vs[varName] = append(vs[varName], expr.GetId())
-}
-
-func (vs iterRangeScopes) pop(varName string) {
- varStack := vs[varName]
- vs[varName] = varStack[:len(varStack)-1]
-}
-
-func (vs iterRangeScopes) peek(varName string) (int64, bool) {
- varStack := vs[varName]
- if len(varStack) > 0 {
- return varStack[len(varStack)-1], true
- }
- return 0, false
-}
-
// CostOption configures flags which affect cost computations.
type CostOption func(*coster) error
@@ -304,7 +257,7 @@ func PresenceTestHasCost(hasCost bool) CostOption {
c.presenceTestCost = selectAndIdentCost
return nil
}
- c.presenceTestCost = CostEstimate{Min: 0, Max: 0}
+ c.presenceTestCost = FixedCostEstimate(0)
return nil
}
}
@@ -324,15 +277,16 @@ func OverloadCostEstimate(overloadID string, functionCoster FunctionEstimator) C
}
// Cost estimates the cost of the parsed and type checked CEL expression.
-func Cost(checker *ast.CheckedAST, estimator CostEstimator, opts ...CostOption) (CostEstimate, error) {
+func Cost(checked *ast.AST, estimator CostEstimator, opts ...CostOption) (CostEstimate, error) {
c := &coster{
- checkedAST: checker,
+ checkedAST: checked,
estimator: estimator,
overloadEstimators: map[string]FunctionEstimator{},
- exprPath: map[int64][]string{},
- iterRanges: map[string][]int64{},
+ exprPaths: map[int64][]string{},
+ localVars: make(scopes),
computedSizes: map[int64]SizeEstimate{},
- presenceTestCost: CostEstimate{Min: 1, Max: 1},
+ computedEntrySizes: map[int64]entrySizeEstimate{},
+ presenceTestCost: FixedCostEstimate(1),
}
for _, opt := range opts {
err := opt(c)
@@ -340,83 +294,244 @@ func Cost(checker *ast.CheckedAST, estimator CostEstimator, opts ...CostOption)
return CostEstimate{}, err
}
}
- return c.cost(checker.Expr), nil
+ return c.cost(checked.Expr()), nil
+}
+
+type coster struct {
+ // exprPaths maps from Expr Id to field path.
+ exprPaths map[int64][]string
+ // localVars tracks the local and iteration variables assigned during evaluation.
+ localVars scopes
+ // computedSizes tracks the computed sizes of call results.
+ computedSizes map[int64]SizeEstimate
+ // computedEntrySizes tracks the sizes of list and map entries.
+ computedEntrySizes map[int64]entrySizeEstimate
+
+ checkedAST *ast.AST
+ estimator CostEstimator
+ overloadEstimators map[string]FunctionEstimator
+ // presenceTestCost will be either zero or one depending on whether has() macros count against cost computations.
+ presenceTestCost CostEstimate
+}
+
+// entrySizeEstimate captures the container kind and associated key/index and value SizeEstimate values.
+//
+// An entrySizeEstimate only exists if both the key/index and the value have SizeEstimate values;
+// otherwise a nil entrySizeEstimate should be used.
+type entrySizeEstimate struct {
+ containerKind types.Kind
+ key SizeEstimate
+ val SizeEstimate
+}
+
+// container returns the container kind (list or map) of the entry.
+func (s *entrySizeEstimate) container() types.Kind {
+ if s == nil {
+ return types.UnknownKind
+ }
+ return s.containerKind
+}
+
+// keySize returns the SizeEstimate for the key if one exists.
+func (s *entrySizeEstimate) keySize() *SizeEstimate {
+ if s == nil {
+ return nil
+ }
+ return &s.key
+}
+
+// valSize returns the SizeEstimate for the value if one exists.
+func (s *entrySizeEstimate) valSize() *SizeEstimate {
+ if s == nil {
+ return nil
+ }
+ return &s.val
+}
+
+func (s *entrySizeEstimate) union(other *entrySizeEstimate) *entrySizeEstimate {
+ if s == nil || other == nil {
+ return nil
+ }
+ sk := s.key.Union(other.key)
+ sv := s.val.Union(other.val)
+ return &entrySizeEstimate{
+ containerKind: s.containerKind,
+ key: sk,
+ val: sv,
+ }
+}
+
+// localVar captures the size and entry-size estimates, when known, for a local variable.
+type localVar struct {
+ exprID int64
+ path []string
+ size *SizeEstimate
+ entrySize *entrySizeEstimate
+}
+
+// scopes maps a variable name to a stack of localVar bindings in order to handle shadowing
+// introduced by comprehension variables and cel.bind()-like macros.
+type scopes map[string][]*localVar
+
+func (s scopes) push(varName string, expr ast.Expr, path []string, size *SizeEstimate, entrySize *entrySizeEstimate) {
+ s[varName] = append(s[varName], &localVar{
+ exprID: expr.ID(),
+ path: path,
+ size: size,
+ entrySize: entrySize,
+ })
+}
+
+func (s scopes) pop(varName string) {
+ varStack := s[varName]
+ s[varName] = varStack[:len(varStack)-1]
+}
+
+func (s scopes) peek(varName string) (*localVar, bool) {
+ varStack := s[varName]
+ if len(varStack) > 0 {
+ return varStack[len(varStack)-1], true
+ }
+ return nil, false
+}
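+
+// Shadowing sketch (variable names and expressions are illustrative): pushing
+// the same name twice makes peek observe the innermost binding until popped.
+//
+//	s := make(scopes)
+//	s.push("x", outerExpr, []string{"outer"}, nil, nil)
+//	s.push("x", innerExpr, []string{"inner"}, nil, nil)
+//	v, _ := s.peek("x") // v.path == []string{"inner"}
+//	s.pop("x")
+//	v, _ = s.peek("x")  // v.path == []string{"outer"}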
+
+func (c *coster) pushIterKey(varName string, rangeExpr ast.Expr) {
+ entrySize := c.computeEntrySize(rangeExpr)
+ size := entrySize.keySize()
+ path := c.getPath(rangeExpr)
+ container := entrySize.container()
+ if container == types.UnknownKind {
+ container = c.getType(rangeExpr).Kind()
+ }
+ subpath := "@keys"
+ if container == types.ListKind {
+ subpath = "@indices"
+ }
+ c.localVars.push(varName, rangeExpr, append(path, subpath), size, nil)
+}
+
+func (c *coster) pushIterValue(varName string, rangeExpr ast.Expr) {
+ entrySize := c.computeEntrySize(rangeExpr)
+ size := entrySize.valSize()
+ path := c.getPath(rangeExpr)
+ container := entrySize.container()
+ if container == types.UnknownKind {
+ container = c.getType(rangeExpr).Kind()
+ }
+ subpath := "@values"
+ if container == types.ListKind {
+ subpath = "@items"
+ }
+ c.localVars.push(varName, rangeExpr, append(path, subpath), size, nil)
}
-func (c *coster) cost(e *exprpb.Expr) CostEstimate {
+func (c *coster) pushIterSingle(varName string, rangeExpr ast.Expr) {
+ entrySize := c.computeEntrySize(rangeExpr)
+ size := entrySize.keySize()
+ subpath := "@keys"
+ container := entrySize.container()
+ if container == types.UnknownKind {
+ container = c.getType(rangeExpr).Kind()
+ }
+ if container == types.ListKind {
+ size = entrySize.valSize()
+ subpath = "@items"
+ }
+ path := c.getPath(rangeExpr)
+ c.localVars.push(varName, rangeExpr, append(path, subpath), size, nil)
+}
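+
+// For intuition (expressions illustrative): in `m.all(k, v, ...)` over a map
+// variable `m`, the key variable is tracked at path ["m", "@keys"] and the
+// value variable at ["m", "@values"]; in `l.exists(x, ...)` over a list `l`,
+// the single iteration variable resolves to ["l", "@items"].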
+
+func (c *coster) pushLocalVar(varName string, e ast.Expr) {
+ path := c.getPath(e)
+ // note: the entry size for the local variable is derived from the size of the binding expression;
+ // since the binding could be a list or map, its entry size is propagated as well
+ entrySize := c.computeEntrySize(e)
+ c.localVars.push(varName, e, path, c.computeSize(e), entrySize)
+}
+
+func (c *coster) peekLocalVar(varName string) (*localVar, bool) {
+ return c.localVars.peek(varName)
+}
+
+func (c *coster) popLocalVar(varName string) {
+ c.localVars.pop(varName)
+}
+
+func (c *coster) cost(e ast.Expr) CostEstimate {
if e == nil {
return CostEstimate{}
}
var cost CostEstimate
- switch e.GetExprKind().(type) {
- case *exprpb.Expr_ConstExpr:
+ switch e.Kind() {
+ case ast.LiteralKind:
cost = constCost
- case *exprpb.Expr_IdentExpr:
+ case ast.IdentKind:
cost = c.costIdent(e)
- case *exprpb.Expr_SelectExpr:
+ case ast.SelectKind:
cost = c.costSelect(e)
- case *exprpb.Expr_CallExpr:
+ case ast.CallKind:
cost = c.costCall(e)
- case *exprpb.Expr_ListExpr:
+ case ast.ListKind:
cost = c.costCreateList(e)
- case *exprpb.Expr_StructExpr:
+ case ast.MapKind:
+ cost = c.costCreateMap(e)
+ case ast.StructKind:
cost = c.costCreateStruct(e)
- case *exprpb.Expr_ComprehensionExpr:
- cost = c.costComprehension(e)
+ case ast.ComprehensionKind:
+ if c.isBind(e) {
+ cost = c.costBind(e)
+ } else {
+ cost = c.costComprehension(e)
+ }
default:
return CostEstimate{}
}
return cost
}
-func (c *coster) costIdent(e *exprpb.Expr) CostEstimate {
- identExpr := e.GetIdentExpr()
-
+func (c *coster) costIdent(e ast.Expr) CostEstimate {
+ identName := e.AsIdent()
// build and track the field path
- if iterRange, ok := c.iterRanges.peek(identExpr.GetName()); ok {
- switch c.checkedAST.TypeMap[iterRange].Kind() {
- case types.ListKind:
- c.addPath(e, append(c.exprPath[iterRange], "@items"))
- case types.MapKind:
- c.addPath(e, append(c.exprPath[iterRange], "@keys"))
- }
+ if v, ok := c.peekLocalVar(identName); ok {
+ c.addPath(e, v.path)
} else {
- c.addPath(e, []string{identExpr.GetName()})
+ c.addPath(e, []string{identName})
}
-
return selectAndIdentCost
}
-func (c *coster) costSelect(e *exprpb.Expr) CostEstimate {
- sel := e.GetSelectExpr()
+func (c *coster) costSelect(e ast.Expr) CostEstimate {
+ sel := e.AsSelect()
var sum CostEstimate
- if sel.GetTestOnly() {
+ if sel.IsTestOnly() {
// recurse, but do not add any cost
// this is equivalent to how evalTestOnly increments the runtime cost counter
// but does not add any additional cost for the qualifier, except here we do
// the reverse (ident adds cost)
sum = sum.Add(c.presenceTestCost)
- sum = sum.Add(c.cost(sel.GetOperand()))
+ sum = sum.Add(c.cost(sel.Operand()))
return sum
}
- sum = sum.Add(c.cost(sel.GetOperand()))
- targetType := c.getType(sel.GetOperand())
+ sum = sum.Add(c.cost(sel.Operand()))
+ targetType := c.getType(sel.Operand())
switch targetType.Kind() {
case types.MapKind, types.StructKind, types.TypeParamKind:
sum = sum.Add(selectAndIdentCost)
}
// build and track the field path
- c.addPath(e, append(c.getPath(sel.GetOperand()), sel.GetField()))
-
+ c.addPath(e, append(c.getPath(sel.Operand()), sel.FieldName()))
return sum
}
-func (c *coster) costCall(e *exprpb.Expr) CostEstimate {
- call := e.GetCallExpr()
- target := call.GetTarget()
- args := call.GetArgs()
+func (c *coster) costCall(e ast.Expr) CostEstimate {
+ // Dyn is just a way to disable type-checking, so return a cost of 1 plus the cost of the argument
+ if dynEstimate := c.maybeUnwrapDynCall(e); dynEstimate != nil {
+ return *dynEstimate
+ }
+ // Continue estimating the cost of all other calls.
+ call := e.AsCall()
+ args := call.Args()
var sum CostEstimate
argTypes := make([]AstNode, len(args))
@@ -426,22 +541,21 @@ func (c *coster) costCall(e *exprpb.Expr) CostEstimate {
argTypes[i] = c.newAstNode(arg)
}
- ref := c.checkedAST.ReferenceMap[e.GetId()]
- if ref == nil || len(ref.OverloadIDs) == 0 {
+ overloadIDs := c.checkedAST.GetOverloadIDs(e.ID())
+ if len(overloadIDs) == 0 {
return CostEstimate{}
}
- var targetType AstNode
- if target != nil {
- if call.Target != nil {
- sum = sum.Add(c.cost(call.GetTarget()))
- targetType = c.newAstNode(call.GetTarget())
- }
+ var targetType *AstNode
+ if call.IsMemberFunction() {
+ sum = sum.Add(c.cost(call.Target()))
+ var t AstNode = c.newAstNode(call.Target())
+ targetType = &t
}
// Pick a cost estimate range that covers all the overload cost estimation ranges
fnCost := CostEstimate{Min: uint64(math.MaxUint64), Max: 0}
var resultSize *SizeEstimate
- for _, overload := range ref.OverloadIDs {
- overloadCost := c.functionCost(call.GetFunction(), overload, &targetType, argTypes, argCosts)
+ for _, overload := range overloadIDs {
+ overloadCost := c.functionCost(e, call.FunctionName(), overload, targetType, argTypes, argCosts)
fnCost = fnCost.Union(overloadCost.CostEstimate)
if overloadCost.ResultSize != nil {
if resultSize == nil {
@@ -455,100 +569,161 @@ func (c *coster) costCall(e *exprpb.Expr) CostEstimate {
switch overload {
case overloads.IndexList:
if len(args) > 0 {
+ // note: assigning resultSize here could be redundant with the path-based lookup later
+ resultSize = c.computeEntrySize(args[0]).valSize()
c.addPath(e, append(c.getPath(args[0]), "@items"))
}
case overloads.IndexMap:
if len(args) > 0 {
+ resultSize = c.computeEntrySize(args[0]).valSize()
c.addPath(e, append(c.getPath(args[0]), "@values"))
}
}
+ if resultSize == nil {
+ resultSize = c.computeSize(e)
+ }
}
- if resultSize != nil {
- c.computedSizes[e.GetId()] = *resultSize
- }
+ c.setSize(e, resultSize)
return sum.Add(fnCost)
}
-func (c *coster) costCreateList(e *exprpb.Expr) CostEstimate {
- create := e.GetListExpr()
- var sum CostEstimate
- for _, e := range create.GetElements() {
- sum = sum.Add(c.cost(e))
+func (c *coster) maybeUnwrapDynCall(e ast.Expr) *CostEstimate {
+ call := e.AsCall()
+ if call.FunctionName() != "dyn" {
+ return nil
}
- return sum.Add(createListBaseCost)
+ arg := call.Args()[0]
+ argCost := c.cost(arg)
+ c.copySizeEstimates(e, arg)
+ callCost := FixedCostEstimate(1).Add(argCost)
+ return &callCost
}
-func (c *coster) costCreateStruct(e *exprpb.Expr) CostEstimate {
- str := e.GetStructExpr()
- if str.MessageName != "" {
- return c.costCreateMessage(e)
+func (c *coster) costCreateList(e ast.Expr) CostEstimate {
+ create := e.AsList()
+ var sum CostEstimate
+ itemSize := SizeEstimate{Min: math.MaxUint64, Max: 0}
+ if create.Size() == 0 {
+ itemSize.Min = 0
}
- return c.costCreateMap(e)
+ for _, e := range create.Elements() {
+ sum = sum.Add(c.cost(e))
+ is := c.sizeOrUnknown(e)
+ itemSize = itemSize.Union(is)
+ }
+ c.setEntrySize(e, &entrySizeEstimate{containerKind: types.ListKind, key: FixedSizeEstimate(1), val: itemSize})
+ return sum.Add(createListBaseCost)
}
-func (c *coster) costCreateMap(e *exprpb.Expr) CostEstimate {
- mapVal := e.GetStructExpr()
+func (c *coster) costCreateMap(e ast.Expr) CostEstimate {
+ mapVal := e.AsMap()
var sum CostEstimate
- for _, ent := range mapVal.GetEntries() {
- key := ent.GetMapKey()
- sum = sum.Add(c.cost(key))
-
- sum = sum.Add(c.cost(ent.GetValue()))
+ keySize := SizeEstimate{Min: math.MaxUint64, Max: 0}
+ valSize := SizeEstimate{Min: math.MaxUint64, Max: 0}
+ if mapVal.Size() == 0 {
+ valSize.Min = 0
+ keySize.Min = 0
+ }
+ for _, ent := range mapVal.Entries() {
+ entry := ent.AsMapEntry()
+ sum = sum.Add(c.cost(entry.Key()))
+ sum = sum.Add(c.cost(entry.Value()))
+ // Compute the key size range
+ ks := c.sizeOrUnknown(entry.Key())
+ keySize = keySize.Union(ks)
+ // Compute the value size range
+ vs := c.sizeOrUnknown(entry.Value())
+ valSize = valSize.Union(vs)
}
+ c.setEntrySize(e, &entrySizeEstimate{containerKind: types.MapKind, key: keySize, val: valSize})
return sum.Add(createMapBaseCost)
}
-func (c *coster) costCreateMessage(e *exprpb.Expr) CostEstimate {
- msgVal := e.GetStructExpr()
+func (c *coster) costCreateStruct(e ast.Expr) CostEstimate {
+ msgVal := e.AsStruct()
var sum CostEstimate
- for _, ent := range msgVal.GetEntries() {
- sum = sum.Add(c.cost(ent.GetValue()))
+ for _, ent := range msgVal.Fields() {
+ field := ent.AsStructField()
+ sum = sum.Add(c.cost(field.Value()))
}
return sum.Add(createMessageBaseCost)
}
-func (c *coster) costComprehension(e *exprpb.Expr) CostEstimate {
- comp := e.GetComprehensionExpr()
+func (c *coster) costComprehension(e ast.Expr) CostEstimate {
+ comp := e.AsComprehension()
var sum CostEstimate
- sum = sum.Add(c.cost(comp.GetIterRange()))
- sum = sum.Add(c.cost(comp.GetAccuInit()))
+ sum = sum.Add(c.cost(comp.IterRange()))
+ sum = sum.Add(c.cost(comp.AccuInit()))
+ c.pushLocalVar(comp.AccuVar(), comp.AccuInit())
+
+ // Track the iterRange of each IterVar and AccuVar for field path construction
+ if comp.HasIterVar2() {
+ c.pushIterKey(comp.IterVar(), comp.IterRange())
+ c.pushIterValue(comp.IterVar2(), comp.IterRange())
+ } else {
+ c.pushIterSingle(comp.IterVar(), comp.IterRange())
+ }
+
+ // Determine the cost for each element in the loop
+ loopCost := c.cost(comp.LoopCondition())
+ stepCost := c.cost(comp.LoopStep())
- // Track the iterRange of each IterVar for field path construction
- c.iterRanges.push(comp.GetIterVar(), comp.GetIterRange())
- loopCost := c.cost(comp.GetLoopCondition())
- stepCost := c.cost(comp.GetLoopStep())
- c.iterRanges.pop(comp.GetIterVar())
- sum = sum.Add(c.cost(comp.Result))
- rangeCnt := c.sizeEstimate(c.newAstNode(comp.GetIterRange()))
+ // Clear the intermediate variable tracking.
+ c.popLocalVar(comp.IterVar())
+ if comp.HasIterVar2() {
+ c.popLocalVar(comp.IterVar2())
+ }
- c.computedSizes[e.GetId()] = rangeCnt
+ // Determine the result cost.
+ sum = sum.Add(c.cost(comp.Result()))
+ c.localVars.pop(comp.AccuVar())
+ // Estimate the cost of the loop.
+ rangeCnt := c.sizeOrUnknown(comp.IterRange())
rangeCost := rangeCnt.MultiplyByCost(stepCost.Add(loopCost))
sum = sum.Add(rangeCost)
+ switch k := comp.AccuInit().Kind(); k {
+ case ast.LiteralKind:
+ c.setSize(e, c.computeSize(comp.AccuInit()))
+ case ast.ListKind, ast.MapKind:
+ c.setSize(e, &rangeCnt)
+ // For a step which produces a container value, it will have an entry size associated
+ // with its expression id.
+ if stepEntrySize := c.computeEntrySize(comp.LoopStep()); stepEntrySize != nil {
+ c.setEntrySize(e, stepEntrySize)
+ break
+ }
+ }
return sum
}
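+
+// Rough worked example (illustrative): for `l.all(x, x > 0)` over a list
+// variable `l` whose size estimate is [0, N], the loop contributes up to
+// N * (loopConditionCost + loopStepCost), added to the costs of the iteration
+// range, the accumulator init, and the result expression.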
-func (c *coster) sizeEstimate(t AstNode) SizeEstimate {
- if l := t.ComputedSize(); l != nil {
- return *l
- }
- if l := c.estimator.EstimateSize(t); l != nil {
- return *l
- }
- // return an estimate of 1 for return types of set
- // lengths, since strings/bytes/more complex objects could be of
- // variable length
- if isScalar(t.Type()) {
- // TODO: since the logic for size estimation is split between
- // ComputedSize and isScalar, changing one will likely require changing
- // the other, so they should be merged in the future if possible
- return SizeEstimate{Min: 1, Max: 1}
- }
- return SizeEstimate{Min: 0, Max: math.MaxUint64}
+func (c *coster) isBind(e ast.Expr) bool {
+ comp := e.AsComprehension()
+ iterRange := comp.IterRange()
+ loopCond := comp.LoopCondition()
+ return iterRange.Kind() == ast.ListKind && iterRange.AsList().Size() == 0 &&
+ loopCond.Kind() == ast.LiteralKind && loopCond.AsLiteral() == types.False &&
+ comp.AccuVar() != parser.AccumulatorName
+}
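+
+// For reference (shape only): `cel.bind(x, a + b, x * x)` lowers to a
+// comprehension with an empty list iteration range, a `false` loop condition,
+// and a non-accumulator AccuVar `x`, which is exactly the pattern matched above.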
+
+func (c *coster) costBind(e ast.Expr) CostEstimate {
+ comp := e.AsComprehension()
+ var sum CostEstimate
+ // Binds are lazily initialized, so we retain the cost of an empty iteration range.
+ sum = sum.Add(c.cost(comp.IterRange()))
+ sum = sum.Add(c.cost(comp.AccuInit()))
+
+ c.pushLocalVar(comp.AccuVar(), comp.AccuInit())
+ sum = sum.Add(c.cost(comp.Result()))
+ c.popLocalVar(comp.AccuVar())
+
+ // Associate the bind output size with the result size.
+ c.copySizeEstimates(e, comp.Result())
+ return sum
}
-func (c *coster) functionCost(function, overloadID string, target *AstNode, args []AstNode, argCosts []CostEstimate) CallEstimate {
+func (c *coster) functionCost(e ast.Expr, function, overloadID string, target *AstNode, args []AstNode, argCosts []CostEstimate) CallEstimate {
argCostSum := func() CostEstimate {
var sum CostEstimate
for _, a := range argCosts {
@@ -573,35 +748,42 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args
case overloads.ExtFormatString:
if target != nil {
// ResultSize not calculated because we can't bound the max size.
- return CallEstimate{CostEstimate: c.sizeEstimate(*target).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())}
+ return CallEstimate{
+ CostEstimate: c.sizeOrUnknown(*target).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())}
}
case overloads.StringToBytes:
if len(args) == 1 {
- sz := c.sizeEstimate(args[0])
+ sz := c.sizeOrUnknown(args[0])
// ResultSize max is when each char converts to 4 bytes.
- return CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &SizeEstimate{Min: sz.Min, Max: sz.Max * 4}}
+ return CallEstimate{
+ CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()),
+ ResultSize: &SizeEstimate{Min: sz.Min, Max: sz.Max * 4}}
}
case overloads.BytesToString:
if len(args) == 1 {
- sz := c.sizeEstimate(args[0])
+ sz := c.sizeOrUnknown(args[0])
// ResultSize min is when 4 bytes convert to 1 char.
- return CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &SizeEstimate{Min: sz.Min / 4, Max: sz.Max}}
+ return CallEstimate{
+ CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()),
+ ResultSize: &SizeEstimate{Min: sz.Min / 4, Max: sz.Max}}
}
case overloads.ExtQuoteString:
if len(args) == 1 {
- sz := c.sizeEstimate(args[0])
+ sz := c.sizeOrUnknown(args[0])
// ResultSize max is when each char is escaped. 2 quote chars always added.
- return CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &SizeEstimate{Min: sz.Min + 2, Max: sz.Max*2 + 2}}
+ return CallEstimate{
+ CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()),
+ ResultSize: &SizeEstimate{Min: sz.Min + 2, Max: sz.Max*2 + 2}}
}
case overloads.StartsWithString, overloads.EndsWithString:
if len(args) == 1 {
- return CallEstimate{CostEstimate: c.sizeEstimate(args[0]).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())}
+ return CallEstimate{CostEstimate: c.sizeOrUnknown(args[0]).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())}
}
case overloads.InList:
// If a list is composed entirely of constant values this is O(1), but we don't account for that here.
// We just assume all list containment checks are O(n).
if len(args) == 2 {
- return CallEstimate{CostEstimate: c.sizeEstimate(args[1]).MultiplyByCostFactor(1).Add(argCostSum())}
+ return CallEstimate{CostEstimate: c.sizeOrUnknown(args[1]).MultiplyByCostFactor(1).Add(argCostSum())}
}
// O(nm) functions
case overloads.MatchesString:
@@ -609,19 +791,19 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args
if target != nil && len(args) == 1 {
// Add one to string length for purposes of cost calculation to prevent product of string and regex to be 0
// in case where string is empty but regex is still expensive.
- strCost := c.sizeEstimate(*target).Add(SizeEstimate{Min: 1, Max: 1}).MultiplyByCostFactor(common.StringTraversalCostFactor)
+ strCost := c.sizeOrUnknown(*target).Add(SizeEstimate{Min: 1, Max: 1}).MultiplyByCostFactor(common.StringTraversalCostFactor)
// We don't know how many expressions are in the regex, just the string length (a huge
// improvement here would be to somehow get a count of the number of expressions in the regex or
// how many states are in the regex state machine and use that to measure regex cost).
// For now, we're making a guess that each expression in a regex is typically at least 4 chars
// in length.
- regexCost := c.sizeEstimate(args[0]).MultiplyByCostFactor(common.RegexStringLengthCostFactor)
+ regexCost := c.sizeOrUnknown(args[0]).MultiplyByCostFactor(common.RegexStringLengthCostFactor)
return CallEstimate{CostEstimate: strCost.Multiply(regexCost).Add(argCostSum())}
}
case overloads.ContainsString:
if target != nil && len(args) == 1 {
- strCost := c.sizeEstimate(*target).MultiplyByCostFactor(common.StringTraversalCostFactor)
- substrCost := c.sizeEstimate(args[0]).MultiplyByCostFactor(common.StringTraversalCostFactor)
+ strCost := c.sizeOrUnknown(*target).MultiplyByCostFactor(common.StringTraversalCostFactor)
+ substrCost := c.sizeOrUnknown(args[0]).MultiplyByCostFactor(common.StringTraversalCostFactor)
return CallEstimate{CostEstimate: strCost.Multiply(substrCost).Add(argCostSum())}
}
case overloads.LogicalOr, overloads.LogicalAnd:
@@ -631,7 +813,9 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args
argCost := CostEstimate{Min: lhs.Min, Max: lhs.Add(rhs).Max}
return CallEstimate{CostEstimate: argCost}
case overloads.Conditional:
- size := c.sizeEstimate(args[1]).Union(c.sizeEstimate(args[2]))
+ size := c.sizeOrUnknown(args[1]).Union(c.sizeOrUnknown(args[2]))
+ resultEntrySize := c.computeEntrySize(args[1].Expr()).union(c.computeEntrySize(args[2].Expr()))
+ c.setEntrySize(e, resultEntrySize)
conditionalCost := argCosts[0]
ifTrueCost := argCosts[1]
ifFalseCost := argCosts[2]
@@ -639,13 +823,19 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args
return CallEstimate{CostEstimate: argCost, ResultSize: &size}
case overloads.AddString, overloads.AddBytes, overloads.AddList:
if len(args) == 2 {
- lhsSize := c.sizeEstimate(args[0])
- rhsSize := c.sizeEstimate(args[1])
+ lhsSize := c.sizeOrUnknown(args[0])
+ rhsSize := c.sizeOrUnknown(args[1])
resultSize := lhsSize.Add(rhsSize)
+ rhsEntrySize := c.computeEntrySize(args[0].Expr())
+ lhsEntrySize := c.computeEntrySize(args[1].Expr())
+ resultEntrySize := rhsEntrySize.union(lhsEntrySize)
+ if resultEntrySize != nil {
+ c.setEntrySize(e, resultEntrySize)
+ }
switch overloadID {
case overloads.AddList:
// list concatenation is O(1), but we handle it here to track size
- return CallEstimate{CostEstimate: CostEstimate{Min: 1, Max: 1}.Add(argCostSum()), ResultSize: &resultSize}
+ return CallEstimate{CostEstimate: FixedCostEstimate(1).Add(argCostSum()), ResultSize: &resultSize}
default:
return CallEstimate{CostEstimate: resultSize.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &resultSize}
}
@@ -653,8 +843,8 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args
case overloads.LessString, overloads.GreaterString, overloads.LessEqualsString, overloads.GreaterEqualsString,
overloads.LessBytes, overloads.GreaterBytes, overloads.LessEqualsBytes, overloads.GreaterEqualsBytes,
overloads.Equals, overloads.NotEquals:
- lhsCost := c.sizeEstimate(args[0])
- rhsCost := c.sizeEstimate(args[1])
+ lhsCost := c.sizeOrUnknown(args[0])
+ rhsCost := c.sizeOrUnknown(args[1])
min := uint64(0)
smallestMax := lhsCost.Max
if rhsCost.Max < smallestMax {
@@ -664,43 +854,162 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args
min = 1
}
// equality of 2 scalar values results in a cost of 1
- return CallEstimate{CostEstimate: CostEstimate{Min: min, Max: smallestMax}.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())}
+ return CallEstimate{
+ CostEstimate: CostEstimate{Min: min, Max: smallestMax}.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()),
+ }
}
// O(1) functions
// See CostTracker.costCall for more details about O(1) cost calculations
// Benchmarks suggest that most of the other operations take +/- 50% of a base cost unit
// which on an Intel Xeon 2.20GHz CPU is 50ns.
- return CallEstimate{CostEstimate: CostEstimate{Min: 1, Max: 1}.Add(argCostSum())}
+ return CallEstimate{CostEstimate: FixedCostEstimate(1).Add(argCostSum())}
+}
+
+func (c *coster) getType(e ast.Expr) *types.Type {
+ return c.checkedAST.GetType(e.ID())
}
-func (c *coster) getType(e *exprpb.Expr) *types.Type {
- return c.checkedAST.TypeMap[e.GetId()]
+func (c *coster) getPath(e ast.Expr) []string {
+ if e.Kind() == ast.IdentKind {
+ if v, found := c.peekLocalVar(e.AsIdent()); found {
+ return v.path[:]
+ }
+ }
+ return c.exprPaths[e.ID()][:]
}
-func (c *coster) getPath(e *exprpb.Expr) []string {
- return c.exprPath[e.GetId()]
+func (c *coster) addPath(e ast.Expr, path []string) {
+ c.exprPaths[e.ID()] = path
}
-func (c *coster) addPath(e *exprpb.Expr, path []string) {
- c.exprPath[e.GetId()] = path
+func isAccumulatorVar(name string) bool {
+ return name == parser.AccumulatorName || name == parser.HiddenAccumulatorName
}
-func (c *coster) newAstNode(e *exprpb.Expr) *astNode {
+func (c *coster) newAstNode(e ast.Expr) *astNode {
path := c.getPath(e)
- if len(path) > 0 && path[0] == parser.AccumulatorName {
+ if len(path) > 0 && isAccumulatorVar(path[0]) {
// only provide paths to root vars; omit accumulator vars
path = nil
}
- var derivedSize *SizeEstimate
- if size, ok := c.computedSizes[e.GetId()]; ok {
- derivedSize = &size
- }
return &astNode{
path: path,
t: c.getType(e),
expr: e,
- derivedSize: derivedSize}
+ derivedSize: c.computeSize(e)}
+}
+
+func (c *coster) setSize(e ast.Expr, size *SizeEstimate) {
+ if size == nil {
+ return
+ }
+ // Store the computed size with the expression
+ c.computedSizes[e.ID()] = *size
+}
+
+func (c *coster) sizeOrUnknown(node any) SizeEstimate {
+ switch v := node.(type) {
+ case ast.Expr:
+ if sz := c.computeSize(v); sz != nil {
+ return *sz
+ }
+ case AstNode:
+ if sz := v.ComputedSize(); sz != nil {
+ return *sz
+ }
+ }
+ return UnknownSizeEstimate()
+}
+
+func (c *coster) copySizeEstimates(dst, src ast.Expr) {
+ c.setSize(dst, c.computeSize(src))
+ c.setEntrySize(dst, c.computeEntrySize(src))
+}
+
+func (c *coster) computeSize(e ast.Expr) *SizeEstimate {
+ if size, ok := c.computedSizes[e.ID()]; ok {
+ return &size
+ }
+ if size := computeExprSize(e); size != nil {
+ return size
+ }
+ // Ensure size estimates are computed first as users may choose to override the costs that
+ // CEL would otherwise ascribe to the type.
+ node := astNode{expr: e, path: c.getPath(e), t: c.getType(e)}
+ if size := c.estimator.EstimateSize(node); size != nil {
+ // storing the computed size should reduce calls to EstimateSize()
+ c.computedSizes[e.ID()] = *size
+ return size
+ }
+ if size := computeTypeSize(c.getType(e)); size != nil {
+ return size
+ }
+ if e.Kind() == ast.IdentKind {
+ varName := e.AsIdent()
+ if v, ok := c.peekLocalVar(varName); ok && v.size != nil {
+ return v.size
+ }
+ }
+ return nil
+}
+
+func (c *coster) setEntrySize(e ast.Expr, size *entrySizeEstimate) {
+ if size == nil {
+ return
+ }
+ c.computedEntrySizes[e.ID()] = *size
+}
+
+func (c *coster) computeEntrySize(e ast.Expr) *entrySizeEstimate {
+ if sz, found := c.computedEntrySizes[e.ID()]; found {
+ return &sz
+ }
+ if e.Kind() == ast.IdentKind {
+ varName := e.AsIdent()
+ if v, ok := c.peekLocalVar(varName); ok && v.entrySize != nil {
+ return v.entrySize
+ }
+ }
+ return nil
+}
+
+func computeExprSize(expr ast.Expr) *SizeEstimate {
+ var v uint64
+ switch expr.Kind() {
+ case ast.LiteralKind:
+ switch ck := expr.AsLiteral().(type) {
+ case types.String:
+ // converting to runes here is an O(n) operation, but
+ // this is consistent with how size is computed at runtime,
+ // and how the language definition defines string size
+ v = uint64(len([]rune(ck)))
+ case types.Bytes:
+ v = uint64(len(ck))
+ case types.Bool, types.Double, types.Duration,
+ types.Int, types.Timestamp, types.Uint,
+ types.Null:
+ v = uint64(1)
+ default:
+ return nil
+ }
+ case ast.ListKind:
+ v = uint64(expr.AsList().Size())
+ case ast.MapKind:
+ v = uint64(expr.AsMap().Size())
+ default:
+ return nil
+ }
+ cost := FixedSizeEstimate(v)
+ return &cost
+}
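+
+// Worked examples (illustrative): the literal "héllo" reports a size of 5
+// (runes, not bytes), `[1, 2, 3]` reports 3, `{"a": 1, "b": 2}` reports 2, and
+// a call such as `a + b` reports nil since its size is not statically known.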
+
+func computeTypeSize(t *types.Type) *SizeEstimate {
+ if isScalar(t) {
+ cost := FixedSizeEstimate(1)
+ return &cost
+ }
+ return nil
}
// isScalar returns true if the given type is known to be of a constant size at
@@ -710,10 +1019,24 @@ func isScalar(t *types.Type) bool {
switch t.Kind() {
case types.BoolKind, types.DoubleKind, types.DurationKind, types.IntKind, types.TimestampKind, types.UintKind:
return true
+ case types.OpaqueKind:
+ if t.TypeName() == "optional_type" {
+ return isScalar(t.Parameters()[0])
+ }
}
return false
}
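+
+// For example, an `optional_type(int)` value is still treated as scalar and
+// receives a fixed size of 1, while `optional_type(list(int))` is not.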
var (
doubleTwoTo64 = math.Ldexp(1.0, 64)
+
+ unknownSizeEstimate = SizeEstimate{Min: 0, Max: math.MaxUint64}
+ unknownCostEstimate = unknownSizeEstimate.MultiplyByCostFactor(1)
+
+ selectAndIdentCost = FixedCostEstimate(common.SelectAndIdentCost)
+ constCost = FixedCostEstimate(common.ConstCost)
+
+ createListBaseCost = FixedCostEstimate(common.ListCreateBaseCost)
+ createMapBaseCost = FixedCostEstimate(common.MapCreateBaseCost)
+ createMessageBaseCost = FixedCostEstimate(common.StructCreateBaseCost)
)
diff --git a/vendor/github.com/google/cel-go/checker/decls/decls.go b/vendor/github.com/google/cel-go/checker/decls/decls.go
index 0d91bef51..e013d2c2b 100644
--- a/vendor/github.com/google/cel-go/checker/decls/decls.go
+++ b/vendor/github.com/google/cel-go/checker/decls/decls.go
@@ -67,7 +67,7 @@ func NewAbstractType(name string, paramTypes ...*exprpb.Type) *exprpb.Type {
// NewOptionalType constructs an abstract type indicating that the parameterized type
// may be contained within the object.
func NewOptionalType(paramType *exprpb.Type) *exprpb.Type {
- return NewAbstractType("optional", paramType)
+ return NewAbstractType("optional_type", paramType)
}
// NewFunctionType creates a function invocation contract, typically only used
@@ -91,6 +91,17 @@ func NewFunction(name string,
Overloads: overloads}}}
}
+// NewFunctionWithDoc creates a named function declaration with a description and one or more overloads.
+func NewFunctionWithDoc(name, doc string,
+ overloads ...*exprpb.Decl_FunctionDecl_Overload) *exprpb.Decl {
+ return &exprpb.Decl{
+ Name: name,
+ DeclKind: &exprpb.Decl_Function{
+ Function: &exprpb.Decl_FunctionDecl{
+ // Doc: doc,
+ Overloads: overloads}}}
+}
+
// NewIdent creates a named identifier declaration with an optional literal
// value.
//
@@ -98,28 +109,37 @@ func NewFunction(name string,
//
// Deprecated: Use NewVar or NewConst instead.
func NewIdent(name string, t *exprpb.Type, v *exprpb.Constant) *exprpb.Decl {
+ return newIdent(name, t, v, "")
+}
+
+func newIdent(name string, t *exprpb.Type, v *exprpb.Constant, desc string) *exprpb.Decl {
return &exprpb.Decl{
Name: name,
DeclKind: &exprpb.Decl_Ident{
Ident: &exprpb.Decl_IdentDecl{
Type: t,
- Value: v}}}
+ Value: v,
+ Doc: desc}}}
}
// NewConst creates a constant identifier with a CEL constant literal value.
func NewConst(name string, t *exprpb.Type, v *exprpb.Constant) *exprpb.Decl {
- return NewIdent(name, t, v)
+ return newIdent(name, t, v, "")
}
// NewVar creates a variable identifier.
func NewVar(name string, t *exprpb.Type) *exprpb.Decl {
- return NewIdent(name, t, nil)
+ return newIdent(name, t, nil, "")
+}
+
+// NewVarWithDoc creates a variable identifier with a type and a description string.
+func NewVarWithDoc(name string, t *exprpb.Type, desc string) *exprpb.Decl {
+ return newIdent(name, t, nil, desc)
}
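+
+// For example (type name and description are illustrative):
+//
+//	reqVar := decls.NewVarWithDoc("request",
+//		decls.NewObjectType("google.rpc.context.AttributeContext.Request"),
+//		"attributes of the inbound request")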
// NewInstanceOverload creates an instance function overload contract.
// First element of argTypes is instance.
-func NewInstanceOverload(id string, argTypes []*exprpb.Type,
- resultType *exprpb.Type) *exprpb.Decl_FunctionDecl_Overload {
+func NewInstanceOverload(id string, argTypes []*exprpb.Type, resultType *exprpb.Type) *exprpb.Decl_FunctionDecl_Overload {
return &exprpb.Decl_FunctionDecl_Overload{
OverloadId: id,
ResultType: resultType,
@@ -154,8 +174,7 @@ func NewObjectType(typeName string) *exprpb.Type {
// NewOverload creates a function overload declaration which contains a unique
// overload id as well as the expected argument and result types. Overloads
// must be aggregated within a Function declaration.
-func NewOverload(id string, argTypes []*exprpb.Type,
- resultType *exprpb.Type) *exprpb.Decl_FunctionDecl_Overload {
+func NewOverload(id string, argTypes []*exprpb.Type, resultType *exprpb.Type) *exprpb.Decl_FunctionDecl_Overload {
return &exprpb.Decl_FunctionDecl_Overload{
OverloadId: id,
ResultType: resultType,
@@ -231,7 +250,5 @@ func NewWrapperType(wrapped *exprpb.Type) *exprpb.Type {
// TODO: return an error
panic("Wrapped type must be a primitive")
}
- return &exprpb.Type{
- TypeKind: &exprpb.Type_Wrapper{
- Wrapper: primitive}}
+ return &exprpb.Type{TypeKind: &exprpb.Type_Wrapper{Wrapper: primitive}}
}
diff --git a/vendor/github.com/google/cel-go/checker/env.go b/vendor/github.com/google/cel-go/checker/env.go
index 70682b17c..d5ac05014 100644
--- a/vendor/github.com/google/cel-go/checker/env.go
+++ b/vendor/github.com/google/cel-go/checker/env.go
@@ -146,6 +146,14 @@ func (e *Env) LookupIdent(name string) *decls.VariableDecl {
return decl
}
+ if i, found := e.provider.FindIdent(candidate); found {
+ if t, ok := i.(*types.Type); ok {
+ decl := decls.NewVariable(candidate, types.NewTypeTypeWithParam(t))
+ e.declarations.AddIdent(decl)
+ return decl
+ }
+ }
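+
+// Note (illustrative): this makes provider-registered type names resolvable as
+// plain identifiers, so a reference like `my.pkg.MyMessage` checks as a value
+// of type `type(my.pkg.MyMessage)` rather than failing identifier resolution.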
+
// Next try to import this as an enum value by splitting the name in a type prefix and
// the enum inside.
if enumValue := e.provider.EnumValue(candidate); enumValue.Type() != types.ErrType {
diff --git a/vendor/github.com/google/cel-go/checker/errors.go b/vendor/github.com/google/cel-go/checker/errors.go
index c2b96498d..3535440ba 100644
--- a/vendor/github.com/google/cel-go/checker/errors.go
+++ b/vendor/github.com/google/cel-go/checker/errors.go
@@ -15,13 +15,9 @@
package checker
import (
- "reflect"
-
"github.com/google/cel-go/common"
"github.com/google/cel-go/common/ast"
"github.com/google/cel-go/common/types"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
// typeErrors is a specialization of Errors.
@@ -34,9 +30,9 @@ func (e *typeErrors) fieldTypeMismatch(id int64, l common.Location, name string,
name, FormatCELType(field), FormatCELType(value))
}
-func (e *typeErrors) incompatibleType(id int64, l common.Location, ex *exprpb.Expr, prev, next *types.Type) {
+func (e *typeErrors) incompatibleType(id int64, l common.Location, ex ast.Expr, prev, next *types.Type) {
e.errs.ReportErrorAtID(id, l,
- "incompatible type already exists for expression: %v(%d) old:%v, new:%v", ex, ex.GetId(), prev, next)
+ "incompatible type already exists for expression: %v(%d) old:%v, new:%v", ex, ex.ID(), prev, next)
}
func (e *typeErrors) noMatchingOverload(id int64, l common.Location, name string, args []*types.Type, isInstance bool) {
@@ -49,7 +45,11 @@ func (e *typeErrors) notAComprehensionRange(id int64, l common.Location, t *type
FormatCELType(t))
}
-func (e *typeErrors) notAnOptionalFieldSelection(id int64, l common.Location, field *exprpb.Expr) {
+func (e *typeErrors) notAnOptionalFieldSelectionCall(id int64, l common.Location, err string) {
+ e.errs.ReportErrorAtID(id, l, "unsupported optional field selection: %s", err)
+}
+
+func (e *typeErrors) notAnOptionalFieldSelection(id int64, l common.Location, field ast.Expr) {
e.errs.ReportErrorAtID(id, l, "unsupported optional field selection: %v", field)
}
@@ -61,9 +61,9 @@ func (e *typeErrors) notAMessageType(id int64, l common.Location, typeName strin
e.errs.ReportErrorAtID(id, l, "'%s' is not a message type", typeName)
}
-func (e *typeErrors) referenceRedefinition(id int64, l common.Location, ex *exprpb.Expr, prev, next *ast.ReferenceInfo) {
+func (e *typeErrors) referenceRedefinition(id int64, l common.Location, ex ast.Expr, prev, next *ast.ReferenceInfo) {
e.errs.ReportErrorAtID(id, l,
- "reference already exists for expression: %v(%d) old:%v, new:%v", ex, ex.GetId(), prev, next)
+ "reference already exists for expression: %v(%d) old:%v, new:%v", ex, ex.ID(), prev, next)
}
func (e *typeErrors) typeDoesNotSupportFieldSelection(id int64, l common.Location, t *types.Type) {
@@ -87,6 +87,6 @@ func (e *typeErrors) unexpectedFailedResolution(id int64, l common.Location, typ
e.errs.ReportErrorAtID(id, l, "unexpected failed resolution of '%s'", typeName)
}
-func (e *typeErrors) unexpectedASTType(id int64, l common.Location, ex *exprpb.Expr) {
- e.errs.ReportErrorAtID(id, l, "unrecognized ast type: %v", reflect.TypeOf(ex))
+func (e *typeErrors) unexpectedASTType(id int64, l common.Location, kind, typeName string) {
+ e.errs.ReportErrorAtID(id, l, "unexpected %s type: %v", kind, typeName)
}
diff --git a/vendor/github.com/google/cel-go/checker/printer.go b/vendor/github.com/google/cel-go/checker/printer.go
index 15cba06ee..7a3984f02 100644
--- a/vendor/github.com/google/cel-go/checker/printer.go
+++ b/vendor/github.com/google/cel-go/checker/printer.go
@@ -17,40 +17,40 @@ package checker
import (
"sort"
+ "github.com/google/cel-go/common/ast"
"github.com/google/cel-go/common/debug"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
type semanticAdorner struct {
- checks *exprpb.CheckedExpr
+ checked *ast.AST
}
var _ debug.Adorner = &semanticAdorner{}
func (a *semanticAdorner) GetMetadata(elem any) string {
result := ""
- e, isExpr := elem.(*exprpb.Expr)
+ e, isExpr := elem.(ast.Expr)
if !isExpr {
return result
}
- t := a.checks.TypeMap[e.GetId()]
+ t := a.checked.TypeMap()[e.ID()]
if t != nil {
result += "~"
- result += FormatCheckedType(t)
+ result += FormatCELType(t)
}
- switch e.GetExprKind().(type) {
- case *exprpb.Expr_IdentExpr,
- *exprpb.Expr_CallExpr,
- *exprpb.Expr_StructExpr,
- *exprpb.Expr_SelectExpr:
- if ref, found := a.checks.ReferenceMap[e.GetId()]; found {
- if len(ref.GetOverloadId()) == 0 {
+ switch e.Kind() {
+ case ast.IdentKind,
+ ast.CallKind,
+ ast.ListKind,
+ ast.StructKind,
+ ast.SelectKind:
+ if ref, found := a.checked.ReferenceMap()[e.ID()]; found {
+ if len(ref.OverloadIDs) == 0 {
result += "^" + ref.Name
} else {
- sort.Strings(ref.GetOverloadId())
- for i, overload := range ref.GetOverloadId() {
+ sort.Strings(ref.OverloadIDs)
+ for i, overload := range ref.OverloadIDs {
if i == 0 {
result += "^"
} else {
@@ -68,7 +68,7 @@ func (a *semanticAdorner) GetMetadata(elem any) string {
// Print returns a string representation of the Expr message,
// annotated with types from the CheckedExpr. The Expr must
// be a sub-expression embedded in the CheckedExpr.
-func Print(e *exprpb.Expr, checks *exprpb.CheckedExpr) string {
- a := &semanticAdorner{checks: checks}
+func Print(e ast.Expr, checked *ast.AST) string {
+ a := &semanticAdorner{checked: checked}
return debug.ToAdornedDebugString(e, a)
}
diff --git a/vendor/github.com/google/cel-go/checker/standard.go b/vendor/github.com/google/cel-go/checker/standard.go
deleted file mode 100644
index 11b35b80e..000000000
--- a/vendor/github.com/google/cel-go/checker/standard.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package checker
-
-import (
- "github.com/google/cel-go/common/stdlib"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
-)
-
-// StandardFunctions returns the Decls for all functions in the evaluator.
-//
-// Deprecated: prefer stdlib.FunctionExprDecls()
-func StandardFunctions() []*exprpb.Decl {
- return stdlib.FunctionExprDecls()
-}
-
-// StandardTypes returns the set of type identifiers for standard library types.
-//
-// Deprecated: prefer stdlib.TypeExprDecls()
-func StandardTypes() []*exprpb.Decl {
- return stdlib.TypeExprDecls()
-}
diff --git a/vendor/github.com/google/cel-go/checker/types.go b/vendor/github.com/google/cel-go/checker/types.go
index e2373d1b7..4c65b2737 100644
--- a/vendor/github.com/google/cel-go/checker/types.go
+++ b/vendor/github.com/google/cel-go/checker/types.go
@@ -41,7 +41,7 @@ func isError(t *types.Type) bool {
func isOptional(t *types.Type) bool {
if t.Kind() == types.OpaqueKind {
- return t.TypeName() == "optional"
+ return t.TypeName() == "optional_type"
}
return false
}
@@ -137,7 +137,11 @@ func internalIsAssignable(m *mapping, t1, t2 *types.Type) bool {
case types.BoolKind, types.BytesKind, types.DoubleKind, types.IntKind, types.StringKind, types.UintKind,
types.AnyKind, types.DurationKind, types.TimestampKind,
types.StructKind:
- return t1.IsAssignableType(t2)
+ // Test whether t2 is assignable from t1. The order of this check won't usually matter;
+ // however, there may be cases where type capabilities are expanded beyond what is supported
+ // in the current common/types package. For example, an interface designation for a group of
+ // Struct types.
+ return t2.IsAssignableType(t1)
case types.TypeKind:
return kind2 == types.TypeKind
case types.OpaqueKind, types.ListKind, types.MapKind:
@@ -256,7 +260,7 @@ func notReferencedIn(m *mapping, t, withinType *types.Type) bool {
return true
}
return notReferencedIn(m, t, wtSub)
- case types.OpaqueKind, types.ListKind, types.MapKind:
+ case types.OpaqueKind, types.ListKind, types.MapKind, types.TypeKind:
for _, pt := range withinType.Parameters() {
if !notReferencedIn(m, t, pt) {
return false
@@ -288,7 +292,8 @@ func substitute(m *mapping, t *types.Type, typeParamToDyn bool) *types.Type {
substitute(m, t.Parameters()[1], typeParamToDyn))
case types.TypeKind:
if len(t.Parameters()) > 0 {
- return types.NewTypeTypeWithParam(substitute(m, t.Parameters()[0], typeParamToDyn))
+ tParam := t.Parameters()[0]
+ return types.NewTypeTypeWithParam(substitute(m, tParam, typeParamToDyn))
}
return t
default:
diff --git a/vendor/github.com/google/cel-go/common/BUILD.bazel b/vendor/github.com/google/cel-go/common/BUILD.bazel
index d6165b13a..1b1b7914d 100644
--- a/vendor/github.com/google/cel-go/common/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/common/BUILD.bazel
@@ -9,6 +9,7 @@ go_library(
name = "go_default_library",
srcs = [
"cost.go",
+ "doc.go",
"error.go",
"errors.go",
"location.go",
@@ -18,7 +19,6 @@ go_library(
deps = [
"//common/runes:go_default_library",
"@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
- "@org_golang_x_text//width:go_default_library",
],
)
@@ -26,6 +26,7 @@ go_test(
name = "go_default_test",
size = "small",
srcs = [
+ "doc_test.go",
"errors_test.go",
"source_test.go",
],
diff --git a/vendor/github.com/google/cel-go/common/ast/BUILD.bazel b/vendor/github.com/google/cel-go/common/ast/BUILD.bazel
index 7269cdff5..9824f57a9 100644
--- a/vendor/github.com/google/cel-go/common/ast/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/common/ast/BUILD.bazel
@@ -1,12 +1,7 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
package(
- default_visibility = [
- "//cel:__subpackages__",
- "//checker:__subpackages__",
- "//common:__subpackages__",
- "//interpreter:__subpackages__",
- ],
+ default_visibility = ["//visibility:public"],
licenses = ["notice"], # Apache 2.0
)
@@ -14,13 +9,19 @@ go_library(
name = "go_default_library",
srcs = [
"ast.go",
+ "conversion.go",
"expr.go",
+ "factory.go",
+ "navigable.go",
],
importpath = "github.com/google/cel-go/common/ast",
- deps = [
+ deps = [
+ "//common:go_default_library",
"//common/types:go_default_library",
"//common/types/ref:go_default_library",
+ "@dev_cel_expr//:expr",
"@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_google_protobuf//types/known/structpb:go_default_library",
],
)
@@ -29,17 +30,20 @@ go_test(
name = "go_default_test",
srcs = [
"ast_test.go",
+ "conversion_test.go",
"expr_test.go",
+ "navigable_test.go",
],
embed = [
":go_default_library",
],
- deps = [
+ deps = [
"//checker:go_default_library",
"//checker/decls:go_default_library",
"//common:go_default_library",
"//common/containers:go_default_library",
"//common/decls:go_default_library",
+ "//common/operators:go_default_library",
"//common/overloads:go_default_library",
"//common/stdlib:go_default_library",
"//common/types:go_default_library",
@@ -48,5 +52,6 @@ go_test(
"//test/proto3pb:go_default_library",
"@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//encoding/prototext:go_default_library",
],
-)
\ No newline at end of file
+)
diff --git a/vendor/github.com/google/cel-go/common/ast/ast.go b/vendor/github.com/google/cel-go/common/ast/ast.go
index b3c150793..62c09cfc6 100644
--- a/vendor/github.com/google/cel-go/common/ast/ast.go
+++ b/vendor/github.com/google/cel-go/common/ast/ast.go
@@ -16,74 +16,369 @@
package ast
import (
- "fmt"
-
+ "github.com/google/cel-go/common"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
+)
- structpb "google.golang.org/protobuf/types/known/structpb"
+// AST contains a CEL-native expression and source info along with type and reference information.
+type AST struct {
+ expr Expr
+ sourceInfo *SourceInfo
+ typeMap map[int64]*types.Type
+ refMap map[int64]*ReferenceInfo
+}
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
-)
+// Expr returns the root ast.Expr value in the AST.
+func (a *AST) Expr() Expr {
+ if a == nil {
+ return nilExpr
+ }
+ return a.expr
+}
-// CheckedAST contains a protobuf expression and source info along with CEL-native type and reference information.
-type CheckedAST struct {
- Expr *exprpb.Expr
- SourceInfo *exprpb.SourceInfo
- TypeMap map[int64]*types.Type
- ReferenceMap map[int64]*ReferenceInfo
-}
-
-// CheckedASTToCheckedExpr converts a CheckedAST to a CheckedExpr protobouf.
-func CheckedASTToCheckedExpr(ast *CheckedAST) (*exprpb.CheckedExpr, error) {
- refMap := make(map[int64]*exprpb.Reference, len(ast.ReferenceMap))
- for id, ref := range ast.ReferenceMap {
- r, err := ReferenceInfoToReferenceExpr(ref)
- if err != nil {
- return nil, err
- }
- refMap[id] = r
+// SourceInfo returns the source metadata associated with the parse / type-check passes.
+func (a *AST) SourceInfo() *SourceInfo {
+ if a == nil {
+ return nil
+ }
+ return a.sourceInfo
+}
+
+// GetType returns the type for the expression at the given id, if one exists, else types.DynType.
+func (a *AST) GetType(id int64) *types.Type {
+ if t, found := a.TypeMap()[id]; found {
+ return t
+ }
+ return types.DynType
+}
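+
+// For example (illustrative), lookups on an unchecked AST fall through:
+//
+//	parsed := NewAST(e, info) // empty type map
+//	parsed.GetType(42)        // types.DynType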
+
+// SetType sets the type of the expression node at the given id.
+func (a *AST) SetType(id int64, t *types.Type) {
+ if a == nil {
+ return
+ }
+ a.typeMap[id] = t
+}
+
+// TypeMap returns the map of expression ids to type-checked types.
+//
+// If the AST is not type-checked, the map will be empty.
+func (a *AST) TypeMap() map[int64]*types.Type {
+ if a == nil {
+ return map[int64]*types.Type{}
+ }
+ return a.typeMap
+}
+
+// GetOverloadIDs returns the set of overload function names for a given expression id.
+//
+// If the expression id is not a function call, or the AST is not type-checked, the result will be empty.
+func (a *AST) GetOverloadIDs(id int64) []string {
+ if ref, found := a.ReferenceMap()[id]; found {
+ return ref.OverloadIDs
+ }
+ return []string{}
+}
+
+// ReferenceMap returns the map of expression id to identifier, constant, and function references.
+func (a *AST) ReferenceMap() map[int64]*ReferenceInfo {
+ if a == nil {
+ return map[int64]*ReferenceInfo{}
+ }
+ return a.refMap
+}
+
+// SetReference adds a reference to the checked AST type map.
+func (a *AST) SetReference(id int64, r *ReferenceInfo) {
+ if a == nil {
+ return
+ }
+ a.refMap[id] = r
+}
+
+// IsChecked returns whether the AST is type-checked.
+func (a *AST) IsChecked() bool {
+ return a != nil && len(a.TypeMap()) > 0
+}
+
+// NewAST creates a base AST instance with an ast.Expr and ast.SourceInfo value.
+func NewAST(e Expr, sourceInfo *SourceInfo) *AST {
+ if e == nil {
+ e = nilExpr
}
- typeMap := make(map[int64]*exprpb.Type, len(ast.TypeMap))
- for id, typ := range ast.TypeMap {
- t, err := types.TypeToExprType(typ)
- if err != nil {
- return nil, err
+ return &AST{
+ expr: e,
+ sourceInfo: sourceInfo,
+ typeMap: make(map[int64]*types.Type),
+ refMap: make(map[int64]*ReferenceInfo),
+ }
+}
+
+// NewCheckedAST wraps a parsed AST and augments it with type and reference metadata.
+func NewCheckedAST(parsed *AST, typeMap map[int64]*types.Type, refMap map[int64]*ReferenceInfo) *AST {
+ return &AST{
+ expr: parsed.Expr(),
+ sourceInfo: parsed.SourceInfo(),
+ typeMap: typeMap,
+ refMap: refMap,
+ }
+}
+
+// Copy creates a deep copy of the Expr and SourceInfo values in the input AST.
+//
+// Copies of the Expr value are generated using an internal default ExprFactory.
+func Copy(a *AST) *AST {
+ if a == nil {
+ return nil
+ }
+ e := defaultFactory.CopyExpr(a.expr)
+ if !a.IsChecked() {
+ return NewAST(e, CopySourceInfo(a.SourceInfo()))
+ }
+ typesCopy := make(map[int64]*types.Type, len(a.typeMap))
+ for id, t := range a.typeMap {
+ typesCopy[id] = t
+ }
+ refsCopy := make(map[int64]*ReferenceInfo, len(a.refMap))
+ for id, r := range a.refMap {
+ refsCopy[id] = r
+ }
+ return NewCheckedAST(NewAST(e, CopySourceInfo(a.SourceInfo())), typesCopy, refsCopy)
+}
+
+// MaxID returns the upper-bound, non-inclusive, of ids present within the AST's Expr value.
+func MaxID(a *AST) int64 {
+ visitor := &maxIDVisitor{maxID: 1}
+ PostOrderVisit(a.Expr(), visitor)
+ for id, call := range a.SourceInfo().MacroCalls() {
+ PostOrderVisit(call, visitor)
+ if id > visitor.maxID {
+ visitor.maxID = id + 1
}
- typeMap[id] = t
- }
- return &exprpb.CheckedExpr{
- Expr: ast.Expr,
- SourceInfo: ast.SourceInfo,
- ReferenceMap: refMap,
- TypeMap: typeMap,
- }, nil
-}
-
-// CheckedExprToCheckedAST converts a CheckedExpr protobuf to a CheckedAST instance.
-func CheckedExprToCheckedAST(checked *exprpb.CheckedExpr) (*CheckedAST, error) {
- refMap := make(map[int64]*ReferenceInfo, len(checked.GetReferenceMap()))
- for id, ref := range checked.GetReferenceMap() {
- r, err := ReferenceExprToReferenceInfo(ref)
- if err != nil {
- return nil, err
+ }
+ return visitor.maxID + 1
+}
+
+// Heights computes the heights of all AST expressions and returns a map from expression id to height.
+func Heights(a *AST) map[int64]int {
+ visitor := make(heightVisitor)
+ PostOrderVisit(a.Expr(), visitor)
+ return visitor
+}
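+
+// For example (illustrative), in `size(a) + 1` the root addition is strictly
+// taller than the nested `size(a)` call, which in turn is taller than the
+// leaf identifier `a`; leaves share the minimum height.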
+
+// NewSourceInfo creates a simple SourceInfo object from an input common.Source value.
+func NewSourceInfo(src common.Source) *SourceInfo {
+ var lineOffsets []int32
+ var desc string
+ baseLine := int32(0)
+ baseCol := int32(0)
+ if src != nil {
+ desc = src.Description()
+ lineOffsets = src.LineOffsets()
+ // Determine whether the source metadata should be computed relative
+ // to a base line and column value. This can be determined by requesting
+ // the location for offset 0 from the source object.
+ if loc, found := src.OffsetLocation(0); found {
+ baseLine = int32(loc.Line()) - 1
+ baseCol = int32(loc.Column())
}
- refMap[id] = r
}
- typeMap := make(map[int64]*types.Type, len(checked.GetTypeMap()))
- for id, typ := range checked.GetTypeMap() {
- t, err := types.ExprTypeToType(typ)
- if err != nil {
- return nil, err
+ return &SourceInfo{
+ desc: desc,
+ lines: lineOffsets,
+ baseLine: baseLine,
+ baseCol: baseCol,
+ offsetRanges: make(map[int64]OffsetRange),
+ macroCalls: make(map[int64]Expr),
+ }
+}
+
+// CopySourceInfo creates a deep copy of the MacroCalls within the input SourceInfo.
+//
+// Copies of macro Expr values are generated using an internal default ExprFactory.
+func CopySourceInfo(info *SourceInfo) *SourceInfo {
+ if info == nil {
+ return nil
+ }
+ rangesCopy := make(map[int64]OffsetRange, len(info.offsetRanges))
+ for id, off := range info.offsetRanges {
+ rangesCopy[id] = off
+ }
+ callsCopy := make(map[int64]Expr, len(info.macroCalls))
+ for id, call := range info.macroCalls {
+ callsCopy[id] = defaultFactory.CopyExpr(call)
+ }
+ return &SourceInfo{
+ syntax: info.syntax,
+ desc: info.desc,
+ lines: info.lines,
+ baseLine: info.baseLine,
+ baseCol: info.baseCol,
+ offsetRanges: rangesCopy,
+ macroCalls: callsCopy,
+ }
+}
+
+// SourceInfo records basic information about the expression as a textual input and
+// as a parsed expression value.
+type SourceInfo struct {
+ syntax string
+ desc string
+ lines []int32
+ baseLine int32
+ baseCol int32
+ offsetRanges map[int64]OffsetRange
+ macroCalls map[int64]Expr
+}
+
+// SyntaxVersion returns the syntax version associated with the text expression.
+func (s *SourceInfo) SyntaxVersion() string {
+ if s == nil {
+ return ""
+ }
+ return s.syntax
+}
+
+// Description provides information about where the expression came from.
+func (s *SourceInfo) Description() string {
+ if s == nil {
+ return ""
+ }
+ return s.desc
+}
+
+// LineOffsets returns a list of the 0-based character offsets in the input text where newlines appear.
+func (s *SourceInfo) LineOffsets() []int32 {
+ if s == nil {
+ return []int32{}
+ }
+ return s.lines
+}
+
+// MacroCalls returns a map of expression id to ast.Expr value where the id represents the expression
+// node where the macro was inserted into the AST, and the ast.Expr value represents the original call
+// signature which was replaced.
+func (s *SourceInfo) MacroCalls() map[int64]Expr {
+ if s == nil {
+ return map[int64]Expr{}
+ }
+ return s.macroCalls
+}
+
+// GetMacroCall returns the original ast.Expr value for the given expression if it was generated via
+// a macro replacement.
+//
+// Note, parsing options must be enabled to track macro calls before this method will return a value.
+func (s *SourceInfo) GetMacroCall(id int64) (Expr, bool) {
+ e, found := s.MacroCalls()[id]
+ return e, found
+}
+
+// SetMacroCall records a macro call at a specific location.
+func (s *SourceInfo) SetMacroCall(id int64, e Expr) {
+ if s != nil {
+ s.macroCalls[id] = e
+ }
+}
+
+// ClearMacroCall removes the macro call at the given expression id.
+func (s *SourceInfo) ClearMacroCall(id int64) {
+ if s != nil {
+ delete(s.macroCalls, id)
+ }
+}
+
+// OffsetRanges returns a map of expression id to OffsetRange values where the range indicates either:
+// the start and end position in the input stream where the expression occurs, or the start position
+// only. If the range only captures start position, the stop position of the range will be equal to
+// the start.
+func (s *SourceInfo) OffsetRanges() map[int64]OffsetRange {
+ if s == nil {
+ return map[int64]OffsetRange{}
+ }
+ return s.offsetRanges
+}
+
+// GetOffsetRange retrieves an OffsetRange for the given expression id if one exists.
+func (s *SourceInfo) GetOffsetRange(id int64) (OffsetRange, bool) {
+ if s == nil {
+ return OffsetRange{}, false
+ }
+ o, found := s.offsetRanges[id]
+ return o, found
+}
+
+// SetOffsetRange sets the OffsetRange for the given expression id.
+func (s *SourceInfo) SetOffsetRange(id int64, o OffsetRange) {
+ if s == nil {
+ return
+ }
+ s.offsetRanges[id] = o
+}
+
+// ClearOffsetRange removes the OffsetRange for the given expression id.
+func (s *SourceInfo) ClearOffsetRange(id int64) {
+ if s != nil {
+ delete(s.offsetRanges, id)
+ }
+}
+
+// GetStartLocation calculates the human-readable 1-based line and 0-based column of the first character
+// of the expression node at the id.
+func (s *SourceInfo) GetStartLocation(id int64) common.Location {
+ if o, found := s.GetOffsetRange(id); found {
+ return s.GetLocationByOffset(o.Start)
+ }
+ return common.NoLocation
+}
+
+// GetStopLocation calculates the human-readable 1-based line and 0-based column of the last character for
+// the expression node at the given id.
+//
+// If the SourceInfo was generated from a serialized protobuf representation, the stop location will
+// be identical to the start location for the expression.
+func (s *SourceInfo) GetStopLocation(id int64) common.Location {
+ if o, found := s.GetOffsetRange(id); found {
+ return s.GetLocationByOffset(o.Stop)
+ }
+ return common.NoLocation
+}
+
+// GetLocationByOffset returns the line and column information for a given character offset.
+func (s *SourceInfo) GetLocationByOffset(offset int32) common.Location {
+ line := 1
+ col := int(offset)
+ for _, lineOffset := range s.LineOffsets() {
+ if lineOffset > offset {
+ break
}
- typeMap[id] = t
+ line++
+ col = int(offset - lineOffset)
+ }
+ return common.NewLocation(line, col)
+}
+
+// ComputeOffset calculates the 0-based character offset from a 1-based line and 0-based column.
+func (s *SourceInfo) ComputeOffset(line, col int32) int32 {
+ if s != nil {
+ line = s.baseLine + line
+ col = s.baseCol + col
+ }
+ if line == 1 {
+ return col
+ }
+ if line < 1 || line > int32(len(s.LineOffsets())) {
+ return -1
}
- return &CheckedAST{
- Expr: checked.GetExpr(),
- SourceInfo: checked.GetSourceInfo(),
- ReferenceMap: refMap,
- TypeMap: typeMap,
- }, nil
+ offset := s.LineOffsets()[line-2]
+ return offset + col
+}
+
+// OffsetRange captures the start and stop positions of a section of text in the input expression.
+type OffsetRange struct {
+ Start int32
+ Stop int32
}
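
A worked example of the offset arithmetic above, assuming common.NewTextSource from this module. For the two-line input "a\nbc", the source's line offsets are [2 5], so offset 3 (the character 'c') falls on line 2:

	info := ast.NewSourceInfo(common.NewTextSource("a\nbc"))
	off := info.ComputeOffset(2, 1)      // 3: offset of 'c' (line 2, column 1)
	loc := info.GetLocationByOffset(off) // line 2, column 1 again

	info.SetOffsetRange(42, ast.OffsetRange{Start: off, Stop: off + 1})
	start := info.GetStartLocation(42)   // line 2, column 1
	_, _ = loc, start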
// ReferenceInfo contains a CEL native representation of an identifier reference which may refer to
@@ -149,78 +444,92 @@ func (r *ReferenceInfo) Equals(other *ReferenceInfo) bool {
return true
}
-// ReferenceInfoToReferenceExpr converts a ReferenceInfo instance to a protobuf Reference suitable for serialization.
-func ReferenceInfoToReferenceExpr(info *ReferenceInfo) (*exprpb.Reference, error) {
- c, err := ValToConstant(info.Value)
- if err != nil {
- return nil, err
+type maxIDVisitor struct {
+ maxID int64
+ *baseVisitor
+}
+
+// VisitExpr updates the max identifier if the incoming expression id is greater than previously observed.
+func (v *maxIDVisitor) VisitExpr(e Expr) {
+ if v.maxID < e.ID() {
+ v.maxID = e.ID()
}
- return &exprpb.Reference{
- Name: info.Name,
- OverloadId: info.OverloadIDs,
- Value: c,
- }, nil
}
-// ReferenceExprToReferenceInfo converts a protobuf Reference into a CEL-native ReferenceInfo instance.
-func ReferenceExprToReferenceInfo(ref *exprpb.Reference) (*ReferenceInfo, error) {
- v, err := ConstantToVal(ref.GetValue())
- if err != nil {
- return nil, err
+// VisitEntryExpr updates the max identifier if the incoming entry id is greater than previously observed.
+func (v *maxIDVisitor) VisitEntryExpr(e EntryExpr) {
+ if v.maxID < e.ID() {
+ v.maxID = e.ID()
}
- return &ReferenceInfo{
- Name: ref.GetName(),
- OverloadIDs: ref.GetOverloadId(),
- Value: v,
- }, nil
}
-// ValToConstant converts a CEL-native ref.Val to a protobuf Constant.
+type heightVisitor map[int64]int
+
+// VisitExpr computes the height of a given node as the max height of its children plus one.
//
-// Only simple scalar types are supported by this method.
-func ValToConstant(v ref.Val) (*exprpb.Constant, error) {
- if v == nil {
- return nil, nil
- }
- switch v.Type() {
- case types.BoolType:
- return &exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: v.Value().(bool)}}, nil
- case types.BytesType:
- return &exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: v.Value().([]byte)}}, nil
- case types.DoubleType:
- return &exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: v.Value().(float64)}}, nil
- case types.IntType:
- return &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: v.Value().(int64)}}, nil
- case types.NullType:
- return &exprpb.Constant{ConstantKind: &exprpb.Constant_NullValue{NullValue: structpb.NullValue_NULL_VALUE}}, nil
- case types.StringType:
- return &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: v.Value().(string)}}, nil
- case types.UintType:
- return &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: v.Value().(uint64)}}, nil
- }
- return nil, fmt.Errorf("unsupported constant kind: %v", v.Type())
-}
-
-// ConstantToVal converts a protobuf Constant to a CEL-native ref.Val.
-func ConstantToVal(c *exprpb.Constant) (ref.Val, error) {
- if c == nil {
- return nil, nil
- }
- switch c.GetConstantKind().(type) {
- case *exprpb.Constant_BoolValue:
- return types.Bool(c.GetBoolValue()), nil
- case *exprpb.Constant_BytesValue:
- return types.Bytes(c.GetBytesValue()), nil
- case *exprpb.Constant_DoubleValue:
- return types.Double(c.GetDoubleValue()), nil
- case *exprpb.Constant_Int64Value:
- return types.Int(c.GetInt64Value()), nil
- case *exprpb.Constant_NullValue:
- return types.NullValue, nil
- case *exprpb.Constant_StringValue:
- return types.String(c.GetStringValue()), nil
- case *exprpb.Constant_Uint64Value:
- return types.Uint(c.GetUint64Value()), nil
- }
- return nil, fmt.Errorf("unsupported constant kind: %v", c.GetConstantKind())
+// Identifiers and literals are treated as having a height of zero.
+func (hv heightVisitor) VisitExpr(e Expr) {
+ // default includes IdentKind, LiteralKind
+ hv[e.ID()] = 0
+ switch e.Kind() {
+ case SelectKind:
+ hv[e.ID()] = 1 + hv[e.AsSelect().Operand().ID()]
+ case CallKind:
+ c := e.AsCall()
+ height := hv.maxHeight(c.Args()...)
+ if c.IsMemberFunction() {
+ tHeight := hv[c.Target().ID()]
+ if tHeight > height {
+ height = tHeight
+ }
+ }
+ hv[e.ID()] = 1 + height
+ case ListKind:
+ l := e.AsList()
+ hv[e.ID()] = 1 + hv.maxHeight(l.Elements()...)
+ case MapKind:
+ m := e.AsMap()
+ hv[e.ID()] = 1 + hv.maxEntryHeight(m.Entries()...)
+ case StructKind:
+ s := e.AsStruct()
+ hv[e.ID()] = 1 + hv.maxEntryHeight(s.Fields()...)
+ case ComprehensionKind:
+ comp := e.AsComprehension()
+ hv[e.ID()] = 1 + hv.maxHeight(comp.IterRange(), comp.AccuInit(), comp.LoopCondition(), comp.LoopStep(), comp.Result())
+ }
+}
+
+// VisitEntryExpr computes the max height of a map or struct entry and associates the height with the entry id.
+func (hv heightVisitor) VisitEntryExpr(e EntryExpr) {
+ hv[e.ID()] = 0
+ switch e.Kind() {
+ case MapEntryKind:
+ me := e.AsMapEntry()
+ hv[e.ID()] = hv.maxHeight(me.Value(), me.Key())
+ case StructFieldKind:
+ sf := e.AsStructField()
+ hv[e.ID()] = hv[sf.Value().ID()]
+ }
+}
+
+func (hv heightVisitor) maxHeight(exprs ...Expr) int {
+ max := 0
+ for _, e := range exprs {
+ h := hv[e.ID()]
+ if h > max {
+ max = h
+ }
+ }
+ return max
+}
+
+func (hv heightVisitor) maxEntryHeight(entries ...EntryExpr) int {
+ max := 0
+ for _, e := range entries {
+ h := hv[e.ID()]
+ if h > max {
+ max = h
+ }
+ }
+ return max
}
diff --git a/vendor/github.com/google/cel-go/common/ast/conversion.go b/vendor/github.com/google/cel-go/common/ast/conversion.go
new file mode 100644
index 000000000..435d8f654
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/ast/conversion.go
@@ -0,0 +1,659 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ast
+
+import (
+ "fmt"
+
+ "google.golang.org/protobuf/proto"
+
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+
+ celpb "cel.dev/expr"
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+// ToProto converts an AST to a CheckedExpr protobuf.
+func ToProto(ast *AST) (*exprpb.CheckedExpr, error) {
+ refMap := make(map[int64]*exprpb.Reference, len(ast.ReferenceMap()))
+ for id, ref := range ast.ReferenceMap() {
+ r, err := ReferenceInfoToProto(ref)
+ if err != nil {
+ return nil, err
+ }
+ refMap[id] = r
+ }
+ typeMap := make(map[int64]*exprpb.Type, len(ast.TypeMap()))
+ for id, typ := range ast.TypeMap() {
+ t, err := types.TypeToExprType(typ)
+ if err != nil {
+ return nil, err
+ }
+ typeMap[id] = t
+ }
+ e, err := ExprToProto(ast.Expr())
+ if err != nil {
+ return nil, err
+ }
+ info, err := SourceInfoToProto(ast.SourceInfo())
+ if err != nil {
+ return nil, err
+ }
+ return &exprpb.CheckedExpr{
+ Expr: e,
+ SourceInfo: info,
+ ReferenceMap: refMap,
+ TypeMap: typeMap,
+ }, nil
+}
+
+// ToAST converts a CheckedExpr protobuf to an AST instance.
+func ToAST(checked *exprpb.CheckedExpr) (*AST, error) {
+ refMap := make(map[int64]*ReferenceInfo, len(checked.GetReferenceMap()))
+ for id, ref := range checked.GetReferenceMap() {
+ r, err := ProtoToReferenceInfo(ref)
+ if err != nil {
+ return nil, err
+ }
+ refMap[id] = r
+ }
+ typeMap := make(map[int64]*types.Type, len(checked.GetTypeMap()))
+ for id, typ := range checked.GetTypeMap() {
+ t, err := types.ExprTypeToType(typ)
+ if err != nil {
+ return nil, err
+ }
+ typeMap[id] = t
+ }
+ info, err := ProtoToSourceInfo(checked.GetSourceInfo())
+ if err != nil {
+ return nil, err
+ }
+ root, err := ProtoToExpr(checked.GetExpr())
+ if err != nil {
+ return nil, err
+ }
+ ast := NewCheckedAST(NewAST(root, info), typeMap, refMap)
+ return ast, nil
+}
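
A round-trip sketch through the two entry points above; the AST is hand-built, so its type and reference maps are simply empty (error handling elided to blanks for brevity):

	fac := ast.NewExprFactory()
	parsed := ast.NewAST(fac.NewIdent(1, "request"), ast.NewSourceInfo(nil))

	pb, _ := ast.ToProto(parsed) // *exprpb.CheckedExpr with empty maps
	restored, _ := ast.ToAST(pb)
	// restored.Expr().AsIdent() == "request"
	// restored.IsChecked() == false, since no type map was ever attached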
+
+// ProtoToExpr converts a protobuf Expr value to an ast.Expr value.
+func ProtoToExpr(e *exprpb.Expr) (Expr, error) {
+ factory := NewExprFactory()
+ return exprInternal(factory, e)
+}
+
+// ProtoToEntryExpr converts a protobuf struct/map entry to an ast.EntryExpr.
+func ProtoToEntryExpr(e *exprpb.Expr_CreateStruct_Entry) (EntryExpr, error) {
+ factory := NewExprFactory()
+ switch e.GetKeyKind().(type) {
+ case *exprpb.Expr_CreateStruct_Entry_FieldKey:
+ return exprStructField(factory, e.GetId(), e)
+ case *exprpb.Expr_CreateStruct_Entry_MapKey:
+ return exprMapEntry(factory, e.GetId(), e)
+ }
+ return nil, fmt.Errorf("unsupported expr entry kind: %v", e)
+}
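
For example, a hand-assembled v1alpha1 ident node converts as follows (a sketch using the exprpb message literals imported above):

	pbIdent := &exprpb.Expr{
		Id:       1,
		ExprKind: &exprpb.Expr_IdentExpr{IdentExpr: &exprpb.Expr_Ident{Name: "request"}},
	}
	e, err := ast.ProtoToExpr(pbIdent)
	// err == nil; e.Kind() == ast.IdentKind; e.AsIdent() == "request"
	_, _ = e, err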
+
+func exprInternal(factory ExprFactory, e *exprpb.Expr) (Expr, error) {
+ id := e.GetId()
+ switch e.GetExprKind().(type) {
+ case *exprpb.Expr_CallExpr:
+ return exprCall(factory, id, e.GetCallExpr())
+ case *exprpb.Expr_ComprehensionExpr:
+ return exprComprehension(factory, id, e.GetComprehensionExpr())
+ case *exprpb.Expr_ConstExpr:
+ return exprLiteral(factory, id, e.GetConstExpr())
+ case *exprpb.Expr_IdentExpr:
+ return exprIdent(factory, id, e.GetIdentExpr())
+ case *exprpb.Expr_ListExpr:
+ return exprList(factory, id, e.GetListExpr())
+ case *exprpb.Expr_SelectExpr:
+ return exprSelect(factory, id, e.GetSelectExpr())
+ case *exprpb.Expr_StructExpr:
+ s := e.GetStructExpr()
+ if s.GetMessageName() != "" {
+ return exprStruct(factory, id, s)
+ }
+ return exprMap(factory, id, s)
+ }
+ return factory.NewUnspecifiedExpr(id), nil
+}
+
+func exprCall(factory ExprFactory, id int64, call *exprpb.Expr_Call) (Expr, error) {
+ var err error
+ args := make([]Expr, len(call.GetArgs()))
+ for i, a := range call.GetArgs() {
+ args[i], err = exprInternal(factory, a)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if call.GetTarget() == nil {
+ return factory.NewCall(id, call.GetFunction(), args...), nil
+ }
+
+ target, err := exprInternal(factory, call.GetTarget())
+ if err != nil {
+ return nil, err
+ }
+ return factory.NewMemberCall(id, call.GetFunction(), target, args...), nil
+}
+
+func exprComprehension(factory ExprFactory, id int64, comp *exprpb.Expr_Comprehension) (Expr, error) {
+ iterRange, err := exprInternal(factory, comp.GetIterRange())
+ if err != nil {
+ return nil, err
+ }
+ accuInit, err := exprInternal(factory, comp.GetAccuInit())
+ if err != nil {
+ return nil, err
+ }
+ loopCond, err := exprInternal(factory, comp.GetLoopCondition())
+ if err != nil {
+ return nil, err
+ }
+ loopStep, err := exprInternal(factory, comp.GetLoopStep())
+ if err != nil {
+ return nil, err
+ }
+ result, err := exprInternal(factory, comp.GetResult())
+ if err != nil {
+ return nil, err
+ }
+ return factory.NewComprehensionTwoVar(id,
+ iterRange,
+ comp.GetIterVar(),
+ comp.GetIterVar2(),
+ comp.GetAccuVar(),
+ accuInit,
+ loopCond,
+ loopStep,
+ result), nil
+}
+
+func exprLiteral(factory ExprFactory, id int64, c *exprpb.Constant) (Expr, error) {
+ val, err := ConstantToVal(c)
+ if err != nil {
+ return nil, err
+ }
+ return factory.NewLiteral(id, val), nil
+}
+
+func exprIdent(factory ExprFactory, id int64, i *exprpb.Expr_Ident) (Expr, error) {
+ return factory.NewIdent(id, i.GetName()), nil
+}
+
+func exprList(factory ExprFactory, id int64, l *exprpb.Expr_CreateList) (Expr, error) {
+ elems := make([]Expr, len(l.GetElements()))
+ for i, e := range l.GetElements() {
+ elem, err := exprInternal(factory, e)
+ if err != nil {
+ return nil, err
+ }
+ elems[i] = elem
+ }
+ return factory.NewList(id, elems, l.GetOptionalIndices()), nil
+}
+
+func exprMap(factory ExprFactory, id int64, s *exprpb.Expr_CreateStruct) (Expr, error) {
+ entries := make([]EntryExpr, len(s.GetEntries()))
+ var err error
+ for i, entry := range s.GetEntries() {
+ entries[i], err = exprMapEntry(factory, entry.GetId(), entry)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return factory.NewMap(id, entries), nil
+}
+
+func exprMapEntry(factory ExprFactory, id int64, e *exprpb.Expr_CreateStruct_Entry) (EntryExpr, error) {
+ k, err := exprInternal(factory, e.GetMapKey())
+ if err != nil {
+ return nil, err
+ }
+ v, err := exprInternal(factory, e.GetValue())
+ if err != nil {
+ return nil, err
+ }
+ return factory.NewMapEntry(id, k, v, e.GetOptionalEntry()), nil
+}
+
+func exprSelect(factory ExprFactory, id int64, s *exprpb.Expr_Select) (Expr, error) {
+ op, err := exprInternal(factory, s.GetOperand())
+ if err != nil {
+ return nil, err
+ }
+ if s.GetTestOnly() {
+ return factory.NewPresenceTest(id, op, s.GetField()), nil
+ }
+ return factory.NewSelect(id, op, s.GetField()), nil
+}
+
+func exprStruct(factory ExprFactory, id int64, s *exprpb.Expr_CreateStruct) (Expr, error) {
+ fields := make([]EntryExpr, len(s.GetEntries()))
+ var err error
+ for i, field := range s.GetEntries() {
+ fields[i], err = exprStructField(factory, field.GetId(), field)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return factory.NewStruct(id, s.GetMessageName(), fields), nil
+}
+
+func exprStructField(factory ExprFactory, id int64, f *exprpb.Expr_CreateStruct_Entry) (EntryExpr, error) {
+ v, err := exprInternal(factory, f.GetValue())
+ if err != nil {
+ return nil, err
+ }
+ return factory.NewStructField(id, f.GetFieldKey(), v, f.GetOptionalEntry()), nil
+}
+
+// ExprToProto serializes an ast.Expr value to a protobuf Expr representation.
+func ExprToProto(e Expr) (*exprpb.Expr, error) {
+ if e == nil {
+ return &exprpb.Expr{}, nil
+ }
+ switch e.Kind() {
+ case CallKind:
+ return protoCall(e.ID(), e.AsCall())
+ case ComprehensionKind:
+ return protoComprehension(e.ID(), e.AsComprehension())
+ case IdentKind:
+ return protoIdent(e.ID(), e.AsIdent())
+ case ListKind:
+ return protoList(e.ID(), e.AsList())
+ case LiteralKind:
+ return protoLiteral(e.ID(), e.AsLiteral())
+ case MapKind:
+ return protoMap(e.ID(), e.AsMap())
+ case SelectKind:
+ return protoSelect(e.ID(), e.AsSelect())
+ case StructKind:
+ return protoStruct(e.ID(), e.AsStruct())
+ case UnspecifiedExprKind:
+ // Handle the case where a macro reference may be getting translated.
+ // A nested macro 'pointer' is a non-zero expression id with no kind set.
+ if e.ID() != 0 {
+ return &exprpb.Expr{Id: e.ID()}, nil
+ }
+ return &exprpb.Expr{}, nil
+ }
+ return nil, fmt.Errorf("unsupported expr kind: %v", e)
+}
+
+// EntryExprToProto converts an ast.EntryExpr to a protobuf CreateStruct entry.
+func EntryExprToProto(e EntryExpr) (*exprpb.Expr_CreateStruct_Entry, error) {
+ switch e.Kind() {
+ case MapEntryKind:
+ return protoMapEntry(e.ID(), e.AsMapEntry())
+ case StructFieldKind:
+ return protoStructField(e.ID(), e.AsStructField())
+ case UnspecifiedEntryExprKind:
+ return &exprpb.Expr_CreateStruct_Entry{}, nil
+ }
+ return nil, fmt.Errorf("unsupported expr entry kind: %v", e)
+}
+
+func protoCall(id int64, call CallExpr) (*exprpb.Expr, error) {
+ var err error
+ var target *exprpb.Expr
+ if call.IsMemberFunction() {
+ target, err = ExprToProto(call.Target())
+ if err != nil {
+ return nil, err
+ }
+ }
+ callArgs := call.Args()
+ args := make([]*exprpb.Expr, len(callArgs))
+ for i, a := range callArgs {
+ args[i], err = ExprToProto(a)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return &exprpb.Expr{
+ Id: id,
+ ExprKind: &exprpb.Expr_CallExpr{
+ CallExpr: &exprpb.Expr_Call{
+ Function: call.FunctionName(),
+ Target: target,
+ Args: args,
+ },
+ },
+ }, nil
+}
+
+func protoComprehension(id int64, comp ComprehensionExpr) (*exprpb.Expr, error) {
+ iterRange, err := ExprToProto(comp.IterRange())
+ if err != nil {
+ return nil, err
+ }
+ accuInit, err := ExprToProto(comp.AccuInit())
+ if err != nil {
+ return nil, err
+ }
+ loopCond, err := ExprToProto(comp.LoopCondition())
+ if err != nil {
+ return nil, err
+ }
+ loopStep, err := ExprToProto(comp.LoopStep())
+ if err != nil {
+ return nil, err
+ }
+ result, err := ExprToProto(comp.Result())
+ if err != nil {
+ return nil, err
+ }
+ return &exprpb.Expr{
+ Id: id,
+ ExprKind: &exprpb.Expr_ComprehensionExpr{
+ ComprehensionExpr: &exprpb.Expr_Comprehension{
+ IterVar: comp.IterVar(),
+ IterVar2: comp.IterVar2(),
+ IterRange: iterRange,
+ AccuVar: comp.AccuVar(),
+ AccuInit: accuInit,
+ LoopCondition: loopCond,
+ LoopStep: loopStep,
+ Result: result,
+ },
+ },
+ }, nil
+}
+
+func protoIdent(id int64, name string) (*exprpb.Expr, error) {
+ return &exprpb.Expr{
+ Id: id,
+ ExprKind: &exprpb.Expr_IdentExpr{
+ IdentExpr: &exprpb.Expr_Ident{
+ Name: name,
+ },
+ },
+ }, nil
+}
+
+func protoList(id int64, list ListExpr) (*exprpb.Expr, error) {
+ var err error
+ elems := make([]*exprpb.Expr, list.Size())
+ for i, e := range list.Elements() {
+ elems[i], err = ExprToProto(e)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return &exprpb.Expr{
+ Id: id,
+ ExprKind: &exprpb.Expr_ListExpr{
+ ListExpr: &exprpb.Expr_CreateList{
+ Elements: elems,
+ OptionalIndices: list.OptionalIndices(),
+ },
+ },
+ }, nil
+}
+
+func protoLiteral(id int64, val ref.Val) (*exprpb.Expr, error) {
+ c, err := ValToConstant(val)
+ if err != nil {
+ return nil, err
+ }
+ return &exprpb.Expr{
+ Id: id,
+ ExprKind: &exprpb.Expr_ConstExpr{
+ ConstExpr: c,
+ },
+ }, nil
+}
+
+func protoMap(id int64, m MapExpr) (*exprpb.Expr, error) {
+ entries := make([]*exprpb.Expr_CreateStruct_Entry, len(m.Entries()))
+ var err error
+ for i, e := range m.Entries() {
+ entries[i], err = EntryExprToProto(e)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return &exprpb.Expr{
+ Id: id,
+ ExprKind: &exprpb.Expr_StructExpr{
+ StructExpr: &exprpb.Expr_CreateStruct{
+ Entries: entries,
+ },
+ },
+ }, nil
+}
+
+func protoMapEntry(id int64, e MapEntry) (*exprpb.Expr_CreateStruct_Entry, error) {
+ k, err := ExprToProto(e.Key())
+ if err != nil {
+ return nil, err
+ }
+ v, err := ExprToProto(e.Value())
+ if err != nil {
+ return nil, err
+ }
+ return &exprpb.Expr_CreateStruct_Entry{
+ Id: id,
+ KeyKind: &exprpb.Expr_CreateStruct_Entry_MapKey{
+ MapKey: k,
+ },
+ Value: v,
+ OptionalEntry: e.IsOptional(),
+ }, nil
+}
+
+func protoSelect(id int64, s SelectExpr) (*exprpb.Expr, error) {
+ op, err := ExprToProto(s.Operand())
+ if err != nil {
+ return nil, err
+ }
+ return &exprpb.Expr{
+ Id: id,
+ ExprKind: &exprpb.Expr_SelectExpr{
+ SelectExpr: &exprpb.Expr_Select{
+ Operand: op,
+ Field: s.FieldName(),
+ TestOnly: s.IsTestOnly(),
+ },
+ },
+ }, nil
+}
+
+func protoStruct(id int64, s StructExpr) (*exprpb.Expr, error) {
+ entries := make([]*exprpb.Expr_CreateStruct_Entry, len(s.Fields()))
+ var err error
+ for i, e := range s.Fields() {
+ entries[i], err = EntryExprToProto(e)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return &exprpb.Expr{
+ Id: id,
+ ExprKind: &exprpb.Expr_StructExpr{
+ StructExpr: &exprpb.Expr_CreateStruct{
+ MessageName: s.TypeName(),
+ Entries: entries,
+ },
+ },
+ }, nil
+}
+
+func protoStructField(id int64, f StructField) (*exprpb.Expr_CreateStruct_Entry, error) {
+ v, err := ExprToProto(f.Value())
+ if err != nil {
+ return nil, err
+ }
+ return &exprpb.Expr_CreateStruct_Entry{
+ Id: id,
+ KeyKind: &exprpb.Expr_CreateStruct_Entry_FieldKey{
+ FieldKey: f.Name(),
+ },
+ Value: v,
+ OptionalEntry: f.IsOptional(),
+ }, nil
+}
+
+// SourceInfoToProto serializes an ast.SourceInfo value to a protobuf SourceInfo object.
+func SourceInfoToProto(info *SourceInfo) (*exprpb.SourceInfo, error) {
+ if info == nil {
+ return &exprpb.SourceInfo{}, nil
+ }
+ sourceInfo := &exprpb.SourceInfo{
+ SyntaxVersion: info.SyntaxVersion(),
+ Location: info.Description(),
+ LineOffsets: info.LineOffsets(),
+ Positions: make(map[int64]int32, len(info.OffsetRanges())),
+ MacroCalls: make(map[int64]*exprpb.Expr, len(info.MacroCalls())),
+ }
+ for id, offset := range info.OffsetRanges() {
+ sourceInfo.Positions[id] = offset.Start
+ }
+ for id, e := range info.MacroCalls() {
+ call, err := ExprToProto(e)
+ if err != nil {
+ return nil, err
+ }
+ sourceInfo.MacroCalls[id] = call
+ }
+ return sourceInfo, nil
+}
+
+// ProtoToSourceInfo deserializes the protobuf into a native SourceInfo value.
+func ProtoToSourceInfo(info *exprpb.SourceInfo) (*SourceInfo, error) {
+ sourceInfo := &SourceInfo{
+ syntax: info.GetSyntaxVersion(),
+ desc: info.GetLocation(),
+ lines: info.GetLineOffsets(),
+ offsetRanges: make(map[int64]OffsetRange, len(info.GetPositions())),
+ macroCalls: make(map[int64]Expr, len(info.GetMacroCalls())),
+ }
+ for id, offset := range info.GetPositions() {
+ sourceInfo.SetOffsetRange(id, OffsetRange{Start: offset, Stop: offset})
+ }
+ for id, e := range info.GetMacroCalls() {
+ call, err := ProtoToExpr(e)
+ if err != nil {
+ return nil, err
+ }
+ sourceInfo.SetMacroCall(id, call)
+ }
+ return sourceInfo, nil
+}
+
+// ReferenceInfoToProto converts a ReferenceInfo instance to a protobuf Reference suitable for serialization.
+func ReferenceInfoToProto(info *ReferenceInfo) (*exprpb.Reference, error) {
+ c, err := ValToConstant(info.Value)
+ if err != nil {
+ return nil, err
+ }
+ return &exprpb.Reference{
+ Name: info.Name,
+ OverloadId: info.OverloadIDs,
+ Value: c,
+ }, nil
+}
+
+// ProtoToReferenceInfo converts a protobuf Reference into a CEL-native ReferenceInfo instance.
+func ProtoToReferenceInfo(ref *exprpb.Reference) (*ReferenceInfo, error) {
+ v, err := ConstantToVal(ref.GetValue())
+ if err != nil {
+ return nil, err
+ }
+ return &ReferenceInfo{
+ Name: ref.GetName(),
+ OverloadIDs: ref.GetOverloadId(),
+ Value: v,
+ }, nil
+}
+
+// ValToConstant converts a CEL-native ref.Val to a protobuf Constant.
+//
+// Only simple scalar types are supported by this method.
+func ValToConstant(v ref.Val) (*exprpb.Constant, error) {
+ if v == nil {
+ return nil, nil
+ }
+ switch v.Type() {
+ case types.BoolType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: v.Value().(bool)}}, nil
+ case types.BytesType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: v.Value().([]byte)}}, nil
+ case types.DoubleType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: v.Value().(float64)}}, nil
+ case types.IntType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: v.Value().(int64)}}, nil
+ case types.NullType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_NullValue{NullValue: structpb.NullValue_NULL_VALUE}}, nil
+ case types.StringType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: v.Value().(string)}}, nil
+ case types.UintType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: v.Value().(uint64)}}, nil
+ }
+ return nil, fmt.Errorf("unsupported constant kind: %v", v.Type())
+}
+
+// ConstantToVal converts a protobuf Constant to a CEL-native ref.Val.
+func ConstantToVal(c *exprpb.Constant) (ref.Val, error) {
+ return AlphaProtoConstantAsVal(c)
+}
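
A scalar round trip through the helpers above; non-scalar values such as lists, maps, or messages are rejected with an error instead:

	c, _ := ast.ValToConstant(types.String("hello"))
	// c.GetStringValue() == "hello"
	v, _ := ast.ConstantToVal(c)
	// v.Equal(types.String("hello")) == types.True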
+
+// AlphaProtoConstantAsVal converts a v1alpha1.Constant protobuf to a CEL-native ref.Val.
+func AlphaProtoConstantAsVal(c *exprpb.Constant) (ref.Val, error) {
+ if c == nil {
+ return nil, nil
+ }
+ canonical := &celpb.Constant{}
+ if err := convertProto(c, canonical); err != nil {
+ return nil, err
+ }
+ return ProtoConstantAsVal(canonical)
+}
+
+// ProtoConstantAsVal converts a canonical celpb.Constant protobuf to a CEL-native ref.Val.
+func ProtoConstantAsVal(c *celpb.Constant) (ref.Val, error) {
+ switch c.GetConstantKind().(type) {
+ case *celpb.Constant_BoolValue:
+ return types.Bool(c.GetBoolValue()), nil
+ case *celpb.Constant_BytesValue:
+ return types.Bytes(c.GetBytesValue()), nil
+ case *celpb.Constant_DoubleValue:
+ return types.Double(c.GetDoubleValue()), nil
+ case *celpb.Constant_Int64Value:
+ return types.Int(c.GetInt64Value()), nil
+ case *celpb.Constant_NullValue:
+ return types.NullValue, nil
+ case *celpb.Constant_StringValue:
+ return types.String(c.GetStringValue()), nil
+ case *celpb.Constant_Uint64Value:
+ return types.Uint(c.GetUint64Value()), nil
+ }
+ return nil, fmt.Errorf("unsupported constant kind: %v", c.GetConstantKind())
+}
+
+func convertProto(src, dst proto.Message) error {
+ pb, err := proto.Marshal(src)
+ if err != nil {
+ return err
+ }
+ err = proto.Unmarshal(pb, dst)
+ return err
+}
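
Note the design choice in convertProto: rather than copying fields by hand, it round-trips through the wire format, which works because the v1alpha1 and canonical cel.dev/expr messages are kept wire-compatible (matching field numbers and types). The price is an extra marshal/unmarshal per conversion, which is modest for the small Constant messages converted here.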
diff --git a/vendor/github.com/google/cel-go/common/ast/expr.go b/vendor/github.com/google/cel-go/common/ast/expr.go
index b63884a60..9f55cb3b9 100644
--- a/vendor/github.com/google/cel-go/common/ast/expr.go
+++ b/vendor/github.com/google/cel-go/common/ast/expr.go
@@ -15,168 +15,61 @@
package ast
import (
- "github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
// ExprKind represents the expression node kind.
type ExprKind int
const (
- // UnspecifiedKind represents an unset expression with no specified properties.
- UnspecifiedKind ExprKind = iota
+ // UnspecifiedExprKind represents an unset expression with no specified properties.
+ UnspecifiedExprKind ExprKind = iota
- // LiteralKind represents a primitive scalar literal.
- LiteralKind
+ // CallKind represents a function call.
+ CallKind
+
+ // ComprehensionKind represents a comprehension expression generated by a macro.
+ ComprehensionKind
// IdentKind represents a simple variable, constant, or type identifier.
IdentKind
- // SelectKind represents a field selection expression.
- SelectKind
-
- // CallKind represents a function call.
- CallKind
-
// ListKind represents a list literal expression.
ListKind
+ // LiteralKind represents a primitive scalar literal.
+ LiteralKind
+
// MapKind represents a map literal expression.
MapKind
+ // SelectKind represents a field selection expression.
+ SelectKind
+
// StructKind represents a struct literal expression.
StructKind
-
- // ComprehensionKind represents a comprehension expression generated by a macro.
- ComprehensionKind
)
-// NavigateCheckedAST converts a CheckedAST to a NavigableExpr
-func NavigateCheckedAST(ast *CheckedAST) NavigableExpr {
- return newNavigableExpr(nil, ast.Expr, ast.TypeMap)
-}
-
-// ExprMatcher takes a NavigableExpr in and indicates whether the value is a match.
-//
-// This function type should be use with the `Match` and `MatchList` calls.
-type ExprMatcher func(NavigableExpr) bool
-
-// ConstantValueMatcher returns an ExprMatcher which will return true if the input NavigableExpr
-// is comprised of all constant values, such as a simple literal or even list and map literal.
-func ConstantValueMatcher() ExprMatcher {
- return matchIsConstantValue
-}
-
-// KindMatcher returns an ExprMatcher which will return true if the input NavigableExpr.Kind() matches
-// the specified `kind`.
-func KindMatcher(kind ExprKind) ExprMatcher {
- return func(e NavigableExpr) bool {
- return e.Kind() == kind
- }
-}
-
-// FunctionMatcher returns an ExprMatcher which will match NavigableExpr nodes of CallKind type whose
-// function name is equal to `funcName`.
-func FunctionMatcher(funcName string) ExprMatcher {
- return func(e NavigableExpr) bool {
- if e.Kind() != CallKind {
- return false
- }
- return e.AsCall().FunctionName() == funcName
- }
-}
-
-// AllMatcher returns true for all descendants of a NavigableExpr, effectively flattening them into a list.
-//
-// Such a result would work well with subsequent MatchList calls.
-func AllMatcher() ExprMatcher {
- return func(NavigableExpr) bool {
- return true
- }
-}
-
-// MatchDescendants takes a NavigableExpr and ExprMatcher and produces a list of NavigableExpr values of the
-// descendants which match.
-func MatchDescendants(expr NavigableExpr, matcher ExprMatcher) []NavigableExpr {
- return matchListInternal([]NavigableExpr{expr}, matcher, true)
-}
-
-// MatchSubset applies an ExprMatcher to a list of NavigableExpr values and their descendants, producing a
-// subset of NavigableExpr values which match.
-func MatchSubset(exprs []NavigableExpr, matcher ExprMatcher) []NavigableExpr {
- visit := make([]NavigableExpr, len(exprs))
- copy(visit, exprs)
- return matchListInternal(visit, matcher, false)
-}
-
-func matchListInternal(visit []NavigableExpr, matcher ExprMatcher, visitDescendants bool) []NavigableExpr {
- var matched []NavigableExpr
- for len(visit) != 0 {
- e := visit[0]
- if matcher(e) {
- matched = append(matched, e)
- }
- if visitDescendants {
- visit = append(visit[1:], e.Children()...)
- } else {
- visit = visit[1:]
- }
- }
- return matched
-}
-
-func matchIsConstantValue(e NavigableExpr) bool {
- if e.Kind() == LiteralKind {
- return true
- }
- if e.Kind() == StructKind || e.Kind() == MapKind || e.Kind() == ListKind {
- for _, child := range e.Children() {
- if !matchIsConstantValue(child) {
- return false
- }
- }
- return true
- }
- return false
-}
-
-// NavigableExpr represents the base navigable expression value.
+// Expr represents the base expression node in a CEL abstract syntax tree.
//
-// Depending on the `Kind()` value, the NavigableExpr may be converted to a concrete expression types
+// Depending on the `Kind()` value, the Expr may be converted to concrete expression types
// as indicated by the `As` methods.
-//
-// NavigableExpr values and their concrete expression types should be nil-safe. Conversion of an expr
-// to the wrong kind should produce a nil value.
-type NavigableExpr interface {
+type Expr interface {
// ID of the expression as it appears in the AST
ID() int64
// Kind of the expression node. See ExprKind for the valid enum values.
Kind() ExprKind
- // Type of the expression node.
- Type() *types.Type
-
- // Parent returns the parent expression node, if one exists.
- Parent() (NavigableExpr, bool)
-
- // Children returns a list of child expression nodes.
- Children() []NavigableExpr
-
- // ToExpr adapts this NavigableExpr to a protobuf representation.
- ToExpr() *exprpb.Expr
-
- // AsCall adapts the expr into a NavigableCallExpr
+ // AsCall adapts the expr into a CallExpr
//
// The Kind() must be equal to a CallKind for the conversion to be well-defined.
- AsCall() NavigableCallExpr
+ AsCall() CallExpr
- // AsComprehension adapts the expr into a NavigableComprehensionExpr.
+ // AsComprehension adapts the expr into a ComprehensionExpr.
//
// The Kind() must be equal to a ComprehensionKind for the conversion to be well-defined.
- AsComprehension() NavigableComprehensionExpr
+ AsComprehension() ComprehensionExpr
// AsIdent adapts the expr into an identifier string.
//
@@ -188,67 +81,123 @@ type NavigableExpr interface {
// The Kind() must be equal to a LiteralKind for the conversion to be well-defined.
AsLiteral() ref.Val
- // AsList adapts the expr into a NavigableListExpr.
+ // AsList adapts the expr into a ListExpr.
//
// The Kind() must be equal to a ListKind for the conversion to be well-defined.
- AsList() NavigableListExpr
+ AsList() ListExpr
- // AsMap adapts the expr into a NavigableMapExpr.
+ // AsMap adapts the expr into a MapExpr.
//
// The Kind() must be equal to a MapKind for the conversion to be well-defined.
- AsMap() NavigableMapExpr
+ AsMap() MapExpr
- // AsSelect adapts the expr into a NavigableSelectExpr.
+ // AsSelect adapts the expr into a SelectExpr.
//
// The Kind() must be equal to a SelectKind for the conversion to be well-defined.
- AsSelect() NavigableSelectExpr
+ AsSelect() SelectExpr
- // AsStruct adapts the expr into a NavigableStructExpr.
+ // AsStruct adapts the expr into a StructExpr.
//
// The Kind() must be equal to a StructKind for the conversion to be well-defined.
- AsStruct() NavigableStructExpr
+ AsStruct() StructExpr
- // marker interface method
- isNavigable()
+ // RenumberIDs performs an in-place update of the expression and all of its descendants' numeric ids.
+ RenumberIDs(IDGenerator)
+
+ // SetKindCase replaces the contents of the current expression with the contents of the other.
+ //
+ // SetKindCase takes ownership of any expression instances referenced within the input Expr.
+ // A shallow copy is made of the Expr value itself, but not a deep one.
+ //
+ // This method should only be used during AST rewrites using temporary Expr values.
+ SetKindCase(Expr)
+
+ // isExpr is a marker interface.
+ isExpr()
}
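
A typical consumer switches on Kind() before calling the matching As* accessor; per the contracts above, a mismatched conversion yields a nil-safe zero value rather than a panic. A sketch, with the describe helper and the fmt import as assumptions:

	func describe(e ast.Expr) string {
		switch e.Kind() {
		case ast.IdentKind:
			return "ident " + e.AsIdent()
		case ast.SelectKind:
			s := e.AsSelect()
			return describe(s.Operand()) + "." + s.FieldName()
		case ast.CallKind:
			return "call " + e.AsCall().FunctionName()
		case ast.LiteralKind:
			return fmt.Sprintf("literal %v", e.AsLiteral().Value())
		default:
			return "other"
		}
	}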
-// NavigableCallExpr defines an interface for inspecting a function call and its arugments.
-type NavigableCallExpr interface {
+// EntryExprKind represents the possible EntryExpr kinds.
+type EntryExprKind int
+
+const (
+ // UnspecifiedEntryExprKind indicates that the entry expr is not set.
+ UnspecifiedEntryExprKind EntryExprKind = iota
+
+ // MapEntryKind indicates that the entry is a MapEntry type with key and value expressions.
+ MapEntryKind
+
+ // StructFieldKind indicates that the entry is a StructField with a field name and initializer
+ // expression.
+ StructFieldKind
+)
+
+// EntryExpr represents the base entry expression in a CEL map or struct literal.
+type EntryExpr interface {
+ // ID of the entry as it appears in the AST.
+ ID() int64
+
+ // Kind of the entry expression node. See EntryExprKind for valid enum values.
+ Kind() EntryExprKind
+
+ // AsMapEntry casts the EntryExpr to a MapEntry.
+ //
+ // The Kind() must be equal to MapEntryKind for the conversion to be well-defined.
+ AsMapEntry() MapEntry
+
+ // AsStructField casts the EntryExpr to a StructField
+ //
+ // The Kind() must be equal to StructFieldKind for the conversion to be well-defined.
+ AsStructField() StructField
+
+ // RenumberIDs performs an in-place update of the expression and all of its descendants' numeric ids.
+ RenumberIDs(IDGenerator)
+
+ isEntryExpr()
+}
+
+// IDGenerator produces unique ids suitable for tagging expression nodes.
+type IDGenerator func(originalID int64) int64
+
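
One assumed use of an IDGenerator, not prescribed by this package: shifting every id in a tree by a fixed offset so two trees become id-disjoint before one is spliced into the other. Given some Expr value e:

	var gen ast.IDGenerator = func(originalID int64) int64 {
		return originalID + 1000
	}
	e.RenumberIDs(gen) // every node id in e is shifted in place by 1000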
+// CallExpr defines an interface for inspecting a function call and its arguments.
+type CallExpr interface {
// FunctionName returns the name of the function.
FunctionName() string
+ // IsMemberFunction returns whether the call has a non-nil target, indicating it is a member function.
+ IsMemberFunction() bool
+
// Target returns the target of the expression if one is present.
- Target() NavigableExpr
+ Target() Expr
// Args returns the list of call arguments, excluding the target.
- Args() []NavigableExpr
-
- // ReturnType returns the result type of the call.
- ReturnType() *types.Type
+ Args() []Expr
// marker interface method
- isNavigable()
+ isExpr()
}
-// NavigableListExpr defines an interface for inspecting a list literal expression.
-type NavigableListExpr interface {
+// ListExpr defines an interface for inspecting a list literal expression.
+type ListExpr interface {
// Elements returns the list elements as navigable expressions.
- Elements() []NavigableExpr
+ Elements() []Expr
// OptionalIndices returns the list of optional indices in the list literal.
OptionalIndices() []int32
+ // IsOptional indicates whether the given element index is optional.
+ IsOptional(int32) bool
+
// Size returns the number of elements in the list.
Size() int
// marker interface method
- isNavigable()
+ isExpr()
}
-// NavigableSelectExpr defines an interface for inspecting a select expression.
-type NavigableSelectExpr interface {
+// SelectExpr defines an interface for inspecting a select expression.
+type SelectExpr interface {
// Operand returns the selection operand expression.
- Operand() NavigableExpr
+ Operand() Expr
// FieldName returns the field name being selected from the operand.
FieldName() string
@@ -257,453 +206,679 @@ type NavigableSelectExpr interface {
IsTestOnly() bool
// marker interface method
- isNavigable()
+ isExpr()
}
-// NavigableMapExpr defines an interface for inspecting a map expression.
-type NavigableMapExpr interface {
- // Entries returns the map key value pairs as NavigableEntry values.
- Entries() []NavigableEntry
+// MapExpr defines an interface for inspecting a map expression.
+type MapExpr interface {
+ // Entries returns the map key value pairs as EntryExpr values.
+ Entries() []EntryExpr
// Size returns the number of entries in the map.
Size() int
// marker interface method
- isNavigable()
+ isExpr()
}
-// NavigableEntry defines an interface for inspecting a map entry.
-type NavigableEntry interface {
+// MapEntry defines an interface for inspecting a map entry.
+type MapEntry interface {
// Key returns the map entry key expression.
- Key() NavigableExpr
+ Key() Expr
// Value returns the map entry value expression.
- Value() NavigableExpr
+ Value() Expr
// IsOptional returns whether the entry is optional.
IsOptional() bool
// marker interface method
- isNavigable()
+ isEntryExpr()
}
-// NavigableStructExpr defines an interfaces for inspecting a struct and its field initializers.
-type NavigableStructExpr interface {
+// StructExpr defines an interface for inspecting a struct and its field initializers.
+type StructExpr interface {
// TypeName returns the struct type name.
TypeName() string
- // Fields returns the set of field initializers in the struct expression as NavigableField values.
- Fields() []NavigableField
+ // Fields returns the set of field initializers in the struct expression as EntryExpr values.
+ Fields() []EntryExpr
// marker interface method
- isNavigable()
+ isExpr()
}
-// NavigableField defines an interface for inspecting a struct field initialization.
-type NavigableField interface {
- // FieldName returns the name of the field.
- FieldName() string
+// StructField defines an interface for inspecting a struct field initialization.
+type StructField interface {
+ // Name returns the name of the field.
+ Name() string
// Value returns the field initialization expression.
- Value() NavigableExpr
+ Value() Expr
// IsOptional returns whether the field is optional.
IsOptional() bool
// marker interface method
- isNavigable()
+ isEntryExpr()
}
-// NavigableComprehensionExpr defines an interface for inspecting a comprehension expression.
-type NavigableComprehensionExpr interface {
+// ComprehensionExpr defines an interface for inspecting a comprehension expression.
+type ComprehensionExpr interface {
// IterRange returns the iteration range expression.
- IterRange() NavigableExpr
+ IterRange() Expr
// IterVar returns the iteration variable name.
+ //
+ // For one-variable comprehensions, the iter var refers to the element value
+ // when iterating over a list, or the map key when iterating over a map.
+ //
+ // For two-variable comprehensions, the iter var refers to the list index or the
+ // map key.
IterVar() string
+ // IterVar2 returns the second iteration variable name.
+ //
+ // When the value is non-empty, the comprehension is a two-variable comprehension.
+ IterVar2() string
+
+ // HasIterVar2 returns true if the second iteration variable is non-empty.
+ HasIterVar2() bool
+
// AccuVar returns the accumulation variable name.
AccuVar() string
// AccuInit returns the accumulation variable initialization expression.
- AccuInit() NavigableExpr
+ AccuInit() Expr
// LoopCondition returns the loop condition expression.
- LoopCondition() NavigableExpr
+ LoopCondition() Expr
// LoopStep returns the loop step expression.
- LoopStep() NavigableExpr
+ LoopStep() Expr
// Result returns the comprehension result expression.
- Result() NavigableExpr
+ Result() Expr
// marker interface method
- isNavigable()
+ isExpr()
+}
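
Per the comments above, a consumer distinguishes the two comprehension forms by checking HasIterVar2. A sketch, given some Expr value e of ComprehensionKind:

	comp := e.AsComprehension()
	if comp.HasIterVar2() {
		// Two-variable form: IterVar() is the list index or map key,
		// and IterVar2() names the corresponding value.
		_, _ = comp.IterVar(), comp.IterVar2()
	} else {
		// One-variable form: IterVar() is the element value for lists,
		// or the key for maps.
		_ = comp.IterVar()
	}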
+
+var _ Expr = &expr{}
+
+type expr struct {
+ id int64
+ exprKindCase
+}
+
+type exprKindCase interface {
+ Kind() ExprKind
+
+ renumberIDs(IDGenerator)
+
+ isExpr()
}
-func newNavigableExpr(parent NavigableExpr, expr *exprpb.Expr, typeMap map[int64]*types.Type) NavigableExpr {
- kind, factory := kindOf(expr)
- nav := &navigableExprImpl{
- parent: parent,
- kind: kind,
- expr: expr,
- typeMap: typeMap,
- createChildren: factory,
+func (e *expr) ID() int64 {
+ if e == nil {
+ return 0
}
- return nav
+ return e.id
}
-type navigableExprImpl struct {
- parent NavigableExpr
- kind ExprKind
- expr *exprpb.Expr
- typeMap map[int64]*types.Type
- createChildren childFactory
+func (e *expr) Kind() ExprKind {
+ if e == nil || e.exprKindCase == nil {
+ return UnspecifiedExprKind
+ }
+ return e.exprKindCase.Kind()
}
-func (nav *navigableExprImpl) ID() int64 {
- return nav.ToExpr().GetId()
+func (e *expr) AsCall() CallExpr {
+ if e.Kind() != CallKind {
+ return nilCall
+ }
+ return e.exprKindCase.(CallExpr)
}
-func (nav *navigableExprImpl) Kind() ExprKind {
- return nav.kind
+func (e *expr) AsComprehension() ComprehensionExpr {
+ if e.Kind() != ComprehensionKind {
+ return nilCompre
+ }
+ return e.exprKindCase.(ComprehensionExpr)
}
-func (nav *navigableExprImpl) Type() *types.Type {
- if t, found := nav.typeMap[nav.ID()]; found {
- return t
+func (e *expr) AsIdent() string {
+ if e.Kind() != IdentKind {
+ return ""
}
- return types.DynType
+ return string(e.exprKindCase.(baseIdentExpr))
}
-func (nav *navigableExprImpl) Parent() (NavigableExpr, bool) {
- if nav.parent != nil {
- return nav.parent, true
+func (e *expr) AsLiteral() ref.Val {
+ if e.Kind() != LiteralKind {
+ return nil
}
- return nil, false
+ return e.exprKindCase.(*baseLiteral).Val
}
-func (nav *navigableExprImpl) Children() []NavigableExpr {
- return nav.createChildren(nav)
+func (e *expr) AsList() ListExpr {
+ if e.Kind() != ListKind {
+ return nilList
+ }
+ return e.exprKindCase.(ListExpr)
}
-func (nav *navigableExprImpl) ToExpr() *exprpb.Expr {
- return nav.expr
+func (e *expr) AsMap() MapExpr {
+ if e.Kind() != MapKind {
+ return nilMap
+ }
+ return e.exprKindCase.(MapExpr)
}
-func (nav *navigableExprImpl) AsCall() NavigableCallExpr {
- return navigableCallImpl{navigableExprImpl: nav}
+func (e *expr) AsSelect() SelectExpr {
+ if e.Kind() != SelectKind {
+ return nilSel
+ }
+ return e.exprKindCase.(SelectExpr)
}
-func (nav *navigableExprImpl) AsComprehension() NavigableComprehensionExpr {
- return navigableComprehensionImpl{navigableExprImpl: nav}
+func (e *expr) AsStruct() StructExpr {
+ if e.Kind() != StructKind {
+ return nilStruct
+ }
+ return e.exprKindCase.(StructExpr)
}
-func (nav *navigableExprImpl) AsIdent() string {
- return nav.ToExpr().GetIdentExpr().GetName()
+func (e *expr) SetKindCase(other Expr) {
+ if e == nil {
+ return
+ }
+ if other == nil {
+ e.exprKindCase = nil
+ return
+ }
+ switch other.Kind() {
+ case CallKind:
+ c := other.AsCall()
+ e.exprKindCase = &baseCallExpr{
+ function: c.FunctionName(),
+ target: c.Target(),
+ args: c.Args(),
+ isMember: c.IsMemberFunction(),
+ }
+ case ComprehensionKind:
+ c := other.AsComprehension()
+ e.exprKindCase = &baseComprehensionExpr{
+ iterRange: c.IterRange(),
+ iterVar: c.IterVar(),
+ iterVar2: c.IterVar2(),
+ accuVar: c.AccuVar(),
+ accuInit: c.AccuInit(),
+ loopCond: c.LoopCondition(),
+ loopStep: c.LoopStep(),
+ result: c.Result(),
+ }
+ case IdentKind:
+ e.exprKindCase = baseIdentExpr(other.AsIdent())
+ case ListKind:
+ l := other.AsList()
+ optIndexMap := make(map[int32]struct{}, len(l.OptionalIndices()))
+ for _, idx := range l.OptionalIndices() {
+ optIndexMap[idx] = struct{}{}
+ }
+ e.exprKindCase = &baseListExpr{
+ elements: l.Elements(),
+ optIndices: l.OptionalIndices(),
+ optIndexMap: optIndexMap,
+ }
+ case LiteralKind:
+ e.exprKindCase = &baseLiteral{Val: other.AsLiteral()}
+ case MapKind:
+ e.exprKindCase = &baseMapExpr{
+ entries: other.AsMap().Entries(),
+ }
+ case SelectKind:
+ s := other.AsSelect()
+ e.exprKindCase = &baseSelectExpr{
+ operand: s.Operand(),
+ field: s.FieldName(),
+ testOnly: s.IsTestOnly(),
+ }
+ case StructKind:
+ s := other.AsStruct()
+ e.exprKindCase = &baseStructExpr{
+ typeName: s.TypeName(),
+ fields: s.Fields(),
+ }
+ case UnspecifiedExprKind:
+ e.exprKindCase = nil
+ }
}
-func (nav *navigableExprImpl) AsLiteral() ref.Val {
- if nav.Kind() != LiteralKind {
- return nil
+func (e *expr) RenumberIDs(idGen IDGenerator) {
+ if e == nil {
+ return
}
- val, err := ConstantToVal(nav.ToExpr().GetConstExpr())
- if err != nil {
- panic(err)
+ e.id = idGen(e.id)
+ if e.exprKindCase != nil {
+ e.exprKindCase.renumberIDs(idGen)
}
- return val
}
-func (nav *navigableExprImpl) AsList() NavigableListExpr {
- return navigableListImpl{navigableExprImpl: nav}
+type baseCallExpr struct {
+ function string
+ target Expr
+ args []Expr
+ isMember bool
}
-func (nav *navigableExprImpl) AsMap() NavigableMapExpr {
- return navigableMapImpl{navigableExprImpl: nav}
+func (*baseCallExpr) Kind() ExprKind {
+ return CallKind
}
-func (nav *navigableExprImpl) AsSelect() NavigableSelectExpr {
- return navigableSelectImpl{navigableExprImpl: nav}
+func (e *baseCallExpr) FunctionName() string {
+ if e == nil {
+ return ""
+ }
+ return e.function
}
-func (nav *navigableExprImpl) AsStruct() NavigableStructExpr {
- return navigableStructImpl{navigableExprImpl: nav}
+func (e *baseCallExpr) IsMemberFunction() bool {
+ if e == nil {
+ return false
+ }
+ return e.isMember
}
-func (nav *navigableExprImpl) createChild(e *exprpb.Expr) NavigableExpr {
- return newNavigableExpr(nav, e, nav.typeMap)
+func (e *baseCallExpr) Target() Expr {
+ if e == nil || !e.IsMemberFunction() {
+ return nilExpr
+ }
+ return e.target
}
-func (nav *navigableExprImpl) isNavigable() {}
+func (e *baseCallExpr) Args() []Expr {
+ if e == nil {
+ return []Expr{}
+ }
+ return e.args
+}
-type navigableCallImpl struct {
- *navigableExprImpl
+func (e *baseCallExpr) renumberIDs(idGen IDGenerator) {
+ if e.IsMemberFunction() {
+ e.Target().RenumberIDs(idGen)
+ }
+ for _, arg := range e.Args() {
+ arg.RenumberIDs(idGen)
+ }
}
-func (call navigableCallImpl) FunctionName() string {
- return call.ToExpr().GetCallExpr().GetFunction()
+func (*baseCallExpr) isExpr() {}
+
+var _ ComprehensionExpr = &baseComprehensionExpr{}
+
+type baseComprehensionExpr struct {
+ iterRange Expr
+ iterVar string
+ iterVar2 string
+ accuVar string
+ accuInit Expr
+ loopCond Expr
+ loopStep Expr
+ result Expr
}
-func (call navigableCallImpl) Target() NavigableExpr {
- t := call.ToExpr().GetCallExpr().GetTarget()
- if t != nil {
- return call.createChild(t)
- }
- return nil
+func (*baseComprehensionExpr) Kind() ExprKind {
+ return ComprehensionKind
}
-func (call navigableCallImpl) Args() []NavigableExpr {
- args := call.ToExpr().GetCallExpr().GetArgs()
- navArgs := make([]NavigableExpr, len(args))
- for i, a := range args {
- navArgs[i] = call.createChild(a)
+func (e *baseComprehensionExpr) IterRange() Expr {
+ if e == nil {
+ return nilExpr
}
- return navArgs
+ return e.iterRange
}
-func (call navigableCallImpl) ReturnType() *types.Type {
- return call.Type()
+func (e *baseComprehensionExpr) IterVar() string {
+ return e.iterVar
}
-type navigableComprehensionImpl struct {
- *navigableExprImpl
+func (e *baseComprehensionExpr) IterVar2() string {
+ return e.iterVar2
}
-func (comp navigableComprehensionImpl) IterRange() NavigableExpr {
- return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetIterRange())
+func (e *baseComprehensionExpr) HasIterVar2() bool {
+ return e.iterVar2 != ""
}
-func (comp navigableComprehensionImpl) IterVar() string {
- return comp.ToExpr().GetComprehensionExpr().GetIterVar()
+func (e *baseComprehensionExpr) AccuVar() string {
+ return e.accuVar
}
-func (comp navigableComprehensionImpl) AccuVar() string {
- return comp.ToExpr().GetComprehensionExpr().GetAccuVar()
+func (e *baseComprehensionExpr) AccuInit() Expr {
+ if e == nil {
+ return nilExpr
+ }
+ return e.accuInit
}
-func (comp navigableComprehensionImpl) AccuInit() NavigableExpr {
- return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetAccuInit())
+func (e *baseComprehensionExpr) LoopCondition() Expr {
+ if e == nil {
+ return nilExpr
+ }
+ return e.loopCond
}
-func (comp navigableComprehensionImpl) LoopCondition() NavigableExpr {
- return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetLoopCondition())
+func (e *baseComprehensionExpr) LoopStep() Expr {
+ if e == nil {
+ return nilExpr
+ }
+ return e.loopStep
}
-func (comp navigableComprehensionImpl) LoopStep() NavigableExpr {
- return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetLoopStep())
+func (e *baseComprehensionExpr) Result() Expr {
+ if e == nil {
+ return nilExpr
+ }
+ return e.result
}
-func (comp navigableComprehensionImpl) Result() NavigableExpr {
- return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetResult())
+func (e *baseComprehensionExpr) renumberIDs(idGen IDGenerator) {
+ e.IterRange().RenumberIDs(idGen)
+ e.AccuInit().RenumberIDs(idGen)
+ e.LoopCondition().RenumberIDs(idGen)
+ e.LoopStep().RenumberIDs(idGen)
+ e.Result().RenumberIDs(idGen)
}
-type navigableListImpl struct {
- *navigableExprImpl
+func (*baseComprehensionExpr) isExpr() {}
+
+var _ exprKindCase = baseIdentExpr("")
+
+type baseIdentExpr string
+
+func (baseIdentExpr) Kind() ExprKind {
+ return IdentKind
}
-func (l navigableListImpl) Elements() []NavigableExpr {
- return l.Children()
+func (e baseIdentExpr) renumberIDs(IDGenerator) {}
+
+func (baseIdentExpr) isExpr() {}
+
+var _ exprKindCase = &baseLiteral{}
+var _ ref.Val = &baseLiteral{}
+
+type baseLiteral struct {
+ ref.Val
}
-func (l navigableListImpl) OptionalIndices() []int32 {
- return l.ToExpr().GetListExpr().GetOptionalIndices()
+func (*baseLiteral) Kind() ExprKind {
+ return LiteralKind
}
-func (l navigableListImpl) Size() int {
- return len(l.ToExpr().GetListExpr().GetElements())
+func (l *baseLiteral) renumberIDs(IDGenerator) {}
+
+func (*baseLiteral) isExpr() {}
+
+var _ ListExpr = &baseListExpr{}
+
+type baseListExpr struct {
+ elements []Expr
+ optIndices []int32
+ optIndexMap map[int32]struct{}
}
-type navigableMapImpl struct {
- *navigableExprImpl
+func (*baseListExpr) Kind() ExprKind {
+ return ListKind
}
-func (m navigableMapImpl) Entries() []NavigableEntry {
- mapExpr := m.ToExpr().GetStructExpr()
- entries := make([]NavigableEntry, len(mapExpr.GetEntries()))
- for i, e := range mapExpr.GetEntries() {
- entries[i] = navigableEntryImpl{
- key: m.createChild(e.GetMapKey()),
- val: m.createChild(e.GetValue()),
- isOpt: e.GetOptionalEntry(),
- }
+func (e *baseListExpr) Elements() []Expr {
+ if e == nil {
+ return []Expr{}
}
- return entries
+ return e.elements
}
-func (m navigableMapImpl) Size() int {
- return len(m.ToExpr().GetStructExpr().GetEntries())
+func (e *baseListExpr) IsOptional(index int32) bool {
+ _, found := e.optIndexMap[index]
+ return found
}
-type navigableEntryImpl struct {
- key NavigableExpr
- val NavigableExpr
- isOpt bool
+func (e *baseListExpr) OptionalIndices() []int32 {
+ if e == nil {
+ return []int32{}
+ }
+ return e.optIndices
}
-func (e navigableEntryImpl) Key() NavigableExpr {
- return e.key
+func (e *baseListExpr) Size() int {
+ return len(e.Elements())
}
-func (e navigableEntryImpl) Value() NavigableExpr {
- return e.val
+func (e *baseListExpr) renumberIDs(idGen IDGenerator) {
+ for _, elem := range e.Elements() {
+ elem.RenumberIDs(idGen)
+ }
}
-func (e navigableEntryImpl) IsOptional() bool {
- return e.isOpt
+func (*baseListExpr) isExpr() {}
+
+type baseMapExpr struct {
+ entries []EntryExpr
}
-func (e navigableEntryImpl) isNavigable() {}
+func (*baseMapExpr) Kind() ExprKind {
+ return MapKind
+}
-type navigableSelectImpl struct {
- *navigableExprImpl
+func (e *baseMapExpr) Entries() []EntryExpr {
+ if e == nil {
+ return []EntryExpr{}
+ }
+ return e.entries
}
-func (sel navigableSelectImpl) FieldName() string {
- return sel.ToExpr().GetSelectExpr().GetField()
+func (e *baseMapExpr) Size() int {
+ return len(e.Entries())
}
-func (sel navigableSelectImpl) IsTestOnly() bool {
- return sel.ToExpr().GetSelectExpr().GetTestOnly()
+func (e *baseMapExpr) renumberIDs(idGen IDGenerator) {
+ for _, entry := range e.Entries() {
+ entry.RenumberIDs(idGen)
+ }
}
-func (sel navigableSelectImpl) Operand() NavigableExpr {
- return sel.createChild(sel.ToExpr().GetSelectExpr().GetOperand())
+func (*baseMapExpr) isExpr() {}
+
+type baseSelectExpr struct {
+ operand Expr
+ field string
+ testOnly bool
}
-type navigableStructImpl struct {
- *navigableExprImpl
+func (*baseSelectExpr) Kind() ExprKind {
+ return SelectKind
}
-func (s navigableStructImpl) TypeName() string {
- return s.ToExpr().GetStructExpr().GetMessageName()
+func (e *baseSelectExpr) Operand() Expr {
+ if e == nil || e.operand == nil {
+ return nilExpr
+ }
+ return e.operand
}
-func (s navigableStructImpl) Fields() []NavigableField {
- fieldInits := s.ToExpr().GetStructExpr().GetEntries()
- fields := make([]NavigableField, len(fieldInits))
- for i, f := range fieldInits {
- fields[i] = navigableFieldImpl{
- name: f.GetFieldKey(),
- val: s.createChild(f.GetValue()),
- isOpt: f.GetOptionalEntry(),
- }
+func (e *baseSelectExpr) FieldName() string {
+ if e == nil {
+ return ""
+ }
+ return e.field
+}
+
+func (e *baseSelectExpr) IsTestOnly() bool {
+ if e == nil {
+ return false
}
- return fields
+ return e.testOnly
}
-type navigableFieldImpl struct {
- name string
- val NavigableExpr
- isOpt bool
+func (e *baseSelectExpr) renumberIDs(idGen IDGenerator) {
+ e.Operand().RenumberIDs(idGen)
}
-func (f navigableFieldImpl) FieldName() string {
- return f.name
+func (*baseSelectExpr) isExpr() {}
+
+type baseStructExpr struct {
+ typeName string
+ fields []EntryExpr
}
-func (f navigableFieldImpl) Value() NavigableExpr {
- return f.val
+func (*baseStructExpr) Kind() ExprKind {
+ return StructKind
}
-func (f navigableFieldImpl) IsOptional() bool {
- return f.isOpt
+func (e *baseStructExpr) TypeName() string {
+ if e == nil {
+ return ""
+ }
+ return e.typeName
}
-func (f navigableFieldImpl) isNavigable() {}
+func (e *baseStructExpr) Fields() []EntryExpr {
+ if e == nil {
+ return []EntryExpr{}
+ }
+ return e.fields
+}
-func kindOf(expr *exprpb.Expr) (ExprKind, childFactory) {
- switch expr.GetExprKind().(type) {
- case *exprpb.Expr_ConstExpr:
- return LiteralKind, noopFactory
- case *exprpb.Expr_IdentExpr:
- return IdentKind, noopFactory
- case *exprpb.Expr_SelectExpr:
- return SelectKind, selectFactory
- case *exprpb.Expr_CallExpr:
- return CallKind, callArgFactory
- case *exprpb.Expr_ListExpr:
- return ListKind, listElemFactory
- case *exprpb.Expr_StructExpr:
- if expr.GetStructExpr().GetMessageName() != "" {
- return StructKind, structEntryFactory
- }
- return MapKind, mapEntryFactory
- case *exprpb.Expr_ComprehensionExpr:
- return ComprehensionKind, comprehensionFactory
- default:
- return UnspecifiedKind, noopFactory
+func (e *baseStructExpr) renumberIDs(idGen IDGenerator) {
+ for _, f := range e.Fields() {
+ f.RenumberIDs(idGen)
}
}
-type childFactory func(*navigableExprImpl) []NavigableExpr
+func (*baseStructExpr) isExpr() {}
-func noopFactory(*navigableExprImpl) []NavigableExpr {
- return nil
+type entryExprKindCase interface {
+ Kind() EntryExprKind
+
+ renumberIDs(IDGenerator)
+
+ isEntryExpr()
+}
+
+var _ EntryExpr = &entryExpr{}
+
+type entryExpr struct {
+ id int64
+ entryExprKindCase
}
-func selectFactory(nav *navigableExprImpl) []NavigableExpr {
- return []NavigableExpr{
- nav.createChild(nav.ToExpr().GetSelectExpr().GetOperand()),
+func (e *entryExpr) ID() int64 {
+ return e.id
+}
+
+func (e *entryExpr) AsMapEntry() MapEntry {
+ if e.Kind() != MapEntryKind {
+ return nilMapEntry
}
+ return e.entryExprKindCase.(MapEntry)
}
-func callArgFactory(nav *navigableExprImpl) []NavigableExpr {
- call := nav.ToExpr().GetCallExpr()
- argCount := len(call.GetArgs())
- if call.GetTarget() != nil {
- argCount++
+func (e *entryExpr) AsStructField() StructField {
+ if e.Kind() != StructFieldKind {
+ return nilStructField
}
- navExprs := make([]NavigableExpr, argCount)
- i := 0
- if call.GetTarget() != nil {
- navExprs[i] = nav.createChild(call.GetTarget())
- i++
+ return e.entryExprKindCase.(StructField)
+}
+
+func (e *entryExpr) RenumberIDs(idGen IDGenerator) {
+ e.id = idGen(e.id)
+ e.entryExprKindCase.renumberIDs(idGen)
+}
+
+type baseMapEntry struct {
+ key Expr
+ value Expr
+ isOptional bool
+}
+
+func (e *baseMapEntry) Kind() EntryExprKind {
+ return MapEntryKind
+}
+
+func (e *baseMapEntry) Key() Expr {
+ if e == nil {
+ return nilExpr
}
- for _, arg := range call.GetArgs() {
- navExprs[i] = nav.createChild(arg)
- i++
+ return e.key
+}
+
+func (e *baseMapEntry) Value() Expr {
+ if e == nil {
+ return nilExpr
}
- return navExprs
+ return e.value
}
-func listElemFactory(nav *navigableExprImpl) []NavigableExpr {
- l := nav.ToExpr().GetListExpr()
- navExprs := make([]NavigableExpr, len(l.GetElements()))
- for i, e := range l.GetElements() {
- navExprs[i] = nav.createChild(e)
+func (e *baseMapEntry) IsOptional() bool {
+ if e == nil {
+ return false
}
- return navExprs
+ return e.isOptional
}
-func structEntryFactory(nav *navigableExprImpl) []NavigableExpr {
- s := nav.ToExpr().GetStructExpr()
- entries := make([]NavigableExpr, len(s.GetEntries()))
- for i, e := range s.GetEntries() {
+func (e *baseMapEntry) renumberIDs(idGen IDGenerator) {
+ e.Key().RenumberIDs(idGen)
+ e.Value().RenumberIDs(idGen)
+}
+
+func (*baseMapEntry) isEntryExpr() {}
+
+type baseStructField struct {
+ field string
+ value Expr
+ isOptional bool
+}
+
+func (f *baseStructField) Kind() EntryExprKind {
+ return StructFieldKind
+}
- entries[i] = nav.createChild(e.GetValue())
+func (f *baseStructField) Name() string {
+ if f == nil {
+ return ""
}
- return entries
+ return f.field
}
-func mapEntryFactory(nav *navigableExprImpl) []NavigableExpr {
- s := nav.ToExpr().GetStructExpr()
- entries := make([]NavigableExpr, len(s.GetEntries())*2)
- j := 0
- for _, e := range s.GetEntries() {
- entries[j] = nav.createChild(e.GetMapKey())
- entries[j+1] = nav.createChild(e.GetValue())
- j += 2
+func (f *baseStructField) Value() Expr {
+ if f == nil {
+ return nilExpr
}
- return entries
+ return f.value
}
-func comprehensionFactory(nav *navigableExprImpl) []NavigableExpr {
- compre := nav.ToExpr().GetComprehensionExpr()
- return []NavigableExpr{
- nav.createChild(compre.GetIterRange()),
- nav.createChild(compre.GetAccuInit()),
- nav.createChild(compre.GetLoopCondition()),
- nav.createChild(compre.GetLoopStep()),
- nav.createChild(compre.GetResult()),
+func (f *baseStructField) IsOptional() bool {
+ if f == nil {
+ return false
}
+ return f.isOptional
}
+
+func (f *baseStructField) renumberIDs(idGen IDGenerator) {
+ f.Value().RenumberIDs(idGen)
+}
+
+func (*baseStructField) isEntryExpr() {}
+
+var (
+ nilExpr *expr = nil
+ nilCall *baseCallExpr = nil
+ nilCompre *baseComprehensionExpr = nil
+ nilList *baseListExpr = nil
+ nilMap *baseMapExpr = nil
+ nilMapEntry *baseMapEntry = nil
+ nilSel *baseSelectExpr = nil
+ nilStruct *baseStructExpr = nil
+ nilStructField *baseStructField = nil
+)
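
// Illustrative sketch, not part of the vendored patch: the base expression
// types above guard their accessors against nil receivers and return zero
// values, which is what makes the typed-nil sentinels (nilExpr, nilList,
// etc.) safe to hand out. A minimal standalone Go illustration of that
// pattern, with hypothetical names:

package main

import "fmt"

type node struct{ children []*node }

// Children tolerates a nil *node receiver, mirroring baseListExpr.Elements.
func (n *node) Children() []*node {
	if n == nil {
		return []*node{}
	}
	return n.children
}

func main() {
	var n *node                    // typed nil, like the nilList sentinel
	fmt.Println(len(n.Children())) // prints 0 instead of panicking
}
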
diff --git a/vendor/github.com/google/cel-go/common/ast/factory.go b/vendor/github.com/google/cel-go/common/ast/factory.go
new file mode 100644
index 000000000..d4dcde4d9
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/ast/factory.go
@@ -0,0 +1,332 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ast
+
+import "github.com/google/cel-go/common/types/ref"
+
+// ExprFactory defines a set of methods necessary for building native expression values.
+type ExprFactory interface {
+ // CopyExpr creates a deep copy of the input Expr value.
+ CopyExpr(Expr) Expr
+
+ // CopyEntryExpr creates a deep copy of the input EntryExpr value.
+ CopyEntryExpr(EntryExpr) EntryExpr
+
+ // NewCall creates an Expr value representing a global function call.
+ NewCall(id int64, function string, args ...Expr) Expr
+
+ // NewComprehension creates an Expr value representing a one-variable comprehension over a value range.
+ NewComprehension(id int64, iterRange Expr, iterVar, accuVar string, accuInit, loopCondition, loopStep, result Expr) Expr
+
+ // NewComprehensionTwoVar creates an Expr value representing a two-variable comprehension over a value range.
+ NewComprehensionTwoVar(id int64, iterRange Expr, iterVar, iterVar2, accuVar string, accuInit, loopCondition, loopStep, result Expr) Expr
+
+ // NewMemberCall creates an Expr value representing a member function call.
+ NewMemberCall(id int64, function string, receiver Expr, args ...Expr) Expr
+
+ // NewIdent creates an Expr value representing an identifier.
+ NewIdent(id int64, name string) Expr
+
+ // NewAccuIdent creates an Expr value representing an accumulator identifier within a
+ // comprehension.
+ NewAccuIdent(id int64) Expr
+
+ // AccuIdentName reports the name of the accumulator variable to be used within a comprehension.
+ AccuIdentName() string
+
+ // NewLiteral creates an Expr value representing a literal value, such as a string or integer.
+ NewLiteral(id int64, value ref.Val) Expr
+
+ // NewList creates an Expr value representing a list literal expression with optional indices.
+ //
+ // Optional indices will typically be empty unless the CEL optional types are enabled.
+ NewList(id int64, elems []Expr, optIndices []int32) Expr
+
+ // NewMap creates an Expr value representing a map literal expression.
+ NewMap(id int64, entries []EntryExpr) Expr
+
+ // NewMapEntry creates a MapEntry with a given key, value, and a flag indicating whether
+ // the key is optionally set.
+ NewMapEntry(id int64, key, value Expr, isOptional bool) EntryExpr
+
+ // NewPresenceTest creates an Expr representing a field presence test on an operand expression.
+ NewPresenceTest(id int64, operand Expr, field string) Expr
+
+ // NewSelect creates an Expr representing a field selection on an operand expression.
+ NewSelect(id int64, operand Expr, field string) Expr
+
+ // NewStruct creates an Expr value representing a struct literal with a given type name and a
+ // set of field initializers.
+ NewStruct(id int64, typeName string, fields []EntryExpr) Expr
+
+ // NewStructField creates a StructField with a given field name, value, and a flag indicating
+ // whether the field is optionally set.
+ NewStructField(id int64, field string, value Expr, isOptional bool) EntryExpr
+
+ // NewUnspecifiedExpr creates an empty expression node.
+ NewUnspecifiedExpr(id int64) Expr
+
+ isExprFactory()
+}
+
+type baseExprFactory struct {
+ accumulatorName string
+}
+
+// NewExprFactory creates an ExprFactory instance.
+func NewExprFactory() ExprFactory {
+ return &baseExprFactory{
+ "@result",
+ }
+}
+
+// NewExprFactoryWithAccumulator creates an ExprFactory instance with a custom
+// accumulator identifier name.
+func NewExprFactoryWithAccumulator(id string) ExprFactory {
+ return &baseExprFactory{
+ id,
+ }
+}
+
+func (fac *baseExprFactory) NewCall(id int64, function string, args ...Expr) Expr {
+ if len(args) == 0 {
+ args = []Expr{}
+ }
+ return fac.newExpr(
+ id,
+ &baseCallExpr{
+ function: function,
+ target: nilExpr,
+ args: args,
+ isMember: false,
+ })
+}
+
+func (fac *baseExprFactory) NewMemberCall(id int64, function string, target Expr, args ...Expr) Expr {
+ if len(args) == 0 {
+ args = []Expr{}
+ }
+ return fac.newExpr(
+ id,
+ &baseCallExpr{
+ function: function,
+ target: target,
+ args: args,
+ isMember: true,
+ })
+}
+
+func (fac *baseExprFactory) NewComprehension(id int64, iterRange Expr, iterVar, accuVar string, accuInit, loopCond, loopStep, result Expr) Expr {
+ // Set the iter_var2 to empty string to indicate the second variable is omitted
+ return fac.NewComprehensionTwoVar(id, iterRange, iterVar, "", accuVar, accuInit, loopCond, loopStep, result)
+}
+
+func (fac *baseExprFactory) NewComprehensionTwoVar(id int64, iterRange Expr, iterVar, iterVar2, accuVar string, accuInit, loopCond, loopStep, result Expr) Expr {
+ return fac.newExpr(
+ id,
+ &baseComprehensionExpr{
+ iterRange: iterRange,
+ iterVar: iterVar,
+ iterVar2: iterVar2,
+ accuVar: accuVar,
+ accuInit: accuInit,
+ loopCond: loopCond,
+ loopStep: loopStep,
+ result: result,
+ })
+}
+
+func (fac *baseExprFactory) NewIdent(id int64, name string) Expr {
+ return fac.newExpr(id, baseIdentExpr(name))
+}
+
+func (fac *baseExprFactory) NewAccuIdent(id int64) Expr {
+ return fac.NewIdent(id, fac.AccuIdentName())
+}
+
+func (fac *baseExprFactory) AccuIdentName() string {
+ return fac.accumulatorName
+}
+
+func (fac *baseExprFactory) NewLiteral(id int64, value ref.Val) Expr {
+ return fac.newExpr(id, &baseLiteral{Val: value})
+}
+
+func (fac *baseExprFactory) NewList(id int64, elems []Expr, optIndices []int32) Expr {
+ optIndexMap := make(map[int32]struct{}, len(optIndices))
+ for _, idx := range optIndices {
+ optIndexMap[idx] = struct{}{}
+ }
+ return fac.newExpr(id,
+ &baseListExpr{
+ elements: elems,
+ optIndices: optIndices,
+ optIndexMap: optIndexMap,
+ })
+}
+
+func (fac *baseExprFactory) NewMap(id int64, entries []EntryExpr) Expr {
+ return fac.newExpr(id, &baseMapExpr{entries: entries})
+}
+
+func (fac *baseExprFactory) NewMapEntry(id int64, key, value Expr, isOptional bool) EntryExpr {
+ return fac.newEntryExpr(
+ id,
+ &baseMapEntry{
+ key: key,
+ value: value,
+ isOptional: isOptional,
+ })
+}
+
+func (fac *baseExprFactory) NewPresenceTest(id int64, operand Expr, field string) Expr {
+ return fac.newExpr(
+ id,
+ &baseSelectExpr{
+ operand: operand,
+ field: field,
+ testOnly: true,
+ })
+}
+
+func (fac *baseExprFactory) NewSelect(id int64, operand Expr, field string) Expr {
+ return fac.newExpr(
+ id,
+ &baseSelectExpr{
+ operand: operand,
+ field: field,
+ })
+}
+
+func (fac *baseExprFactory) NewStruct(id int64, typeName string, fields []EntryExpr) Expr {
+ return fac.newExpr(
+ id,
+ &baseStructExpr{
+ typeName: typeName,
+ fields: fields,
+ })
+}
+
+func (fac *baseExprFactory) NewStructField(id int64, field string, value Expr, isOptional bool) EntryExpr {
+ return fac.newEntryExpr(
+ id,
+ &baseStructField{
+ field: field,
+ value: value,
+ isOptional: isOptional,
+ })
+}
+
+func (fac *baseExprFactory) NewUnspecifiedExpr(id int64) Expr {
+ return fac.newExpr(id, nil)
+}
+
+func (fac *baseExprFactory) CopyExpr(e Expr) Expr {
+ // unwrap navigable expressions to avoid unnecessary allocations during copying.
+ if nav, ok := e.(*navigableExprImpl); ok {
+ e = nav.Expr
+ }
+ switch e.Kind() {
+ case CallKind:
+ c := e.AsCall()
+ argsCopy := make([]Expr, len(c.Args()))
+ for i, arg := range c.Args() {
+ argsCopy[i] = fac.CopyExpr(arg)
+ }
+ if !c.IsMemberFunction() {
+ return fac.NewCall(e.ID(), c.FunctionName(), argsCopy...)
+ }
+ return fac.NewMemberCall(e.ID(), c.FunctionName(), fac.CopyExpr(c.Target()), argsCopy...)
+ case ComprehensionKind:
+ compre := e.AsComprehension()
+ return fac.NewComprehensionTwoVar(e.ID(),
+ fac.CopyExpr(compre.IterRange()),
+ compre.IterVar(),
+ compre.IterVar2(),
+ compre.AccuVar(),
+ fac.CopyExpr(compre.AccuInit()),
+ fac.CopyExpr(compre.LoopCondition()),
+ fac.CopyExpr(compre.LoopStep()),
+ fac.CopyExpr(compre.Result()))
+ case IdentKind:
+ return fac.NewIdent(e.ID(), e.AsIdent())
+ case ListKind:
+ l := e.AsList()
+ elemsCopy := make([]Expr, l.Size())
+ for i, elem := range l.Elements() {
+ elemsCopy[i] = fac.CopyExpr(elem)
+ }
+ return fac.NewList(e.ID(), elemsCopy, l.OptionalIndices())
+ case LiteralKind:
+ return fac.NewLiteral(e.ID(), e.AsLiteral())
+ case MapKind:
+ m := e.AsMap()
+ entriesCopy := make([]EntryExpr, m.Size())
+ for i, entry := range m.Entries() {
+ entriesCopy[i] = fac.CopyEntryExpr(entry)
+ }
+ return fac.NewMap(e.ID(), entriesCopy)
+ case SelectKind:
+ s := e.AsSelect()
+ if s.IsTestOnly() {
+ return fac.NewPresenceTest(e.ID(), fac.CopyExpr(s.Operand()), s.FieldName())
+ }
+ return fac.NewSelect(e.ID(), fac.CopyExpr(s.Operand()), s.FieldName())
+ case StructKind:
+ s := e.AsStruct()
+ fieldsCopy := make([]EntryExpr, len(s.Fields()))
+ for i, field := range s.Fields() {
+ fieldsCopy[i] = fac.CopyEntryExpr(field)
+ }
+ return fac.NewStruct(e.ID(), s.TypeName(), fieldsCopy)
+ default:
+ return fac.NewUnspecifiedExpr(e.ID())
+ }
+}
+
+func (fac *baseExprFactory) CopyEntryExpr(e EntryExpr) EntryExpr {
+ switch e.Kind() {
+ case MapEntryKind:
+ entry := e.AsMapEntry()
+ return fac.NewMapEntry(e.ID(),
+ fac.CopyExpr(entry.Key()), fac.CopyExpr(entry.Value()), entry.IsOptional())
+ case StructFieldKind:
+ field := e.AsStructField()
+ return fac.NewStructField(e.ID(),
+ field.Name(), fac.CopyExpr(field.Value()), field.IsOptional())
+ default:
+ return fac.newEntryExpr(e.ID(), nil)
+ }
+}
+
+func (*baseExprFactory) isExprFactory() {}
+
+func (fac *baseExprFactory) newExpr(id int64, e exprKindCase) Expr {
+ return &expr{
+ id: id,
+ exprKindCase: e,
+ }
+}
+
+func (fac *baseExprFactory) newEntryExpr(id int64, e entryExprKindCase) EntryExpr {
+ return &entryExpr{
+ id: id,
+ entryExprKindCase: e,
+ }
+}
+
+var (
+ defaultFactory = &baseExprFactory{}
+)
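
// Illustrative sketch, not part of the vendored patch: building and deep
// copying an expression with the factory above. IDs are caller-assigned and
// only need to be unique within one AST; "_+_" is CEL's addition operator.

package main

import (
	"fmt"

	"github.com/google/cel-go/common/ast"
	"github.com/google/cel-go/common/types"
)

func main() {
	fac := ast.NewExprFactory()
	// Equivalent of the CEL expression `x + 1`.
	sum := fac.NewCall(3, "_+_",
		fac.NewIdent(1, "x"),
		fac.NewLiteral(2, types.Int(1)),
	)
	// CopyExpr recursively copies the tree, preserving IDs and kinds.
	cp := fac.CopyExpr(sum)
	fmt.Println(cp.ID(), cp.Kind() == ast.CallKind) // 3 true
}
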
diff --git a/vendor/github.com/google/cel-go/common/ast/navigable.go b/vendor/github.com/google/cel-go/common/ast/navigable.go
new file mode 100644
index 000000000..13e5777b5
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/ast/navigable.go
@@ -0,0 +1,665 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ast
+
+import (
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// NavigableExpr represents the base navigable expression value with methods to inspect the
+// parent and child expressions.
+type NavigableExpr interface {
+ Expr
+
+ // Type of the expression.
+ //
+ // If the expression is type-checked, the type check metadata is returned. If the expression
+ // has not been type-checked, the types.DynType value is returned.
+ Type() *types.Type
+
+ // Parent returns the parent expression node, if one exists.
+ Parent() (NavigableExpr, bool)
+
+ // Children returns a list of child expression nodes.
+ Children() []NavigableExpr
+
+ // Depth indicates the depth in the expression tree.
+ //
+ // The root expression has depth 0.
+ Depth() int
+}
+
+// NavigateAST converts an AST to a NavigableExpr.
+func NavigateAST(ast *AST) NavigableExpr {
+ return NavigateExpr(ast, ast.Expr())
+}
+
+// NavigateExpr creates a NavigableExpr whose type information is backed by the input AST.
+//
+// If the expression is already a NavigableExpr, the parent and depth information will be
+// propagated on the new NavigableExpr value; otherwise, the expr value will be treated
+// as though it is the root of the expression graph with a depth of 0.
+func NavigateExpr(ast *AST, expr Expr) NavigableExpr {
+ depth := 0
+ var parent NavigableExpr = nil
+ if nav, ok := expr.(NavigableExpr); ok {
+ depth = nav.Depth()
+ parent, _ = nav.Parent()
+ }
+ return newNavigableExpr(ast, parent, expr, depth)
+}
+
+// ExprMatcher takes a NavigableExpr in and indicates whether the value is a match.
+//
+// This function type should be used with the `MatchDescendants` and `MatchSubset` calls.
+type ExprMatcher func(NavigableExpr) bool
+
+// ConstantValueMatcher returns an ExprMatcher which will return true if the input NavigableExpr
+// is composed entirely of constant values, such as a simple literal, or a list or map literal
+// whose elements are all constants.
+func ConstantValueMatcher() ExprMatcher {
+ return matchIsConstantValue
+}
+
+// KindMatcher returns an ExprMatcher which will return true if the input NavigableExpr.Kind() matches
+// the specified `kind`.
+func KindMatcher(kind ExprKind) ExprMatcher {
+ return func(e NavigableExpr) bool {
+ return e.Kind() == kind
+ }
+}
+
+// FunctionMatcher returns an ExprMatcher which will match NavigableExpr nodes of CallKind type whose
+// function name is equal to `funcName`.
+func FunctionMatcher(funcName string) ExprMatcher {
+ return func(e NavigableExpr) bool {
+ if e.Kind() != CallKind {
+ return false
+ }
+ return e.AsCall().FunctionName() == funcName
+ }
+}
+
+// AllMatcher returns an ExprMatcher which matches all descendants of a NavigableExpr, effectively
+// flattening them into a list.
+//
+// Such a result would work well with subsequent MatchList calls.
+func AllMatcher() ExprMatcher {
+ return func(NavigableExpr) bool {
+ return true
+ }
+}
+
+// MatchDescendants takes a NavigableExpr and ExprMatcher and produces a list of NavigableExpr values
+// matching the input criteria in post-order (bottom up).
+func MatchDescendants(expr NavigableExpr, matcher ExprMatcher) []NavigableExpr {
+ matches := []NavigableExpr{}
+ navVisitor := &baseVisitor{
+ visitExpr: func(e Expr) {
+ nav := e.(NavigableExpr)
+ if matcher(nav) {
+ matches = append(matches, nav)
+ }
+ },
+ }
+ visit(expr, navVisitor, postOrder, 0, 0)
+ return matches
+}
+
+// MatchSubset applies an ExprMatcher to a list of NavigableExpr values and their descendants, producing a
+// subset of NavigableExpr values which match.
+func MatchSubset(exprs []NavigableExpr, matcher ExprMatcher) []NavigableExpr {
+ matches := []NavigableExpr{}
+ navVisitor := &baseVisitor{
+ visitExpr: func(e Expr) {
+ nav := e.(NavigableExpr)
+ if matcher(nav) {
+ matches = append(matches, nav)
+ }
+ },
+ }
+ for _, expr := range exprs {
+ visit(expr, navVisitor, postOrder, 0, 1)
+ }
+ return matches
+}
+
+// Visitor defines an object for visiting Expr and EntryExpr nodes within an expression graph.
+type Visitor interface {
+ // VisitExpr visits the input expression.
+ VisitExpr(Expr)
+
+ // VisitEntryExpr visits the input entry expression, i.e. a struct field or map entry.
+ VisitEntryExpr(EntryExpr)
+}
+
+type baseVisitor struct {
+ visitExpr func(Expr)
+ visitEntryExpr func(EntryExpr)
+}
+
+// VisitExpr visits the Expr if the internal expr visitor has been configured.
+func (v *baseVisitor) VisitExpr(e Expr) {
+ if v.visitExpr != nil {
+ v.visitExpr(e)
+ }
+}
+
+// VisitEntryExpr visits the entry if the internal expr entry visitor has been configured.
+func (v *baseVisitor) VisitEntryExpr(e EntryExpr) {
+ if v.visitEntryExpr != nil {
+ v.visitEntryExpr(e)
+ }
+}
+
+// NewExprVisitor creates a visitor which only visits expression nodes.
+func NewExprVisitor(v func(Expr)) Visitor {
+ return &baseVisitor{
+ visitExpr: v,
+ visitEntryExpr: nil,
+ }
+}
+
+// PostOrderVisit walks the expression graph and calls the visitor in post-order (bottom-up).
+func PostOrderVisit(expr Expr, visitor Visitor) {
+ visit(expr, visitor, postOrder, 0, 0)
+}
+
+// PreOrderVisit walks the expression graph and calls the visitor in pre-order (top-down).
+func PreOrderVisit(expr Expr, visitor Visitor) {
+ visit(expr, visitor, preOrder, 0, 0)
+}
+
+type visitOrder int
+
+const (
+ preOrder = iota + 1
+ postOrder
+)
+
+// TODO: consider exposing a way to configure a limit for the max visit depth.
+// It's possible that we may want to configure this on the NewExprVisitor()
+// and through MatchDescendants() / MaxID().
+func visit(expr Expr, visitor Visitor, order visitOrder, depth, maxDepth int) {
+ if maxDepth > 0 && depth == maxDepth {
+ return
+ }
+ if order == preOrder {
+ visitor.VisitExpr(expr)
+ }
+ switch expr.Kind() {
+ case CallKind:
+ c := expr.AsCall()
+ if c.IsMemberFunction() {
+ visit(c.Target(), visitor, order, depth+1, maxDepth)
+ }
+ for _, arg := range c.Args() {
+ visit(arg, visitor, order, depth+1, maxDepth)
+ }
+ case ComprehensionKind:
+ c := expr.AsComprehension()
+ visit(c.IterRange(), visitor, order, depth+1, maxDepth)
+ visit(c.AccuInit(), visitor, order, depth+1, maxDepth)
+ visit(c.LoopCondition(), visitor, order, depth+1, maxDepth)
+ visit(c.LoopStep(), visitor, order, depth+1, maxDepth)
+ visit(c.Result(), visitor, order, depth+1, maxDepth)
+ case ListKind:
+ l := expr.AsList()
+ for _, elem := range l.Elements() {
+ visit(elem, visitor, order, depth+1, maxDepth)
+ }
+ case MapKind:
+ m := expr.AsMap()
+ for _, e := range m.Entries() {
+ if order == preOrder {
+ visitor.VisitEntryExpr(e)
+ }
+ entry := e.AsMapEntry()
+ visit(entry.Key(), visitor, order, depth+1, maxDepth)
+ visit(entry.Value(), visitor, order, depth+1, maxDepth)
+ if order == postOrder {
+ visitor.VisitEntryExpr(e)
+ }
+ }
+ case SelectKind:
+ visit(expr.AsSelect().Operand(), visitor, order, depth+1, maxDepth)
+ case StructKind:
+ s := expr.AsStruct()
+ for _, f := range s.Fields() {
+ if order == preOrder {
+ visitor.VisitEntryExpr(f)
+ }
+ visit(f.AsStructField().Value(), visitor, order, depth+1, maxDepth)
+ if order == postOrder {
+ visitor.VisitEntryExpr(f)
+ }
+ }
+ }
+ if order == postOrder {
+ visitor.VisitExpr(expr)
+ }
+}
+
+func matchIsConstantValue(e NavigableExpr) bool {
+ if e.Kind() == LiteralKind {
+ return true
+ }
+ if e.Kind() == StructKind || e.Kind() == MapKind || e.Kind() == ListKind {
+ for _, child := range e.Children() {
+ if !matchIsConstantValue(child) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+func newNavigableExpr(ast *AST, parent NavigableExpr, expr Expr, depth int) NavigableExpr {
+ // Reduce navigable expression nesting by unwrapping the embedded Expr value.
+ if nav, ok := expr.(*navigableExprImpl); ok {
+ expr = nav.Expr
+ }
+ nav := &navigableExprImpl{
+ Expr: expr,
+ depth: depth,
+ ast: ast,
+ parent: parent,
+ createChildren: getChildFactory(expr),
+ }
+ return nav
+}
+
+type navigableExprImpl struct {
+ Expr
+ depth int
+ ast *AST
+ parent NavigableExpr
+ createChildren childFactory
+}
+
+func (nav *navigableExprImpl) Parent() (NavigableExpr, bool) {
+ if nav.parent != nil {
+ return nav.parent, true
+ }
+ return nil, false
+}
+
+func (nav *navigableExprImpl) ID() int64 {
+ return nav.Expr.ID()
+}
+
+func (nav *navigableExprImpl) Kind() ExprKind {
+ return nav.Expr.Kind()
+}
+
+func (nav *navigableExprImpl) Type() *types.Type {
+ return nav.ast.GetType(nav.ID())
+}
+
+func (nav *navigableExprImpl) Children() []NavigableExpr {
+ return nav.createChildren(nav)
+}
+
+func (nav *navigableExprImpl) Depth() int {
+ return nav.depth
+}
+
+func (nav *navigableExprImpl) AsCall() CallExpr {
+ return navigableCallImpl{navigableExprImpl: nav}
+}
+
+func (nav *navigableExprImpl) AsComprehension() ComprehensionExpr {
+ return navigableComprehensionImpl{navigableExprImpl: nav}
+}
+
+func (nav *navigableExprImpl) AsIdent() string {
+ return nav.Expr.AsIdent()
+}
+
+func (nav *navigableExprImpl) AsList() ListExpr {
+ return navigableListImpl{navigableExprImpl: nav}
+}
+
+func (nav *navigableExprImpl) AsLiteral() ref.Val {
+ return nav.Expr.AsLiteral()
+}
+
+func (nav *navigableExprImpl) AsMap() MapExpr {
+ return navigableMapImpl{navigableExprImpl: nav}
+}
+
+func (nav *navigableExprImpl) AsSelect() SelectExpr {
+ return navigableSelectImpl{navigableExprImpl: nav}
+}
+
+func (nav *navigableExprImpl) AsStruct() StructExpr {
+ return navigableStructImpl{navigableExprImpl: nav}
+}
+
+func (nav *navigableExprImpl) createChild(e Expr) NavigableExpr {
+ return newNavigableExpr(nav.ast, nav, e, nav.depth+1)
+}
+
+func (nav *navigableExprImpl) isExpr() {}
+
+type navigableCallImpl struct {
+ *navigableExprImpl
+}
+
+func (call navigableCallImpl) FunctionName() string {
+ return call.Expr.AsCall().FunctionName()
+}
+
+func (call navigableCallImpl) IsMemberFunction() bool {
+ return call.Expr.AsCall().IsMemberFunction()
+}
+
+func (call navigableCallImpl) Target() Expr {
+ t := call.Expr.AsCall().Target()
+ if t != nil {
+ return call.createChild(t)
+ }
+ return nil
+}
+
+func (call navigableCallImpl) Args() []Expr {
+ args := call.Expr.AsCall().Args()
+ navArgs := make([]Expr, len(args))
+ for i, a := range args {
+ navArgs[i] = call.createChild(a)
+ }
+ return navArgs
+}
+
+type navigableComprehensionImpl struct {
+ *navigableExprImpl
+}
+
+func (comp navigableComprehensionImpl) IterRange() Expr {
+ return comp.createChild(comp.Expr.AsComprehension().IterRange())
+}
+
+func (comp navigableComprehensionImpl) IterVar() string {
+ return comp.Expr.AsComprehension().IterVar()
+}
+
+func (comp navigableComprehensionImpl) IterVar2() string {
+ return comp.Expr.AsComprehension().IterVar2()
+}
+
+func (comp navigableComprehensionImpl) HasIterVar2() bool {
+ return comp.Expr.AsComprehension().HasIterVar2()
+}
+
+func (comp navigableComprehensionImpl) AccuVar() string {
+ return comp.Expr.AsComprehension().AccuVar()
+}
+
+func (comp navigableComprehensionImpl) AccuInit() Expr {
+ return comp.createChild(comp.Expr.AsComprehension().AccuInit())
+}
+
+func (comp navigableComprehensionImpl) LoopCondition() Expr {
+ return comp.createChild(comp.Expr.AsComprehension().LoopCondition())
+}
+
+func (comp navigableComprehensionImpl) LoopStep() Expr {
+ return comp.createChild(comp.Expr.AsComprehension().LoopStep())
+}
+
+func (comp navigableComprehensionImpl) Result() Expr {
+ return comp.createChild(comp.Expr.AsComprehension().Result())
+}
+
+type navigableListImpl struct {
+ *navigableExprImpl
+}
+
+func (l navigableListImpl) Elements() []Expr {
+ pbElems := l.Expr.AsList().Elements()
+ elems := make([]Expr, len(pbElems))
+ for i := 0; i < len(pbElems); i++ {
+ elems[i] = l.createChild(pbElems[i])
+ }
+ return elems
+}
+
+func (l navigableListImpl) IsOptional(index int32) bool {
+ return l.Expr.AsList().IsOptional(index)
+}
+
+func (l navigableListImpl) OptionalIndices() []int32 {
+ return l.Expr.AsList().OptionalIndices()
+}
+
+func (l navigableListImpl) Size() int {
+ return l.Expr.AsList().Size()
+}
+
+type navigableMapImpl struct {
+ *navigableExprImpl
+}
+
+func (m navigableMapImpl) Entries() []EntryExpr {
+ mapExpr := m.Expr.AsMap()
+ entries := make([]EntryExpr, len(mapExpr.Entries()))
+ for i, e := range mapExpr.Entries() {
+ entry := e.AsMapEntry()
+ entries[i] = &entryExpr{
+ id: e.ID(),
+ entryExprKindCase: navigableEntryImpl{
+ key: m.createChild(entry.Key()),
+ val: m.createChild(entry.Value()),
+ isOpt: entry.IsOptional(),
+ },
+ }
+ }
+ return entries
+}
+
+func (m navigableMapImpl) Size() int {
+ return m.Expr.AsMap().Size()
+}
+
+type navigableEntryImpl struct {
+ key NavigableExpr
+ val NavigableExpr
+ isOpt bool
+}
+
+func (e navigableEntryImpl) Kind() EntryExprKind {
+ return MapEntryKind
+}
+
+func (e navigableEntryImpl) Key() Expr {
+ return e.key
+}
+
+func (e navigableEntryImpl) Value() Expr {
+ return e.val
+}
+
+func (e navigableEntryImpl) IsOptional() bool {
+ return e.isOpt
+}
+
+func (e navigableEntryImpl) renumberIDs(IDGenerator) {}
+
+func (e navigableEntryImpl) isEntryExpr() {}
+
+type navigableSelectImpl struct {
+ *navigableExprImpl
+}
+
+func (sel navigableSelectImpl) FieldName() string {
+ return sel.Expr.AsSelect().FieldName()
+}
+
+func (sel navigableSelectImpl) IsTestOnly() bool {
+ return sel.Expr.AsSelect().IsTestOnly()
+}
+
+func (sel navigableSelectImpl) Operand() Expr {
+ return sel.createChild(sel.Expr.AsSelect().Operand())
+}
+
+type navigableStructImpl struct {
+ *navigableExprImpl
+}
+
+func (s navigableStructImpl) TypeName() string {
+ return s.Expr.AsStruct().TypeName()
+}
+
+func (s navigableStructImpl) Fields() []EntryExpr {
+ fieldInits := s.Expr.AsStruct().Fields()
+ fields := make([]EntryExpr, len(fieldInits))
+ for i, f := range fieldInits {
+ field := f.AsStructField()
+ fields[i] = &entryExpr{
+ id: f.ID(),
+ entryExprKindCase: navigableFieldImpl{
+ name: field.Name(),
+ val: s.createChild(field.Value()),
+ isOpt: field.IsOptional(),
+ },
+ }
+ }
+ return fields
+}
+
+type navigableFieldImpl struct {
+ name string
+ val NavigableExpr
+ isOpt bool
+}
+
+func (f navigableFieldImpl) Kind() EntryExprKind {
+ return StructFieldKind
+}
+
+func (f navigableFieldImpl) Name() string {
+ return f.name
+}
+
+func (f navigableFieldImpl) Value() Expr {
+ return f.val
+}
+
+func (f navigableFieldImpl) IsOptional() bool {
+ return f.isOpt
+}
+
+func (f navigableFieldImpl) renumberIDs(IDGenerator) {}
+
+func (f navigableFieldImpl) isEntryExpr() {}
+
+func getChildFactory(expr Expr) childFactory {
+ if expr == nil {
+ return noopFactory
+ }
+ switch expr.Kind() {
+ case LiteralKind:
+ return noopFactory
+ case IdentKind:
+ return noopFactory
+ case SelectKind:
+ return selectFactory
+ case CallKind:
+ return callArgFactory
+ case ListKind:
+ return listElemFactory
+ case MapKind:
+ return mapEntryFactory
+ case StructKind:
+ return structEntryFactory
+ case ComprehensionKind:
+ return comprehensionFactory
+ default:
+ return noopFactory
+ }
+}
+
+type childFactory func(*navigableExprImpl) []NavigableExpr
+
+func noopFactory(*navigableExprImpl) []NavigableExpr {
+ return nil
+}
+
+func selectFactory(nav *navigableExprImpl) []NavigableExpr {
+ return []NavigableExpr{nav.createChild(nav.AsSelect().Operand())}
+}
+
+func callArgFactory(nav *navigableExprImpl) []NavigableExpr {
+ call := nav.Expr.AsCall()
+ argCount := len(call.Args())
+ if call.IsMemberFunction() {
+ argCount++
+ }
+ navExprs := make([]NavigableExpr, argCount)
+ i := 0
+ if call.IsMemberFunction() {
+ navExprs[i] = nav.createChild(call.Target())
+ i++
+ }
+ for _, arg := range call.Args() {
+ navExprs[i] = nav.createChild(arg)
+ i++
+ }
+ return navExprs
+}
+
+func listElemFactory(nav *navigableExprImpl) []NavigableExpr {
+ l := nav.Expr.AsList()
+ navExprs := make([]NavigableExpr, len(l.Elements()))
+ for i, e := range l.Elements() {
+ navExprs[i] = nav.createChild(e)
+ }
+ return navExprs
+}
+
+func structEntryFactory(nav *navigableExprImpl) []NavigableExpr {
+ s := nav.Expr.AsStruct()
+ entries := make([]NavigableExpr, len(s.Fields()))
+ for i, e := range s.Fields() {
+ f := e.AsStructField()
+ entries[i] = nav.createChild(f.Value())
+ }
+ return entries
+}
+
+func mapEntryFactory(nav *navigableExprImpl) []NavigableExpr {
+ m := nav.Expr.AsMap()
+ entries := make([]NavigableExpr, len(m.Entries())*2)
+ j := 0
+ for _, e := range m.Entries() {
+ mapEntry := e.AsMapEntry()
+ entries[j] = nav.createChild(mapEntry.Key())
+ entries[j+1] = nav.createChild(mapEntry.Value())
+ j += 2
+ }
+ return entries
+}
+
+func comprehensionFactory(nav *navigableExprImpl) []NavigableExpr {
+ compre := nav.Expr.AsComprehension()
+ return []NavigableExpr{
+ nav.createChild(compre.IterRange()),
+ nav.createChild(compre.AccuInit()),
+ nav.createChild(compre.LoopCondition()),
+ nav.createChild(compre.LoopStep()),
+ nav.createChild(compre.Result()),
+ }
+}
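
// Illustrative sketch, not part of the vendored patch: finding every call to
// a given function with the navigation helpers above. Assumes `checked` is an
// *ast.AST produced elsewhere by the parser or checker.

package inspect

import "github.com/google/cel-go/common/ast"

// findCalls returns all CallKind descendants whose function name equals fn,
// visited in post-order (bottom up) as documented on MatchDescendants.
func findCalls(checked *ast.AST, fn string) []ast.NavigableExpr {
	root := ast.NavigateAST(checked)
	return ast.MatchDescendants(root, ast.FunctionMatcher(fn))
}
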
diff --git a/vendor/github.com/google/cel-go/common/containers/BUILD.bazel b/vendor/github.com/google/cel-go/common/containers/BUILD.bazel
index 3f3f07887..81197f064 100644
--- a/vendor/github.com/google/cel-go/common/containers/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/common/containers/BUILD.bazel
@@ -12,7 +12,7 @@ go_library(
],
importpath = "github.com/google/cel-go/common/containers",
deps = [
- "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "//common/ast:go_default_library",
],
)
@@ -26,6 +26,6 @@ go_test(
":go_default_library",
],
deps = [
- "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "//common/ast:go_default_library",
],
)
diff --git a/vendor/github.com/google/cel-go/common/containers/container.go b/vendor/github.com/google/cel-go/common/containers/container.go
index d46698d3c..fc146b6fc 100644
--- a/vendor/github.com/google/cel-go/common/containers/container.go
+++ b/vendor/github.com/google/cel-go/common/containers/container.go
@@ -19,8 +19,9 @@ package containers
import (
"fmt"
"strings"
+ "unicode"
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ "github.com/google/cel-go/common/ast"
)
var (
@@ -62,9 +63,9 @@ func (c *Container) Extend(opts ...ContainerOption) (*Container, error) {
}
// Copy the name and aliases of the existing container.
ext := &Container{name: c.Name()}
- if len(c.aliasSet()) > 0 {
- aliasSet := make(map[string]string, len(c.aliasSet()))
- for k, v := range c.aliasSet() {
+ if len(c.AliasSet()) > 0 {
+ aliasSet := make(map[string]string, len(c.AliasSet()))
+ for k, v := range c.AliasSet() {
aliasSet[k] = v
}
ext.aliases = aliasSet
@@ -132,8 +133,8 @@ func (c *Container) ResolveCandidateNames(name string) []string {
return append(candidates, name)
}
-// aliasSet returns the alias to fully-qualified name mapping stored in the container.
-func (c *Container) aliasSet() map[string]string {
+// AliasSet returns the alias to fully-qualified name mapping stored in the container.
+func (c *Container) AliasSet() map[string]string {
if c == nil || c.aliases == nil {
return noAliases
}
@@ -159,7 +160,7 @@ func (c *Container) findAlias(name string) (string, bool) {
simple = name[0:dot]
qualifier = name[dot:]
}
- alias, found := c.aliasSet()[simple]
+ alias, found := c.AliasSet()[simple]
if !found {
return "", false
}
@@ -212,6 +213,13 @@ type ContainerOption func(*Container) (*Container, error)
func Abbrevs(qualifiedNames ...string) ContainerOption {
return func(c *Container) (*Container, error) {
for _, qn := range qualifiedNames {
+ qn = strings.TrimSpace(qn)
+ for _, r := range qn {
+ if !isIdentifierChar(r) {
+ return nil, fmt.Errorf(
+ "invalid qualified name: %s, wanted name of the form 'qualified.name'", qn)
+ }
+ }
ind := strings.LastIndex(qn, ".")
if ind <= 0 || ind >= len(qn)-1 {
return nil, fmt.Errorf(
@@ -256,7 +264,7 @@ func aliasAs(kind, qualifiedName, alias string) ContainerOption {
return nil, fmt.Errorf("%s must refer to a valid qualified name: %s",
kind, qualifiedName)
}
- aliasRef, found := c.aliasSet()[alias]
+ aliasRef, found := c.AliasSet()[alias]
if found {
return nil, fmt.Errorf(
"%s collides with existing reference: name=%s, %s=%s, existing=%s",
@@ -278,6 +286,10 @@ func aliasAs(kind, qualifiedName, alias string) ContainerOption {
}
}
+func isIdentifierChar(r rune) bool {
+ return r <= unicode.MaxASCII && (r == '.' || r == '_' || unicode.IsLetter(r) || unicode.IsNumber(r))
+}
+
// Name sets the fully-qualified name of the Container.
func Name(name string) ContainerOption {
return func(c *Container) (*Container, error) {
@@ -297,19 +309,19 @@ func Name(name string) ContainerOption {
// ToQualifiedName converts an expression AST into a qualified name if possible, with a boolean
// 'found' value that indicates if the conversion is successful.
-func ToQualifiedName(e *exprpb.Expr) (string, bool) {
- switch e.GetExprKind().(type) {
- case *exprpb.Expr_IdentExpr:
- id := e.GetIdentExpr()
- return id.GetName(), true
- case *exprpb.Expr_SelectExpr:
- sel := e.GetSelectExpr()
+func ToQualifiedName(e ast.Expr) (string, bool) {
+ switch e.Kind() {
+ case ast.IdentKind:
+ id := e.AsIdent()
+ return id, true
+ case ast.SelectKind:
+ sel := e.AsSelect()
// Test only expressions are not valid as qualified names.
- if sel.GetTestOnly() {
+ if sel.IsTestOnly() {
return "", false
}
- if qual, found := ToQualifiedName(sel.GetOperand()); found {
- return qual + "." + sel.GetField(), true
+ if qual, found := ToQualifiedName(sel.Operand()); found {
+ return qual + "." + sel.FieldName(), true
}
}
return "", false
diff --git a/vendor/github.com/google/cel-go/common/debug/BUILD.bazel b/vendor/github.com/google/cel-go/common/debug/BUILD.bazel
index 1f029839c..724ed3404 100644
--- a/vendor/github.com/google/cel-go/common/debug/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/common/debug/BUILD.bazel
@@ -13,6 +13,8 @@ go_library(
importpath = "github.com/google/cel-go/common/debug",
deps = [
"//common:go_default_library",
- "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "//common/ast:go_default_library",
+ "//common/types:go_default_library",
+ "//common/types/ref:go_default_library",
],
)
diff --git a/vendor/github.com/google/cel-go/common/debug/debug.go b/vendor/github.com/google/cel-go/common/debug/debug.go
index 5dab156ef..75f5f0d63 100644
--- a/vendor/github.com/google/cel-go/common/debug/debug.go
+++ b/vendor/github.com/google/cel-go/common/debug/debug.go
@@ -22,7 +22,9 @@ import (
"strconv"
"strings"
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
)
// Adorner returns debug metadata that will be tacked on to the string
@@ -38,7 +40,7 @@ type Writer interface {
// Buffer pushes an expression into an internal queue of expressions to
// write to a string.
- Buffer(e *exprpb.Expr)
+ Buffer(e ast.Expr)
}
type emptyDebugAdorner struct {
@@ -51,12 +53,12 @@ func (a *emptyDebugAdorner) GetMetadata(e any) string {
}
// ToDebugString gives the unadorned string representation of the Expr.
-func ToDebugString(e *exprpb.Expr) string {
+func ToDebugString(e ast.Expr) string {
return ToAdornedDebugString(e, emptyAdorner)
}
// ToAdornedDebugString gives the adorned string representation of the Expr.
-func ToAdornedDebugString(e *exprpb.Expr, adorner Adorner) string {
+func ToAdornedDebugString(e ast.Expr, adorner Adorner) string {
w := newDebugWriter(adorner)
w.Buffer(e)
return w.String()
@@ -78,49 +80,51 @@ func newDebugWriter(a Adorner) *debugWriter {
}
}
-func (w *debugWriter) Buffer(e *exprpb.Expr) {
+func (w *debugWriter) Buffer(e ast.Expr) {
if e == nil {
return
}
- switch e.ExprKind.(type) {
- case *exprpb.Expr_ConstExpr:
- w.append(formatLiteral(e.GetConstExpr()))
- case *exprpb.Expr_IdentExpr:
- w.append(e.GetIdentExpr().Name)
- case *exprpb.Expr_SelectExpr:
- w.appendSelect(e.GetSelectExpr())
- case *exprpb.Expr_CallExpr:
- w.appendCall(e.GetCallExpr())
- case *exprpb.Expr_ListExpr:
- w.appendList(e.GetListExpr())
- case *exprpb.Expr_StructExpr:
- w.appendStruct(e.GetStructExpr())
- case *exprpb.Expr_ComprehensionExpr:
- w.appendComprehension(e.GetComprehensionExpr())
+ switch e.Kind() {
+ case ast.LiteralKind:
+ w.append(formatLiteral(e.AsLiteral()))
+ case ast.IdentKind:
+ w.append(e.AsIdent())
+ case ast.SelectKind:
+ w.appendSelect(e.AsSelect())
+ case ast.CallKind:
+ w.appendCall(e.AsCall())
+ case ast.ListKind:
+ w.appendList(e.AsList())
+ case ast.MapKind:
+ w.appendMap(e.AsMap())
+ case ast.StructKind:
+ w.appendStruct(e.AsStruct())
+ case ast.ComprehensionKind:
+ w.appendComprehension(e.AsComprehension())
}
w.adorn(e)
}
-func (w *debugWriter) appendSelect(sel *exprpb.Expr_Select) {
- w.Buffer(sel.GetOperand())
+func (w *debugWriter) appendSelect(sel ast.SelectExpr) {
+ w.Buffer(sel.Operand())
w.append(".")
- w.append(sel.GetField())
- if sel.TestOnly {
+ w.append(sel.FieldName())
+ if sel.IsTestOnly() {
w.append("~test-only~")
}
}
-func (w *debugWriter) appendCall(call *exprpb.Expr_Call) {
- if call.Target != nil {
- w.Buffer(call.GetTarget())
+func (w *debugWriter) appendCall(call ast.CallExpr) {
+ if call.IsMemberFunction() {
+ w.Buffer(call.Target())
w.append(".")
}
- w.append(call.GetFunction())
+ w.append(call.FunctionName())
w.append("(")
- if len(call.GetArgs()) > 0 {
+ if len(call.Args()) > 0 {
w.addIndent()
w.appendLine()
- for i, arg := range call.GetArgs() {
+ for i, arg := range call.Args() {
if i > 0 {
w.append(",")
w.appendLine()
@@ -133,12 +137,12 @@ func (w *debugWriter) appendCall(call *exprpb.Expr_Call) {
w.append(")")
}
-func (w *debugWriter) appendList(list *exprpb.Expr_CreateList) {
+func (w *debugWriter) appendList(list ast.ListExpr) {
w.append("[")
- if len(list.GetElements()) > 0 {
+ if len(list.Elements()) > 0 {
w.appendLine()
w.addIndent()
- for i, elem := range list.GetElements() {
+ for i, elem := range list.Elements() {
if i > 0 {
w.append(",")
w.appendLine()
@@ -151,32 +155,25 @@ func (w *debugWriter) appendList(list *exprpb.Expr_CreateList) {
w.append("]")
}
-func (w *debugWriter) appendStruct(obj *exprpb.Expr_CreateStruct) {
- if obj.MessageName != "" {
- w.appendObject(obj)
- } else {
- w.appendMap(obj)
- }
-}
-
-func (w *debugWriter) appendObject(obj *exprpb.Expr_CreateStruct) {
- w.append(obj.GetMessageName())
+func (w *debugWriter) appendStruct(obj ast.StructExpr) {
+ w.append(obj.TypeName())
w.append("{")
- if len(obj.GetEntries()) > 0 {
+ if len(obj.Fields()) > 0 {
w.appendLine()
w.addIndent()
- for i, entry := range obj.GetEntries() {
+ for i, f := range obj.Fields() {
+ field := f.AsStructField()
if i > 0 {
w.append(",")
w.appendLine()
}
- if entry.GetOptionalEntry() {
+ if field.IsOptional() {
w.append("?")
}
- w.append(entry.GetFieldKey())
+ w.append(field.Name())
w.append(":")
- w.Buffer(entry.GetValue())
- w.adorn(entry)
+ w.Buffer(field.Value())
+ w.adorn(f)
}
w.removeIndent()
w.appendLine()
@@ -184,23 +181,24 @@ func (w *debugWriter) appendObject(obj *exprpb.Expr_CreateStruct) {
w.append("}")
}
-func (w *debugWriter) appendMap(obj *exprpb.Expr_CreateStruct) {
+func (w *debugWriter) appendMap(m ast.MapExpr) {
w.append("{")
- if len(obj.GetEntries()) > 0 {
+ if m.Size() > 0 {
w.appendLine()
w.addIndent()
- for i, entry := range obj.GetEntries() {
+ for i, e := range m.Entries() {
+ entry := e.AsMapEntry()
if i > 0 {
w.append(",")
w.appendLine()
}
- if entry.GetOptionalEntry() {
+ if entry.IsOptional() {
w.append("?")
}
- w.Buffer(entry.GetMapKey())
+ w.Buffer(entry.Key())
w.append(":")
- w.Buffer(entry.GetValue())
- w.adorn(entry)
+ w.Buffer(entry.Value())
+ w.adorn(e)
}
w.removeIndent()
w.appendLine()
@@ -208,62 +206,67 @@ func (w *debugWriter) appendMap(obj *exprpb.Expr_CreateStruct) {
w.append("}")
}
-func (w *debugWriter) appendComprehension(comprehension *exprpb.Expr_Comprehension) {
+func (w *debugWriter) appendComprehension(comprehension ast.ComprehensionExpr) {
w.append("__comprehension__(")
w.addIndent()
w.appendLine()
w.append("// Variable")
w.appendLine()
- w.append(comprehension.GetIterVar())
+ w.append(comprehension.IterVar())
w.append(",")
w.appendLine()
+ if comprehension.HasIterVar2() {
+ w.append(comprehension.IterVar2())
+ w.append(",")
+ w.appendLine()
+ }
w.append("// Target")
w.appendLine()
- w.Buffer(comprehension.GetIterRange())
+ w.Buffer(comprehension.IterRange())
w.append(",")
w.appendLine()
w.append("// Accumulator")
w.appendLine()
- w.append(comprehension.GetAccuVar())
+ w.append(comprehension.AccuVar())
w.append(",")
w.appendLine()
w.append("// Init")
w.appendLine()
- w.Buffer(comprehension.GetAccuInit())
+ w.Buffer(comprehension.AccuInit())
w.append(",")
w.appendLine()
w.append("// LoopCondition")
w.appendLine()
- w.Buffer(comprehension.GetLoopCondition())
+ w.Buffer(comprehension.LoopCondition())
w.append(",")
w.appendLine()
w.append("// LoopStep")
w.appendLine()
- w.Buffer(comprehension.GetLoopStep())
+ w.Buffer(comprehension.LoopStep())
w.append(",")
w.appendLine()
w.append("// Result")
w.appendLine()
- w.Buffer(comprehension.GetResult())
+ w.Buffer(comprehension.Result())
w.append(")")
w.removeIndent()
}
-func formatLiteral(c *exprpb.Constant) string {
- switch c.GetConstantKind().(type) {
- case *exprpb.Constant_BoolValue:
- return fmt.Sprintf("%t", c.GetBoolValue())
- case *exprpb.Constant_BytesValue:
- return fmt.Sprintf("b\"%s\"", string(c.GetBytesValue()))
- case *exprpb.Constant_DoubleValue:
- return fmt.Sprintf("%v", c.GetDoubleValue())
- case *exprpb.Constant_Int64Value:
- return fmt.Sprintf("%d", c.GetInt64Value())
- case *exprpb.Constant_StringValue:
- return strconv.Quote(c.GetStringValue())
- case *exprpb.Constant_Uint64Value:
- return fmt.Sprintf("%du", c.GetUint64Value())
- case *exprpb.Constant_NullValue:
+func formatLiteral(c ref.Val) string {
+ switch v := c.(type) {
+ case types.Bool:
+ return fmt.Sprintf("%t", v)
+ case types.Bytes:
+ return fmt.Sprintf("b%s", strconv.Quote(string(v)))
+ case types.Double:
+ return fmt.Sprintf("%v", float64(v))
+ case types.Int:
+ return fmt.Sprintf("%d", int64(v))
+ case types.String:
+ return strconv.Quote(string(v))
+ case types.Uint:
+ return fmt.Sprintf("%du", uint64(v))
+ case types.Null:
return "null"
default:
panic("Unknown constant type")
diff --git a/vendor/github.com/google/cel-go/common/decls/BUILD.bazel b/vendor/github.com/google/cel-go/common/decls/BUILD.bazel
index 17791dce6..bd3f9ae70 100644
--- a/vendor/github.com/google/cel-go/common/decls/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/common/decls/BUILD.bazel
@@ -13,7 +13,9 @@ go_library(
importpath = "github.com/google/cel-go/common/decls",
deps = [
"//checker/decls:go_default_library",
+ "//common:go_default_library",
"//common/functions:go_default_library",
+ "//common/operators:go_default_library",
"//common/types:go_default_library",
"//common/types/ref:go_default_library",
"//common/types/traits:go_default_library",
diff --git a/vendor/github.com/google/cel-go/common/decls/decls.go b/vendor/github.com/google/cel-go/common/decls/decls.go
index 734ebe57e..a4a51c3f2 100644
--- a/vendor/github.com/google/cel-go/common/decls/decls.go
+++ b/vendor/github.com/google/cel-go/common/decls/decls.go
@@ -20,7 +20,9 @@ import (
"strings"
chkdecls "github.com/google/cel-go/checker/decls"
+ "github.com/google/cel-go/common"
"github.com/google/cel-go/common/functions"
+ "github.com/google/cel-go/common/operators"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
@@ -54,6 +56,7 @@ func NewFunction(name string, opts ...FunctionOpt) (*FunctionDecl, error) {
// overload instances.
type FunctionDecl struct {
name string
+ doc string
// overloads associated with the function name.
overloads map[string]*OverloadDecl
@@ -84,6 +87,26 @@ const (
declarationEnabled
)
+// Documentation generates documentation about the Function and its overloads as a common.Doc object.
+func (f *FunctionDecl) Documentation() *common.Doc {
+ if f == nil {
+ return nil
+ }
+ children := make([]*common.Doc, len(f.OverloadDecls()))
+ for i, o := range f.OverloadDecls() {
+ var examples []*common.Doc
+ for _, ex := range o.Examples() {
+ examples = append(examples, common.NewExampleDoc(ex))
+ }
+ od := common.NewOverloadDoc(o.ID(), formatSignature(f.Name(), o), examples...)
+ children[i] = od
+ }
+ return common.NewFunctionDoc(
+ f.Name(),
+ f.Description(),
+ children...)
+}
+
// Name returns the function name in human-readable terms, e.g. 'contains' of 'math.least'
func (f *FunctionDecl) Name() string {
if f == nil {
@@ -92,9 +115,22 @@ func (f *FunctionDecl) Name() string {
return f.name
}
+// Description provides an overview of the function's purpose.
+//
+// Usage examples should be included on specific overloads.
+func (f *FunctionDecl) Description() string {
+ if f == nil {
+ return ""
+ }
+ return f.doc
+}
+
// IsDeclarationDisabled indicates that the function implementation should be added to the dispatcher, but the
// declaration should not be exposed for use in expressions.
func (f *FunctionDecl) IsDeclarationDisabled() bool {
+ if f == nil {
+ return true
+ }
return f.state == declarationDisabled
}
@@ -107,8 +143,8 @@ func (f *FunctionDecl) Merge(other *FunctionDecl) (*FunctionDecl, error) {
if f == other {
return f, nil
}
- if f.Name() != other.Name() {
- return nil, fmt.Errorf("cannot merge unrelated functions. %s and %s", f.Name(), other.Name())
+ if f == nil || other == nil || f.Name() != other.Name() {
+ return nil, fmt.Errorf("cannot merge unrelated functions. %q and %q", f.Name(), other.Name())
}
merged := &FunctionDecl{
name: f.Name(),
@@ -120,12 +156,17 @@ func (f *FunctionDecl) Merge(other *FunctionDecl) (*FunctionDecl, error) {
disableTypeGuards: f.disableTypeGuards && other.disableTypeGuards,
// default to the current functions declaration state.
state: f.state,
+ doc: f.doc,
}
// If the other state indicates that the declaration should be explicitly enabled or
// disabled, then update the merged state with the most recent value.
if other.state != declarationStateUnset {
merged.state = other.state
}
+ // Allow for non-empty overrides of documentation
+ if len(other.doc) != 0 && f.doc != other.doc {
+ merged.doc = other.doc
+ }
// baseline copy of the overloads and their ordinals
copy(merged.overloadOrdinals, f.overloadOrdinals)
for oID, o := range f.overloads {
@@ -148,6 +189,70 @@ func (f *FunctionDecl) Merge(other *FunctionDecl) (*FunctionDecl, error) {
return merged, nil
}
+// FunctionSubsetter subsets a function declaration, or returns nil and false if the resulting
+// subset is empty.
+type FunctionSubsetter func(fn *FunctionDecl) (*FunctionDecl, bool)
+
+// OverloadSelector selects an overload associated with a given function when it returns true.
+//
+// Used in combination with the Subset method.
+type OverloadSelector func(overload *OverloadDecl) bool
+
+// IncludeOverloads defines an OverloadSelector which allow-lists a set of overloads by their IDs.
+func IncludeOverloads(overloadIDs ...string) OverloadSelector {
+ return func(overload *OverloadDecl) bool {
+ for _, oID := range overloadIDs {
+ if overload.id == oID {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+// ExcludeOverloads defines an OverloadSelector which deny-lists a set of overloads by their IDs.
+func ExcludeOverloads(overloadIDs ...string) OverloadSelector {
+ return func(overload *OverloadDecl) bool {
+ for _, oID := range overloadIDs {
+ if overload.id == oID {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+// Subset returns a new function declaration containing only the overloads accepted by the selector.
+// If no overloads are selected, then nil is returned to indicate the function has no usable
+// overloads.
+func (f *FunctionDecl) Subset(selector OverloadSelector) *FunctionDecl {
+ if f == nil {
+ return nil
+ }
+ overloads := make(map[string]*OverloadDecl)
+ overloadOrdinals := make([]string, 0, len(f.overloadOrdinals))
+ for _, oID := range f.overloadOrdinals {
+ overload := f.overloads[oID]
+ if selector(overload) {
+ overloads[oID] = overload
+ overloadOrdinals = append(overloadOrdinals, oID)
+ }
+ }
+ if len(overloads) == 0 {
+ return nil
+ }
+ subset := &FunctionDecl{
+ name: f.Name(),
+ doc: f.doc,
+ overloads: overloads,
+ singleton: f.singleton,
+ disableTypeGuards: f.disableTypeGuards,
+ state: f.state,
+ overloadOrdinals: overloadOrdinals,
+ }
+ return subset
+}
+
// AddOverload ensures that the new overload does not collide with an existing overload signature;
// however, if the function signatures are identical, the implementation may be rewritten as it's
// difficult to compare functions by object identity.
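
// Illustrative sketch, not part of the vendored patch: trimming a declaration
// to a subset of its overloads. Assumes sizeFn is a *decls.FunctionDecl that
// declares overloads "size_string" and "size_list" (hypothetical IDs).

package subsetdemo

import "github.com/google/cel-go/common/decls"

func trimSize(sizeFn *decls.FunctionDecl) *decls.FunctionDecl {
	// Keep only the string overload; Subset returns nil when the selector
	// rejects every overload, signalling that the function should be dropped.
	return sizeFn.Subset(decls.ExcludeOverloads("size_list"))
}
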
@@ -155,6 +260,9 @@ func (f *FunctionDecl) AddOverload(overload *OverloadDecl) error {
if f == nil {
return fmt.Errorf("nil function cannot add overload: %s", overload.ID())
}
+ if overload == nil {
+ return fmt.Errorf("cannot add nil overload to funciton: %s", f.Name())
+ }
for oID, o := range f.overloads {
if oID != overload.ID() && o.SignatureOverlaps(overload) {
return fmt.Errorf("overload signature collision in function %s: %s collides with %s", f.Name(), oID, overload.ID())
@@ -162,11 +270,20 @@ func (f *FunctionDecl) AddOverload(overload *OverloadDecl) error {
if oID == overload.ID() {
if o.SignatureEquals(overload) && o.IsNonStrict() == overload.IsNonStrict() {
// Allow redefinition of an overload implementation so long as the signatures match.
- f.overloads[oID] = overload
+ if overload.hasBinding() {
+ f.overloads[oID] = overload
+ }
+ // Allow redefinition of the doc string.
+ if len(overload.doc) != 0 && o.doc != overload.doc {
+ o.doc = overload.doc
+ }
return nil
}
return fmt.Errorf("overload redefinition in function. %s: %s has multiple definitions", f.Name(), oID)
}
+ if overload.HasLateBinding() != o.HasLateBinding() {
+ return fmt.Errorf("overload with late binding cannot be added to function %s: cannot mix late and non-late bindings", f.Name())
+ }
}
f.overloadOrdinals = append(f.overloadOrdinals, overload.ID())
f.overloads[overload.ID()] = overload
@@ -175,8 +292,9 @@ func (f *FunctionDecl) AddOverload(overload *OverloadDecl) error {
// OverloadDecls returns the overload declarations in the order in which they were declared.
func (f *FunctionDecl) OverloadDecls() []*OverloadDecl {
+ var emptySet []*OverloadDecl
if f == nil {
- return []*OverloadDecl{}
+ return emptySet
}
overloads := make([]*OverloadDecl, 0, len(f.overloads))
for _, oID := range f.overloadOrdinals {
@@ -185,15 +303,31 @@ func (f *FunctionDecl) OverloadDecls() []*OverloadDecl {
return overloads
}
+// HasLateBinding returns true if the function has late bindings. A function cannot mix late bindings with other bindings.
+func (f *FunctionDecl) HasLateBinding() bool {
+ if f == nil {
+ return false
+ }
+ for _, oID := range f.overloadOrdinals {
+ if f.overloads[oID].HasLateBinding() {
+ return true
+ }
+ }
+ return false
+}
+
// Bindings produces a set of function bindings, if any are defined.
func (f *FunctionDecl) Bindings() ([]*functions.Overload, error) {
+ var emptySet []*functions.Overload
if f == nil {
- return []*functions.Overload{}, nil
+ return emptySet, nil
}
overloads := []*functions.Overload{}
nonStrict := false
+ hasLateBinding := false
for _, oID := range f.overloadOrdinals {
o := f.overloads[oID]
+ hasLateBinding = hasLateBinding || o.HasLateBinding()
if o.hasBinding() {
overload := &functions.Overload{
Operator: o.ID(),
@@ -211,6 +345,9 @@ func (f *FunctionDecl) Bindings() ([]*functions.Overload, error) {
if len(overloads) != 0 {
return nil, fmt.Errorf("singleton function incompatible with specialized overloads: %s", f.Name())
}
+ if hasLateBinding {
+ return nil, fmt.Errorf("singleton function incompatible with late bindings: %s", f.Name())
+ }
overloads = []*functions.Overload{
{
Operator: f.Name(),
@@ -249,15 +386,15 @@ func (f *FunctionDecl) Bindings() ([]*functions.Overload, error) {
// are preserved in order to assist with the function resolution step.
switch len(args) {
case 1:
- if o.unaryOp != nil && o.matchesRuntimeSignature( /* disableTypeGuards=*/ false, args...) {
+ if o.unaryOp != nil && o.matchesRuntimeSignature(f.disableTypeGuards, args...) {
return o.unaryOp(args[0])
}
case 2:
- if o.binaryOp != nil && o.matchesRuntimeSignature( /* disableTypeGuards=*/ false, args...) {
+ if o.binaryOp != nil && o.matchesRuntimeSignature(f.disableTypeGuards, args...) {
return o.binaryOp(args[0], args[1])
}
}
- if o.functionOp != nil && o.matchesRuntimeSignature( /* disableTypeGuards=*/ false, args...) {
+ if o.functionOp != nil && o.matchesRuntimeSignature(f.disableTypeGuards, args...) {
return o.functionOp(args...)
}
// eventually this will fall through to the noSuchOverload below.
@@ -296,6 +433,14 @@ func MaybeNoSuchOverload(funcName string, args ...ref.Val) ref.Val {
// FunctionOpt defines a functional option for mutating a function declaration.
type FunctionOpt func(*FunctionDecl) (*FunctionDecl, error)
+// FunctionDocs configures function documentation from a list of strings which are joined with newlines.
+func FunctionDocs(docs ...string) FunctionOpt {
+ return func(fn *FunctionDecl) (*FunctionDecl, error) {
+ fn.doc = common.MultilineDescription(docs...)
+ return fn, nil
+ }
+}
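+
+// For example (an illustrative sketch; the function name and doc text are assumed):
+//
+//	fn, _ := NewFunction("abs",
+//	    FunctionDocs(`compute the absolute value`, `of a numeric input`),
+//	    Overload("abs_int", []*types.Type{types.IntType}, types.IntType))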
+
// DisableTypeGuards disables automatically generated function invocation guards on direct overload calls.
// Type guards remain on during dynamic dispatch for parsed-only expressions.
func DisableTypeGuards(value bool) FunctionOpt {
@@ -448,9 +593,13 @@ func newOverloadInternal(overloadID string,
// implementation.
type OverloadDecl struct {
id string
+ doc string
argTypes []*types.Type
resultType *types.Type
isMemberFunction bool
+ // hasLateBinding indicates that the function has a binding which is not known at compile time.
+ // This is useful for functions which have side-effects or are not deterministically computable.
+ hasLateBinding bool
// nonStrict indicates that the function will accept error and unknown arguments as inputs.
nonStrict bool
// operandTrait indicates whether the member argument should have a specific type-trait.
@@ -467,6 +616,15 @@ type OverloadDecl struct {
functionOp functions.FunctionOp
}
+// Examples returns a list of string examples for the overload.
+func (o *OverloadDecl) Examples() []string {
+ var emptySet []string
+ if o == nil || len(o.doc) == 0 {
+ return emptySet
+ }
+ return common.ParseDescriptions(o.doc)
+}
+
// ID mirrors the overload signature and provides a unique id which may be referenced within the type-checker
// and interpreter to optimize performance.
//
@@ -506,6 +664,14 @@ func (o *OverloadDecl) IsNonStrict() bool {
return o.nonStrict
}
+// HasLateBinding returns whether the overload has a binding which is not known at compile time.
+func (o *OverloadDecl) HasLateBinding() bool {
+ if o == nil {
+ return false
+ }
+ return o.hasLateBinding
+}
+
// OperandTrait returns the trait mask of the first operand to the overload call, e.g.
// `traits.Indexer`
func (o *OverloadDecl) OperandTrait() int {
@@ -664,6 +830,14 @@ func matchOperandTrait(trait int, arg ref.Val) bool {
// OverloadOpt is a functional option for configuring a function overload.
type OverloadOpt func(*OverloadDecl) (*OverloadDecl, error)
+// OverloadExamples configures example expressions for the overload.
+func OverloadExamples(examples ...string) OverloadOpt {
+ return func(o *OverloadDecl) (*OverloadDecl, error) {
+ o.doc = common.MultilineDescription(examples...)
+ return o, nil
+ }
+}
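+
+// A hypothetical sketch of attaching examples to an overload:
+//
+//	Overload("abs_int", []*types.Type{types.IntType}, types.IntType,
+//	    OverloadExamples(`abs(-1) // 1`, `abs(1) // 1`))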
+
// UnaryBinding provides the implementation of a unary overload. The provided function is protected by a runtime
// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
func UnaryBinding(binding functions.UnaryOp) OverloadOpt {
@@ -674,6 +848,9 @@ func UnaryBinding(binding functions.UnaryOp) OverloadOpt {
if len(o.ArgTypes()) != 1 {
return nil, fmt.Errorf("unary function bound to non-unary overload: %s", o.ID())
}
+ if o.hasLateBinding {
+ return nil, fmt.Errorf("overload already has a late binding: %s", o.ID())
+ }
o.unaryOp = binding
return o, nil
}
@@ -689,6 +866,9 @@ func BinaryBinding(binding functions.BinaryOp) OverloadOpt {
if len(o.ArgTypes()) != 2 {
return nil, fmt.Errorf("binary function bound to non-binary overload: %s", o.ID())
}
+ if o.hasLateBinding {
+ return nil, fmt.Errorf("overload already has a late binding: %s", o.ID())
+ }
o.binaryOp = binding
return o, nil
}
@@ -701,11 +881,26 @@ func FunctionBinding(binding functions.FunctionOp) OverloadOpt {
if o.hasBinding() {
return nil, fmt.Errorf("overload already has a binding: %s", o.ID())
}
+ if o.hasLateBinding {
+ return nil, fmt.Errorf("overload already has a late binding: %s", o.ID())
+ }
o.functionOp = binding
return o, nil
}
}
+// LateFunctionBinding indicates that the function has a binding which is not known at compile time.
+// This is useful for functions which have side-effects or are not deterministically computable.
+func LateFunctionBinding() OverloadOpt {
+ return func(o *OverloadDecl) (*OverloadDecl, error) {
+ if o.hasBinding() {
+ return nil, fmt.Errorf("overload already has a binding: %s", o.ID())
+ }
+ o.hasLateBinding = true
+ return o, nil
+ }
+}
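+
+// Illustrative sketch (hypothetical overload id); the concrete implementation is supplied
+// by the evaluator at a later stage rather than at declaration time:
+//
+//	Overload("env_get_string", []*types.Type{types.StringType}, types.StringType,
+//	    LateFunctionBinding())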
+
// OverloadIsNonStrict enables the function to be called with error and unknown argument values.
//
// Note: do not use this option unless absolutely necessary as it should be an uncommon feature.
@@ -735,13 +930,27 @@ func NewVariable(name string, t *types.Type) *VariableDecl {
return &VariableDecl{name: name, varType: t}
}
+// NewVariableWithDoc creates a new variable declaration with usage documentation.
+func NewVariableWithDoc(name string, t *types.Type, doc string) *VariableDecl {
+ return &VariableDecl{name: name, varType: t, doc: doc}
+}
+
// VariableDecl defines a variable declaration which may optionally have a constant value.
type VariableDecl struct {
name string
+ doc string
varType *types.Type
value ref.Val
}
+// Documentation returns name, type, and description for the variable.
+func (v *VariableDecl) Documentation() *common.Doc {
+ if v == nil {
+ return nil
+ }
+ return common.NewVariableDoc(v.Name(), describeCELType(v.Type()), v.Description())
+}
+
// Name returns the fully-qualified variable name
func (v *VariableDecl) Name() string {
if v == nil {
@@ -750,6 +959,16 @@ func (v *VariableDecl) Name() string {
return v.name
}
+// Description returns the usage documentation for the variable, if set.
+//
+// Good usage instructions provide information about the valid formats, ranges, and sizes for the variable type.
+func (v *VariableDecl) Description() string {
+ if v == nil {
+ return ""
+ }
+ return v.doc
+}
+
// Type returns the types.Type value associated with the variable.
func (v *VariableDecl) Type() *types.Type {
if v == nil {
@@ -775,22 +994,32 @@ func (v *VariableDecl) DeclarationIsEquivalent(other *VariableDecl) bool {
return v.Name() == other.Name() && v.Type().IsEquivalentType(other.Type())
}
+// TypeVariable creates a new type identifier for use within a types.Provider
+func TypeVariable(t *types.Type) *VariableDecl {
+ return NewVariable(t.TypeName(), types.NewTypeTypeWithParam(t))
+}
+
// VariableDeclToExprDecl converts a go-native variable declaration into a protobuf-type variable declaration.
func VariableDeclToExprDecl(v *VariableDecl) (*exprpb.Decl, error) {
+ return variableDeclToExprDecl(v)
+}
+
+// variableDeclToExprDecl converts a go-native variable declaration into a protobuf-type variable declaration.
+func variableDeclToExprDecl(v *VariableDecl) (*exprpb.Decl, error) {
varType, err := types.TypeToExprType(v.Type())
if err != nil {
return nil, err
}
- return chkdecls.NewVar(v.Name(), varType), nil
-}
-
-// TypeVariable creates a new type identifier for use within a types.Provider
-func TypeVariable(t *types.Type) *VariableDecl {
- return NewVariable(t.TypeName(), types.NewTypeTypeWithParam(t))
+ return chkdecls.NewVarWithDoc(v.Name(), varType, v.doc), nil
}
// FunctionDeclToExprDecl converts a go-native function declaration into a protobuf-typed function declaration.
func FunctionDeclToExprDecl(f *FunctionDecl) (*exprpb.Decl, error) {
+ return functionDeclToExprDecl(f)
+}
+
+// functionDeclToExprDecl converts a go-native function declaration into a protobuf-typed function declaration.
+func functionDeclToExprDecl(f *FunctionDecl) (*exprpb.Decl, error) {
overloads := make([]*exprpb.Decl_FunctionDecl_Overload, len(f.overloads))
for i, oID := range f.overloadOrdinals {
o := f.overloads[oID]
@@ -826,8 +1055,10 @@ func FunctionDeclToExprDecl(f *FunctionDecl) (*exprpb.Decl, error) {
overloads[i] = chkdecls.NewParameterizedOverload(oID, argTypes, resultType, params)
}
}
+ doc := common.MultilineDescription(o.Examples()...)
+ overloads[i].Doc = doc
}
- return chkdecls.NewFunction(f.Name(), overloads...), nil
+ return chkdecls.NewFunctionWithDoc(f.Name(), f.Description(), overloads...), nil
}
func collectParamNames(paramNames map[string]struct{}, arg *types.Type) {
@@ -839,6 +1070,60 @@ func collectParamNames(paramNames map[string]struct{}, arg *types.Type) {
}
}
+func formatSignature(fnName string, o *OverloadDecl) string {
+ if opName, isOperator := operators.FindReverse(fnName); isOperator {
+ if opName == "" {
+ opName = fnName
+ }
+ return formatOperator(opName, o)
+ }
+ return formatCall(fnName, o)
+}
+
+func formatOperator(opName string, o *OverloadDecl) string {
+ args := o.ArgTypes()
+ argTypes := make([]string, len(o.ArgTypes()))
+ for j, a := range args {
+ argTypes[j] = describeCELType(a)
+ }
+ ret := describeCELType(o.ResultType())
+ switch len(args) {
+ case 1:
+ return fmt.Sprintf("%s%s -> %s", opName, argTypes[0], ret)
+ case 2:
+ if opName == operators.Index {
+ return fmt.Sprintf("%s[%s] -> %s", argTypes[0], argTypes[1], ret)
+ }
+ return fmt.Sprintf("%s %s %s -> %s", argTypes[0], opName, argTypes[1], ret)
+ default:
+ if opName == operators.Conditional {
+ return fmt.Sprint("bool ? : -> ")
+ }
+ return formatCall(opName, o)
+ }
+}
+
+func formatCall(funcName string, o *OverloadDecl) string {
+ args := make([]string, len(o.ArgTypes()))
+ ret := describeCELType(o.ResultType())
+ for j, a := range o.ArgTypes() {
+ args[j] = describeCELType(a)
+ }
+ if o.IsMemberFunction() {
+ target := args[0]
+ args = args[1:]
+ return fmt.Sprintf("%s.%s(%s) -> %s", target, funcName, strings.Join(args, ", "), ret)
+ }
+ return fmt.Sprintf("%s(%s) -> %s", funcName, strings.Join(args, ", "), ret)
+}
+
+func describeCELType(t *types.Type) string {
+ if t.Kind() == types.TypeKind {
+ return "type"
+ }
+ return t.String()
+}
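+
+// For illustration, assuming stdlib-style overloads, formatSignature renders strings such as:
+//
+//	int + int -> int                  (binary operator)
+//	map(string, dyn)[string] -> dyn   (index operator)
+//	string.startsWith(string) -> bool (member call)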
+
var (
- emptyArgs = []*types.Type{}
+ emptyArgs []*types.Type
)
diff --git a/vendor/github.com/google/cel-go/common/doc.go b/vendor/github.com/google/cel-go/common/doc.go
index 5362fdfe4..06eae3642 100644
--- a/vendor/github.com/google/cel-go/common/doc.go
+++ b/vendor/github.com/google/cel-go/common/doc.go
@@ -15,3 +15,157 @@
// Package common defines types and utilities common to expression parsing,
// checking, and interpretation
package common
+
+import (
+ "strings"
+ "unicode"
+)
+
+// DocKind indicates the type of documentation element.
+type DocKind int
+
+const (
+ // DocEnv represents environment variable documentation.
+ DocEnv DocKind = iota + 1
+ // DocFunction represents function documentation.
+ DocFunction
+ // DocOverload represents function overload documentation.
+ DocOverload
+ // DocVariable represents variable documentation.
+ DocVariable
+ // DocMacro represents macro documentation.
+ DocMacro
+ // DocExample represents example documentation.
+ DocExample
+)
+
+// Doc holds the documentation details for a specific program element like
+// a variable, function, macro, or example.
+type Doc struct {
+ // Kind specifies the type of documentation element (e.g., Function, Variable).
+ Kind DocKind
+
+ // Name is the identifier of the documented element (e.g., function name, variable name).
+ Name string
+
+ // Type is the data type associated with the element, primarily used for variables.
+ Type string
+
+ // Signature represents the function or overload signature.
+ Signature string
+
+ // Description holds the textual description of the element, potentially spanning multiple lines.
+ Description string
+
+ // Children holds nested documentation elements, such as overloads for a function
+ // or examples for a function/macro.
+ Children []*Doc
+}
+
+// MultilineDescription combines multiple lines into a newline separated string.
+func MultilineDescription(lines ...string) string {
+ return strings.Join(lines, "\n")
+}
+
+// ParseDescription takes a single string containing newline characters and splits
+// it into a multiline description. All empty lines will be skipped.
+//
+// Returns an empty string if the input string is empty.
+func ParseDescription(doc string) string {
+ var lines []string
+ if len(doc) != 0 {
+ // Split the input string by newline characters.
+ for _, line := range strings.Split(doc, "\n") {
+ l := strings.TrimRightFunc(line, unicode.IsSpace)
+ if len(l) == 0 {
+ continue
+ }
+ lines = append(lines, l)
+ }
+ }
+ // Join the retained lines; returns an empty string when no lines remain.
+ return MultilineDescription(lines...)
+}
+
+// ParseDescriptions splits a documentation string into multiple multi-line description
+// sections, using blank lines as delimiters.
+func ParseDescriptions(doc string) []string {
+ var examples []string
+ if len(doc) != 0 {
+ lines := strings.Split(doc, "\n")
+ lineStart := 0
+ for i, l := range lines {
+ // Trim trailing whitespace to identify effectively blank lines.
+ l = strings.TrimRightFunc(l, unicode.IsSpace)
+ // If a line is blank, it marks the end of the current section.
+ if len(l) == 0 {
+ // Start the next section after the blank line.
+ ex := lines[lineStart:i]
+ if len(ex) != 0 {
+ examples = append(examples, MultilineDescription(ex...))
+ }
+ lineStart = i + 1
+ }
+ }
+ // Append the last section if it wasn't terminated by a blank line.
+ if lineStart < len(lines) {
+ examples = append(examples, MultilineDescription(lines[lineStart:]...))
+ }
+ }
+ return examples
+}
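+
+// For example (illustrative):
+//
+//	ParseDescription("a  \n\nb\n")         // "a\nb" (trailing space and blank lines dropped)
+//	ParseDescriptions("ex1a\nex1b\n\nex2") // ["ex1a\nex1b", "ex2"]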
+
+// NewVariableDoc creates a new Doc struct specifically for documenting a variable.
+func NewVariableDoc(name, celType, description string) *Doc {
+ return &Doc{
+ Kind: DocVariable,
+ Name: name,
+ Type: celType,
+ Description: ParseDescription(description),
+ }
+}
+
+// NewFunctionDoc creates a new Doc struct for documenting a function.
+func NewFunctionDoc(name, description string, overloads ...*Doc) *Doc {
+ return &Doc{
+ Kind: DocFunction,
+ Name: name,
+ Description: ParseDescription(description),
+ Children: overloads,
+ }
+}
+
+// NewOverloadDoc creates a new Doc struct for documenting a function overload.
+func NewOverloadDoc(id, signature string, examples ...*Doc) *Doc {
+ return &Doc{
+ Kind: DocOverload,
+ Name: id,
+ Signature: signature,
+ Children: examples,
+ }
+}
+
+// NewMacroDoc creates a new Doc struct for documenting a macro.
+func NewMacroDoc(name, description string, examples ...*Doc) *Doc {
+ return &Doc{
+ Kind: DocMacro,
+ Name: name,
+ Description: ParseDescription(description),
+ Children: examples,
+ }
+}
+
+// NewExampleDoc creates a new Doc struct specifically for holding an example.
+func NewExampleDoc(ex string) *Doc {
+ return &Doc{
+ Kind: DocExample,
+ Description: ex,
+ }
+}
+
+// Documentor is an interface for types that can provide their own documentation.
+type Documentor interface {
+ // Documentation returns the documentation coded by the DocKind to assist
+ // with text formatting.
+ Documentation() *Doc
+}
diff --git a/vendor/github.com/google/cel-go/common/env/BUILD.bazel b/vendor/github.com/google/cel-go/common/env/BUILD.bazel
new file mode 100644
index 000000000..aebe1e544
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/env/BUILD.bazel
@@ -0,0 +1,50 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "env.go",
+ ],
+ importpath = "github.com/google/cel-go/common/env",
+ deps = [
+ "//common:go_default_library",
+ "//common/decls:go_default_library",
+ "//common/types:go_default_library",
+ ],
+)
+
+go_test(
+ name = "go_default_test",
+ size = "small",
+ srcs = [
+ "env_test.go",
+ ],
+ data = glob(["testdata/**"]),
+ embed = [":go_default_library"],
+ deps = [
+ "//common/decls:go_default_library",
+ "//common/operators:go_default_library",
+ "//common/overloads:go_default_library",
+ "//common/types:go_default_library",
+ "@in_gopkg_yaml_v3//:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/common/env/env.go b/vendor/github.com/google/cel-go/common/env/env.go
new file mode 100644
index 000000000..d848860c2
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/env/env.go
@@ -0,0 +1,887 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package env provides a representation of a CEL environment.
+package env
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/google/cel-go/common/decls"
+ "github.com/google/cel-go/common/types"
+)
+
+// NewConfig creates an instance of a YAML serializable CEL environment configuration.
+func NewConfig(name string) *Config {
+ return &Config{
+ Name: name,
+ }
+}
+
+// Config represents a serializable form of the CEL environment configuration.
+//
+// Note: custom validations, feature flags, and performance tuning parameters are not (yet)
+// considered part of the core CEL environment configuration and should be managed separately
+// until a common convention for such settings is developed.
+type Config struct {
+ Name string `yaml:"name,omitempty"`
+ Description string `yaml:"description,omitempty"`
+ Container string `yaml:"container,omitempty"`
+ Imports []*Import `yaml:"imports,omitempty"`
+ StdLib *LibrarySubset `yaml:"stdlib,omitempty"`
+ Extensions []*Extension `yaml:"extensions,omitempty"`
+ ContextVariable *ContextVariable `yaml:"context_variable,omitempty"`
+ Variables []*Variable `yaml:"variables,omitempty"`
+ Functions []*Function `yaml:"functions,omitempty"`
+ Validators []*Validator `yaml:"validators,omitempty"`
+ Features []*Feature `yaml:"features,omitempty"`
+}
+
+// Validate validates the whole configuration is well-formed.
+func (c *Config) Validate() error {
+ if c == nil {
+ return nil
+ }
+ var errs []error
+ for _, imp := range c.Imports {
+ if err := imp.Validate(); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ if err := c.StdLib.Validate(); err != nil {
+ errs = append(errs, err)
+ }
+ for _, ext := range c.Extensions {
+ if err := ext.Validate(); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ if err := c.ContextVariable.Validate(); err != nil {
+ errs = append(errs, err)
+ }
+ if c.ContextVariable != nil && len(c.Variables) != 0 {
+ errs = append(errs, errors.New("invalid config: either context variable or variables may be set, but not both"))
+ }
+ for _, v := range c.Variables {
+ if err := v.Validate(); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ for _, fn := range c.Functions {
+ if err := fn.Validate(); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ for _, feat := range c.Features {
+ if err := feat.Validate(); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ for _, val := range c.Validators {
+ if err := val.Validate(); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ return errors.Join(errs...)
+}
+
+// SetContainer configures the container name for this configuration.
+func (c *Config) SetContainer(container string) *Config {
+ c.Container = container
+ return c
+}
+
+// AddVariableDecls adds one or more variables to the config, converting them to serializable values first.
+//
+// VariableDecl inputs are expected to be well-formed.
+func (c *Config) AddVariableDecls(vars ...*decls.VariableDecl) *Config {
+ convVars := make([]*Variable, len(vars))
+ for i, v := range vars {
+ if v == nil {
+ continue
+ }
+ cv := NewVariable(v.Name(), SerializeTypeDesc(v.Type()))
+ cv.Description = v.Description()
+ convVars[i] = cv
+ }
+ return c.AddVariables(convVars...)
+}
+
+// AddVariables adds one or more variables to the config.
+func (c *Config) AddVariables(vars ...*Variable) *Config {
+ c.Variables = append(c.Variables, vars...)
+ return c
+}
+
+// SetContextVariable configures the ContextVariable for this configuration.
+func (c *Config) SetContextVariable(ctx *ContextVariable) *Config {
+ c.ContextVariable = ctx
+ return c
+}
+
+// AddFunctionDecls adds one or more functions to the config, converting them to serializable values first.
+//
+// FunctionDecl inputs are expected to be well-formed.
+func (c *Config) AddFunctionDecls(funcs ...*decls.FunctionDecl) *Config {
+ convFuncs := make([]*Function, len(funcs))
+ for i, fn := range funcs {
+ if fn == nil {
+ continue
+ }
+ overloads := make([]*Overload, 0, len(fn.OverloadDecls()))
+ for _, o := range fn.OverloadDecls() {
+ overloadID := o.ID()
+ args := make([]*TypeDesc, 0, len(o.ArgTypes()))
+ for _, a := range o.ArgTypes() {
+ args = append(args, SerializeTypeDesc(a))
+ }
+ ret := SerializeTypeDesc(o.ResultType())
+ var overload *Overload
+ if o.IsMemberFunction() {
+ overload = NewMemberOverload(overloadID, args[0], args[1:], ret)
+ } else {
+ overload = NewOverload(overloadID, args, ret)
+ }
+ if examples := o.Examples(); len(examples) > 0 {
+ overload.Examples = examples
+ }
+ overloads = append(overloads, overload)
+ }
+ cf := NewFunction(fn.Name(), overloads...)
+ cf.Description = fn.Description()
+ convFuncs[i] = cf
+ }
+ return c.AddFunctions(convFuncs...)
+}
+
+// AddFunctions adds one or more functions to the config.
+func (c *Config) AddFunctions(funcs ...*Function) *Config {
+ c.Functions = append(c.Functions, funcs...)
+ return c
+}
+
+// SetStdLib configures the LibrarySubset for the standard library.
+func (c *Config) SetStdLib(subset *LibrarySubset) *Config {
+ c.StdLib = subset
+ return c
+}
+
+// AddImports appends a set of imports to the config.
+func (c *Config) AddImports(imps ...*Import) *Config {
+ c.Imports = append(c.Imports, imps...)
+ return c
+}
+
+// AddExtensions appends a set of extensions to the config.
+func (c *Config) AddExtensions(exts ...*Extension) *Config {
+ c.Extensions = append(c.Extensions, exts...)
+ return c
+}
+
+// AddValidators appends one or more validators to the config.
+func (c *Config) AddValidators(vals ...*Validator) *Config {
+ c.Validators = append(c.Validators, vals...)
+ return c
+}
+
+// AddFeatures appends one or more features to the config.
+func (c *Config) AddFeatures(feats ...*Feature) *Config {
+ c.Features = append(c.Features, feats...)
+ return c
+}
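+
+// A minimal illustrative sketch of building a config programmatically (all values assumed):
+//
+//	cfg := NewConfig("example").
+//	    SetContainer("example.pkg").
+//	    AddVariables(NewVariable("request", NewTypeDesc("map", NewTypeDesc("string"), NewTypeDesc("dyn")))).
+//	    AddExtensions(NewExtension("strings", math.MaxUint32)) // version 'latest'
+//	err := cfg.Validate()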
+
+// NewImport returns a serializable import value from the qualified type name.
+func NewImport(name string) *Import {
+ return &Import{Name: name}
+}
+
+// Import represents a type name that will be abbreviated by its simple name using
+// the cel.Abbrevs() option.
+type Import struct {
+ Name string `yaml:"name"`
+}
+
+// Validate validates the import configuration is well-formed.
+func (imp *Import) Validate() error {
+ if imp == nil {
+ return errors.New("invalid import: nil")
+ }
+ if imp.Name == "" {
+ return errors.New("invalid import: missing type name")
+ }
+ return nil
+}
+
+// NewVariable returns a serializable variable from a name and type definition
+func NewVariable(name string, t *TypeDesc) *Variable {
+ return NewVariableWithDoc(name, t, "")
+}
+
+// NewVariableWithDoc returns a serializable variable from a name, type definition, and doc string.
+func NewVariableWithDoc(name string, t *TypeDesc, doc string) *Variable {
+ return &Variable{Name: name, TypeDesc: t, Description: doc}
+}
+
+// Variable represents a typed variable declaration which will be published via the
+// cel.VariableDecls() option.
+type Variable struct {
+ Name string `yaml:"name"`
+ Description string `yaml:"description,omitempty"`
+
+ // Type represents the type declaration for the variable.
+ //
+ // Deprecated: use the embedded *TypeDesc fields directly.
+ Type *TypeDesc `yaml:"type,omitempty"`
+
+ // TypeDesc is an embedded set of fields allowing for the specification of the Variable type.
+ *TypeDesc `yaml:",inline"`
+}
+
+// Validate validates the variable configuration is well-formed.
+func (v *Variable) Validate() error {
+ if v == nil {
+ return errors.New("invalid variable: nil")
+ }
+ if v.Name == "" {
+ return errors.New("invalid variable: missing variable name")
+ }
+ if err := v.GetType().Validate(); err != nil {
+ return fmt.Errorf("invalid variable %q: %w", v.Name, err)
+ }
+ return nil
+}
+
+// GetType returns the variable type description.
+//
+// Note, if both the embedded TypeDesc and the field Type are non-nil, the embedded TypeDesc will
+// take precedence.
+func (v *Variable) GetType() *TypeDesc {
+ if v == nil {
+ return nil
+ }
+ if v.TypeDesc != nil {
+ return v.TypeDesc
+ }
+ if v.Type != nil {
+ return v.Type
+ }
+ return nil
+}
+
+// AsCELVariable converts the serializable form of the Variable into a CEL environment declaration.
+func (v *Variable) AsCELVariable(tp types.Provider) (*decls.VariableDecl, error) {
+ if err := v.Validate(); err != nil {
+ return nil, err
+ }
+ t, err := v.GetType().AsCELType(tp)
+ if err != nil {
+ return nil, fmt.Errorf("invalid variable %q: %w", v.Name, err)
+ }
+ return decls.NewVariableWithDoc(v.Name, t, v.Description), nil
+}
+
+// NewContextVariable returns a serializable context variable with a specific type name.
+func NewContextVariable(typeName string) *ContextVariable {
+ return &ContextVariable{TypeName: typeName}
+}
+
+// ContextVariable represents a structured message whose fields are to be treated as the top-level
+// variable identifiers within CEL expressions.
+type ContextVariable struct {
+ // TypeName represents the fully qualified typename of the context variable.
+ // Currently, only protobuf types are supported.
+ TypeName string `yaml:"type_name"`
+}
+
+// Validate validates the context-variable configuration is well-formed.
+func (ctx *ContextVariable) Validate() error {
+ if ctx == nil {
+ return nil
+ }
+ if ctx.TypeName == "" {
+ return errors.New("invalid context variable: missing type name")
+ }
+ return nil
+}
+
+// NewFunction creates a serializable function and overload set.
+func NewFunction(name string, overloads ...*Overload) *Function {
+ return &Function{Name: name, Overloads: overloads}
+}
+
+// NewFunctionWithDoc creates a serializable function with a description and overload set.
+func NewFunctionWithDoc(name, doc string, overloads ...*Overload) *Function {
+ return &Function{Name: name, Description: doc, Overloads: overloads}
+}
+
+// Function represents the serializable format of a function and its overloads.
+type Function struct {
+ Name string `yaml:"name"`
+ Description string `yaml:"description,omitempty"`
+ Overloads []*Overload `yaml:"overloads,omitempty"`
+}
+
+// Validate validates the function configuration is well-formed.
+func (fn *Function) Validate() error {
+ if fn == nil {
+ return errors.New("invalid function: nil")
+ }
+ if fn.Name == "" {
+ return errors.New("invalid function: missing function name")
+ }
+ if len(fn.Overloads) == 0 {
+ return fmt.Errorf("invalid function %q: missing overloads", fn.Name)
+ }
+ var errs []error
+ for _, o := range fn.Overloads {
+ if err := o.Validate(); err != nil {
+ errs = append(errs, fmt.Errorf("invalid function %q: %w", fn.Name, err))
+ }
+ }
+ return errors.Join(errs...)
+}
+
+// AsCELFunction converts the serializable form of the Function into a CEL environment declaration.
+func (fn *Function) AsCELFunction(tp types.Provider) (*decls.FunctionDecl, error) {
+ if err := fn.Validate(); err != nil {
+ return nil, err
+ }
+ opts := make([]decls.FunctionOpt, 0, len(fn.Overloads)+1)
+ for _, o := range fn.Overloads {
+ opt, err := o.AsFunctionOption(tp)
+ if err != nil {
+ return nil, fmt.Errorf("invalid function %q: %w", fn.Name, err)
+ }
+ opts = append(opts, opt)
+ }
+ if len(fn.Description) != 0 {
+ opts = append(opts, decls.FunctionDocs(fn.Description))
+ }
+ return decls.NewFunction(fn.Name, opts...)
+}
+
+// NewOverload returns a new serializable representation of a global overload.
+func NewOverload(id string, args []*TypeDesc, ret *TypeDesc, examples ...string) *Overload {
+ return &Overload{ID: id, Args: args, Return: ret, Examples: examples}
+}
+
+// NewMemberOverload returns a new serializable representation of a member (receiver) overload.
+func NewMemberOverload(id string, target *TypeDesc, args []*TypeDesc, ret *TypeDesc, examples ...string) *Overload {
+ return &Overload{ID: id, Target: target, Args: args, Return: ret, Examples: examples}
+}
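+
+// For example (hypothetical overload ids):
+//
+//	global := NewOverload("concat", []*TypeDesc{NewTypeDesc("string"), NewTypeDesc("string")}, NewTypeDesc("string"))
+//	member := NewMemberOverload("string_size", NewTypeDesc("string"), nil, NewTypeDesc("int"), `'abc'.size() // 3`)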
+
+// Overload represents the serializable format of a function overload.
+type Overload struct {
+ ID string `yaml:"id"`
+ Examples []string `yaml:"examples,omitempty"`
+ Target *TypeDesc `yaml:"target,omitempty"`
+ Args []*TypeDesc `yaml:"args,omitempty"`
+ Return *TypeDesc `yaml:"return,omitempty"`
+}
+
+// Validate validates the overload configuration is well-formed.
+func (od *Overload) Validate() error {
+ if od == nil {
+ return errors.New("invalid overload: nil")
+ }
+ if od.ID == "" {
+ return errors.New("invalid overload: missing overload id")
+ }
+ var errs []error
+ if od.Target != nil {
+ if err := od.Target.Validate(); err != nil {
+ errs = append(errs, fmt.Errorf("invalid overload %q target: %w", od.ID, err))
+ }
+ }
+ for i, arg := range od.Args {
+ if err := arg.Validate(); err != nil {
+ errs = append(errs, fmt.Errorf("invalid overload %q arg[%d]: %w", od.ID, i, err))
+ }
+ }
+ if err := od.Return.Validate(); err != nil {
+ errs = append(errs, fmt.Errorf("invalid overload %q return: %w", od.ID, err))
+ }
+ return errors.Join(errs...)
+}
+
+// AsFunctionOption converts the serializable form of the Overload into a function declaration option.
+func (od *Overload) AsFunctionOption(tp types.Provider) (decls.FunctionOpt, error) {
+ if err := od.Validate(); err != nil {
+ return nil, err
+ }
+ args := make([]*types.Type, len(od.Args))
+ var err error
+ var errs []error
+ for i, a := range od.Args {
+ args[i], err = a.AsCELType(tp)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ }
+ result, err := od.Return.AsCELType(tp)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ if od.Target != nil {
+ t, err := od.Target.AsCELType(tp)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ if len(errs) != 0 {
+ return nil, errors.Join(errs...)
+ }
+ args = append([]*types.Type{t}, args...)
+ return decls.MemberOverload(od.ID, args, result, decls.OverloadExamples(od.Examples...)), nil
+ }
+ if len(errs) != 0 {
+ return nil, errors.Join(errs...)
+ }
+ return decls.Overload(od.ID, args, result, decls.OverloadExamples(od.Examples...)), nil
+}
+
+// NewExtension creates a serializable Extension from a name and numeric version, where math.MaxUint32 maps to the version string 'latest'.
+func NewExtension(name string, version uint32) *Extension {
+ versionString := "latest"
+ if version < math.MaxUint32 {
+ versionString = strconv.FormatUint(uint64(version), 10)
+ }
+ return &Extension{
+ Name: name,
+ Version: versionString,
+ }
+}
+
+// Extension represents a named and optionally versioned extension library configured in the environment.
+type Extension struct {
+ // Name is either the LibraryName() or some short-hand simple identifier which is understood by the config-handler.
+ Name string `yaml:"name"`
+
+ // Version may either be an unsigned long value or the string 'latest'. If empty, the value is treated as '0'.
+ Version string `yaml:"version,omitempty"`
+}
+
+// Validate validates the extension configuration is well-formed.
+func (e *Extension) Validate() error {
+ _, err := e.VersionNumber()
+ return err
+}
+
+// VersionNumber returns the parsed version string, or an error if the version cannot be parsed.
+func (e *Extension) VersionNumber() (uint32, error) {
+ if e == nil {
+ return 0, fmt.Errorf("invalid extension: nil")
+ }
+ if e.Name == "" {
+ return 0, fmt.Errorf("invalid extension: missing name")
+ }
+ if e.Version == "latest" {
+ return math.MaxUint32, nil
+ }
+ if e.Version == "" {
+ return 0, nil
+ }
+ ver, err := strconv.ParseUint(e.Version, 10, 32)
+ if err != nil {
+ return 0, fmt.Errorf("invalid extension %q version: %w", e.Name, err)
+ }
+ return uint32(ver), nil
+}
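+
+// For example (illustrative):
+//
+//	NewExtension("optional", 2).VersionNumber()                      // 2, nil
+//	(&Extension{Name: "strings", Version: "latest"}).VersionNumber() // math.MaxUint32, nil
+//	(&Extension{Name: "sets"}).VersionNumber()                       // 0, nil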
+
+// NewLibrarySubset returns an empty library subsetting config which permits all library features.
+func NewLibrarySubset() *LibrarySubset {
+ return &LibrarySubset{}
+}
+
+// LibrarySubset indicates a subset of the macros and function supported by a subsettable library.
+type LibrarySubset struct {
+ // Disabled indicates whether the library has been disabled, typically only used for
+ // default-enabled libraries like stdlib.
+ Disabled bool `yaml:"disabled,omitempty"`
+
+ // DisableMacros disables macros for the given library.
+ DisableMacros bool `yaml:"disable_macros,omitempty"`
+
+ // IncludeMacros specifies a set of macro function names to include in the subset.
+ IncludeMacros []string `yaml:"include_macros,omitempty"`
+
+ // ExcludeMacros specifies a set of macro function names to exclude from the subset.
+ // Note: if IncludeMacros is non-empty, then ExcludeMacros is ignored.
+ ExcludeMacros []string `yaml:"exclude_macros,omitempty"`
+
+ // IncludeFunctions specifies a set of functions to include in the subset.
+ //
+ // Note: the overloads specified in the subset need only specify their ID.
+ // Note: if IncludeFunctions is non-empty, then ExcludeFunctions is ignored.
+ IncludeFunctions []*Function `yaml:"include_functions,omitempty"`
+
+ // ExcludeFunctions specifies the set of functions to exclude from the subset.
+ //
+ // Note: the overloads specified in the subset need only specify their ID.
+ ExcludeFunctions []*Function `yaml:"exclude_functions,omitempty"`
+}
+
+// Validate validates the library configuration is well-formed.
+//
+// For example, setting both the IncludeMacros and ExcludeMacros together could be confusing
+// and create a broken expectation, likewise for IncludeFunctions and ExcludeFunctions.
+func (lib *LibrarySubset) Validate() error {
+ if lib == nil {
+ return nil
+ }
+ var errs []error
+ if len(lib.IncludeMacros) != 0 && len(lib.ExcludeMacros) != 0 {
+ errs = append(errs, errors.New("invalid subset: cannot both include and exclude macros"))
+ }
+ if len(lib.IncludeFunctions) != 0 && len(lib.ExcludeFunctions) != 0 {
+ errs = append(errs, errors.New("invalid subset: cannot both include and exclude functions"))
+ }
+ return errors.Join(errs...)
+}
+
+// SubsetFunction produces a function declaration which matches the supported subset, or nil
+// if the function is not supported by the LibrarySubset.
+//
+// For IncludeFunctions, if the function does not specify a set of overloads to include, the
+// whole function definition is included. If overloads are set, then a new function which
+// includes only the specified overloads is produced.
+//
+// For ExcludeFunctions, if the function does not specify a set of overloads to exclude, the
+// whole function definition is excluded. If overloads are set, then a new function which
+// includes only the permitted overloads is produced.
+func (lib *LibrarySubset) SubsetFunction(fn *decls.FunctionDecl) (*decls.FunctionDecl, bool) {
+ // When lib is nil, all values are included in the subset.
+ if lib == nil {
+ return fn, true
+ }
+ if lib.Disabled {
+ return nil, false
+ }
+ if len(lib.IncludeFunctions) != 0 {
+ for _, include := range lib.IncludeFunctions {
+ if include.Name != fn.Name() {
+ continue
+ }
+ if len(include.Overloads) == 0 {
+ return fn, true
+ }
+ overloadIDs := make([]string, len(include.Overloads))
+ for i, o := range include.Overloads {
+ overloadIDs[i] = o.ID
+ }
+ return fn.Subset(decls.IncludeOverloads(overloadIDs...)), true
+ }
+ return nil, false
+ }
+ if len(lib.ExcludeFunctions) != 0 {
+ for _, exclude := range lib.ExcludeFunctions {
+ if exclude.Name != fn.Name() {
+ continue
+ }
+ if len(exclude.Overloads) == 0 {
+ return nil, false
+ }
+ overloadIDs := make([]string, len(exclude.Overloads))
+ for i, o := range exclude.Overloads {
+ overloadIDs[i] = o.ID
+ }
+ return fn.Subset(decls.ExcludeOverloads(overloadIDs...)), true
+ }
+ return fn, true
+ }
+ return fn, true
+}
+
+// SubsetMacro indicates whether the macro function should be included in the library subset.
+func (lib *LibrarySubset) SubsetMacro(macroFunction string) bool {
+ // When lib is nil, all values are included in the subset.
+ if lib == nil {
+ return true
+ }
+ if lib.Disabled || lib.DisableMacros {
+ return false
+ }
+ if len(lib.IncludeMacros) != 0 {
+ for _, name := range lib.IncludeMacros {
+ if name == macroFunction {
+ return true
+ }
+ }
+ return false
+ }
+ if len(lib.ExcludeMacros) != 0 {
+ for _, name := range lib.ExcludeMacros {
+ if name == macroFunction {
+ return false
+ }
+ }
+ return true
+ }
+ return true
+}
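+
+// A sketch of a subset (names are illustrative) which keeps only the 'has' macro and
+// removes the 'matches' function entirely:
+//
+//	subset := NewLibrarySubset().
+//	    AddIncludedMacros("has").
+//	    AddExcludedFunctions(NewFunction("matches"))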
+
+// SetDisabled disables or enables the library.
+func (lib *LibrarySubset) SetDisabled(value bool) *LibrarySubset {
+ lib.Disabled = value
+ return lib
+}
+
+// SetDisableMacros disables the macros for the library.
+func (lib *LibrarySubset) SetDisableMacros(value bool) *LibrarySubset {
+ lib.DisableMacros = value
+ return lib
+}
+
+// AddIncludedMacros allow-lists one or more macros by function name.
+//
+// Note, this option will override any excluded macros.
+func (lib *LibrarySubset) AddIncludedMacros(macros ...string) *LibrarySubset {
+ lib.IncludeMacros = append(lib.IncludeMacros, macros...)
+ return lib
+}
+
+// AddExcludedMacros deny-lists one or more macros by function name.
+func (lib *LibrarySubset) AddExcludedMacros(macros ...string) *LibrarySubset {
+ lib.ExcludeMacros = append(lib.ExcludeMacros, macros...)
+ return lib
+}
+
+// AddIncludedFunctions allow-lists one or more functions from the subset.
+//
+// Note, this option will override any excluded functions.
+func (lib *LibrarySubset) AddIncludedFunctions(funcs ...*Function) *LibrarySubset {
+ lib.IncludeFunctions = append(lib.IncludeFunctions, funcs...)
+ return lib
+}
+
+// AddExcludedFunctions deny-lists one or more functions from the subset.
+func (lib *LibrarySubset) AddExcludedFunctions(funcs ...*Function) *LibrarySubset {
+ lib.ExcludeFunctions = append(lib.ExcludeFunctions, funcs...)
+ return lib
+}
+
+// NewValidator returns a named Validator instance.
+func NewValidator(name string) *Validator {
+ return &Validator{Name: name}
+}
+
+// Validator represents a named validator with an optional map-based configuration object.
+//
+// Note: the map-keys must directly correspond to the internal representation of the original
+// validator, and should only use primitive scalar types as values at this time.
+type Validator struct {
+ Name string `yaml:"name"`
+ Config map[string]any `yaml:"config,omitempty"`
+}
+
+// Validate validates the configuration of the validator object.
+func (v *Validator) Validate() error {
+ if v == nil {
+ return errors.New("invalid validator: nil")
+ }
+ if v.Name == "" {
+ return errors.New("invalid validator: missing name")
+ }
+ return nil
+}
+
+// SetConfig sets the set of map key-value pairs associated with this validator's configuration.
+func (v *Validator) SetConfig(config map[string]any) *Validator {
+ v.Config = config
+ return v
+}
+
+// ConfigValue retrieves the value associated with the config key name, if one exists.
+func (v *Validator) ConfigValue(name string) (any, bool) {
+ if v == nil {
+ return nil, false
+ }
+ value, found := v.Config[name]
+ return value, found
+}
+
+// NewFeature creates a new feature flag with a boolean enablement flag.
+func NewFeature(name string, enabled bool) *Feature {
+ return &Feature{Name: name, Enabled: enabled}
+}
+
+// Feature represents a named boolean feature flag supported by CEL.
+type Feature struct {
+ Name string `yaml:"name"`
+ Enabled bool `yaml:"enabled"`
+}
+
+// Validate validates whether the feature is well-configured.
+func (feat *Feature) Validate() error {
+ if feat == nil {
+ return errors.New("invalid feature: nil")
+ }
+ if feat.Name == "" {
+ return errors.New("invalid feature: missing name")
+ }
+ return nil
+}
+
+// NewTypeDesc creates a TypeDesc for a simple or parameterized type.
+func NewTypeDesc(typeName string, params ...*TypeDesc) *TypeDesc {
+ return &TypeDesc{TypeName: typeName, Params: params}
+}
+
+// NewTypeParam describes a type-param type.
+func NewTypeParam(paramName string) *TypeDesc {
+ return &TypeDesc{TypeName: paramName, IsTypeParam: true}
+}
+
+// TypeDesc represents the serializable format of a CEL *types.Type value.
+type TypeDesc struct {
+ TypeName string `yaml:"type_name"`
+ Params []*TypeDesc `yaml:"params,omitempty"`
+ IsTypeParam bool `yaml:"is_type_param,omitempty"`
+}
+
+// String implements the fmt.Stringer interface method.
+func (td *TypeDesc) String() string {
+ ps := make([]string, len(td.Params))
+ for i, p := range td.Params {
+ ps[i] = p.String()
+ }
+ typeName := td.TypeName
+ if len(ps) != 0 {
+ typeName = fmt.Sprintf("%s(%s)", typeName, strings.Join(ps, ","))
+ }
+ return typeName
+}
+
+// Validate validates the type configuration is well-formed.
+func (td *TypeDesc) Validate() error {
+ if td == nil {
+ return errors.New("invalid type: nil")
+ }
+ if td.TypeName == "" {
+ return errors.New("invalid type: missing type name")
+ }
+ if td.IsTypeParam && len(td.Params) != 0 {
+ return errors.New("invalid type: param type cannot have parameters")
+ }
+ switch td.TypeName {
+ case "list":
+ if len(td.Params) != 1 {
+ return fmt.Errorf("invalid type: list expects 1 parameter, got %d", len(td.Params))
+ }
+ return td.Params[0].Validate()
+ case "map":
+ if len(td.Params) != 2 {
+ return fmt.Errorf("invalid type: map expects 2 parameters, got %d", len(td.Params))
+ }
+ if err := td.Params[0].Validate(); err != nil {
+ return err
+ }
+ if err := td.Params[1].Validate(); err != nil {
+ return err
+ }
+ case "optional_type":
+ if len(td.Params) != 1 {
+ return fmt.Errorf("invalid type: optional_type expects 1 parameter, got %d", len(td.Params))
+ }
+ return td.Params[0].Validate()
+ default:
+ }
+ return nil
+}
+
+// AsCELType converts the serializable object to a *types.Type value.
+func (td *TypeDesc) AsCELType(tp types.Provider) (*types.Type, error) {
+ err := td.Validate()
+ if err != nil {
+ return nil, err
+ }
+ switch td.TypeName {
+ case "dyn":
+ return types.DynType, nil
+ case "map":
+ kt, err := td.Params[0].AsCELType(tp)
+ if err != nil {
+ return nil, err
+ }
+ vt, err := td.Params[1].AsCELType(tp)
+ if err != nil {
+ return nil, err
+ }
+ return types.NewMapType(kt, vt), nil
+ case "list":
+ et, err := td.Params[0].AsCELType(tp)
+ if err != nil {
+ return nil, err
+ }
+ return types.NewListType(et), nil
+ case "optional_type":
+ et, err := td.Params[0].AsCELType(tp)
+ if err != nil {
+ return nil, err
+ }
+ return types.NewOptionalType(et), nil
+ default:
+ if td.IsTypeParam {
+ return types.NewTypeParamType(td.TypeName), nil
+ }
+ if msgType, found := tp.FindStructType(td.TypeName); found {
+ // First parameter is the type name.
+ return msgType.Parameters()[0], nil
+ }
+ t, found := tp.FindIdent(td.TypeName)
+ if !found {
+ return nil, fmt.Errorf("undefined type name: %q", td.TypeName)
+ }
+ if celType, ok := t.(*types.Type); ok && len(td.Params) == 0 {
+ return celType, nil
+ }
+ params := make([]*types.Type, len(td.Params))
+ for i, p := range td.Params {
+ params[i], err = p.AsCELType(tp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return types.NewOpaqueType(td.TypeName, params...), nil
+ }
+}
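+
+// For example (illustrative, given a types.Provider tp):
+//
+//	NewTypeDesc("list", NewTypeDesc("string")).AsCELType(tp)                    // list(string)
+//	NewTypeDesc("map", NewTypeDesc("string"), NewTypeDesc("dyn")).AsCELType(tp) // map(string, dyn)
+//	NewTypeParam("T").AsCELType(tp)                                             // type param T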
+
+// SerializeTypeDesc converts a CEL native *types.Type to a serializable TypeDesc.
+func SerializeTypeDesc(t *types.Type) *TypeDesc {
+ typeName := t.TypeName()
+ if t.Kind() == types.TypeParamKind {
+ return NewTypeParam(typeName)
+ }
+ if t != types.NullType && t.IsAssignableType(types.NullType) {
+ if wrapperTypeName, found := wrapperTypes[t.Kind()]; found {
+ return NewTypeDesc(wrapperTypeName)
+ }
+ }
+ var params []*TypeDesc
+ for _, p := range t.Parameters() {
+ params = append(params, SerializeTypeDesc(p))
+ }
+ return NewTypeDesc(typeName, params...)
+}
+
+var wrapperTypes = map[types.Kind]string{
+ types.BoolKind: "google.protobuf.BoolValue",
+ types.BytesKind: "google.protobuf.BytesValue",
+ types.DoubleKind: "google.protobuf.DoubleValue",
+ types.IntKind: "google.protobuf.Int64Value",
+ types.StringKind: "google.protobuf.StringValue",
+ types.UintKind: "google.protobuf.UInt64Value",
+}
diff --git a/vendor/github.com/google/cel-go/common/error.go b/vendor/github.com/google/cel-go/common/error.go
index 774dcb5b4..0cf21345e 100644
--- a/vendor/github.com/google/cel-go/common/error.go
+++ b/vendor/github.com/google/cel-go/common/error.go
@@ -18,8 +18,6 @@ import (
"fmt"
"strings"
"unicode/utf8"
-
- "golang.org/x/text/width"
)
// NewError creates an error associated with an expression id with the given message at the given location.
@@ -35,18 +33,15 @@ type Error struct {
}
const (
- dot = "."
- ind = "^"
+ dot = "."
+ ind = "^"
+ wideDot = "\uff0e"
+ wideInd = "\uff3e"
// maxSnippetLength is the largest number of characters which can be rendered in an error message snippet.
maxSnippetLength = 16384
)
-var (
- wideDot = width.Widen.String(dot)
- wideInd = width.Widen.String(ind)
-)
-
// ToDisplayString decorates the error message with the source location.
func (e *Error) ToDisplayString(source Source) string {
var result = fmt.Sprintf("ERROR: %s:%d:%d: %s",
diff --git a/vendor/github.com/google/cel-go/common/errors.go b/vendor/github.com/google/cel-go/common/errors.go
index 63919714e..c8865df8c 100644
--- a/vendor/github.com/google/cel-go/common/errors.go
+++ b/vendor/github.com/google/cel-go/common/errors.go
@@ -30,9 +30,13 @@ type Errors struct {
// NewErrors creates a new instance of the Errors type.
func NewErrors(source Source) *Errors {
+ src := source
+ if src == nil {
+ src = NewTextSource("")
+ }
return &Errors{
errors: []*Error{},
- source: source,
+ source: src,
maxErrorsToReport: 100,
}
}
@@ -42,6 +46,11 @@ func (e *Errors) ReportError(l Location, format string, args ...any) {
e.ReportErrorAtID(0, l, format, args...)
}
+// ReportErrorString records an error at a source location.
+func (e *Errors) ReportErrorString(l Location, message string) {
+ e.ReportErrorAtID(0, l, "%s", message)
+}
+
// ReportErrorAtID records an error at a source location and expression id.
func (e *Errors) ReportErrorAtID(id int64, l Location, format string, args ...any) {
e.numErrors++
@@ -64,7 +73,7 @@ func (e *Errors) GetErrors() []*Error {
// Append creates a new Errors object with the current and input errors.
func (e *Errors) Append(errs []*Error) *Errors {
return &Errors{
- errors: append(e.errors, errs...),
+ errors: append(e.errors[:], errs...),
source: e.source,
numErrors: e.numErrors + len(errs),
maxErrorsToReport: e.maxErrorsToReport,
diff --git a/vendor/github.com/google/cel-go/common/runes/buffer.go b/vendor/github.com/google/cel-go/common/runes/buffer.go
index 50aac0b27..021198224 100644
--- a/vendor/github.com/google/cel-go/common/runes/buffer.go
+++ b/vendor/github.com/google/cel-go/common/runes/buffer.go
@@ -127,20 +127,48 @@ var nilBuffer = &emptyBuffer{}
// elements of the byte or uint16 array, and continue. The underlying storage is an rune array
// containing any Unicode character.
func NewBuffer(data string) Buffer {
+ buf, _ := newBuffer(data, false)
+ return buf
+}
+
+// NewBufferAndLineOffsets returns an efficient implementation of Buffer for the given text based on
+// the ranges of the encoded code points contained within, as well as returning the line offsets.
+//
+// Code points are represented as an array of byte, uint16, or rune. This approach ensures that
+// each index represents a code point by itself without needing to use an array of rune. At first
+// we assume all code points are less than or equal to '\u007f'. If this holds true, the
+// underlying storage is a byte array containing only ASCII characters. If we encounter a code
+// point above this range but less than or equal to '\uffff' we allocate a uint16 array, copy the
+// elements of the previous byte array to the uint16 array, and continue. If all remaining code
+// points stay within this range, the underlying storage is a uint16 array containing only Unicode
+// characters in the Basic Multilingual Plane. If we encounter a code point above '\uffff' we
+// allocate a rune array, copy the previous elements of the byte or uint16 array, and continue.
+// The underlying storage is then a rune array containing any Unicode character.
+func NewBufferAndLineOffsets(data string) (Buffer, []int32) {
+ return newBuffer(data, true)
+}
+
+func newBuffer(data string, lines bool) (Buffer, []int32) {
if len(data) == 0 {
- return nilBuffer
+ return nilBuffer, []int32{0}
}
var (
- idx = 0
- buf8 = make([]byte, 0, len(data))
+ idx = 0
+ off int32 = 0
+ buf8 = make([]byte, 0, len(data))
buf16 []uint16
buf32 []rune
+ offs []int32
)
for idx < len(data) {
r, s := utf8.DecodeRuneInString(data[idx:])
idx += s
+ if lines && r == '\n' {
+ offs = append(offs, off+1)
+ }
if r < utf8.RuneSelf {
buf8 = append(buf8, byte(r))
+ off++
continue
}
if r <= 0xffff {
@@ -150,6 +178,7 @@ func NewBuffer(data string) Buffer {
}
buf8 = nil
buf16 = append(buf16, uint16(r))
+ off++
goto copy16
}
buf32 = make([]rune, len(buf8), len(data))
@@ -158,17 +187,25 @@ func NewBuffer(data string) Buffer {
}
buf8 = nil
buf32 = append(buf32, r)
+ off++
goto copy32
}
+ if lines {
+ offs = append(offs, off+1)
+ }
return &asciiBuffer{
arr: buf8,
- }
+ }, offs
copy16:
for idx < len(data) {
r, s := utf8.DecodeRuneInString(data[idx:])
idx += s
+ if lines && r == '\n' {
+ offs = append(offs, off+1)
+ }
if r <= 0xffff {
buf16 = append(buf16, uint16(r))
+ off++
continue
}
buf32 = make([]rune, len(buf16), len(data))
@@ -177,18 +214,29 @@ copy16:
}
buf16 = nil
buf32 = append(buf32, r)
+ off++
goto copy32
}
+ if lines {
+ offs = append(offs, off+1)
+ }
return &basicBuffer{
arr: buf16,
- }
+ }, offs
copy32:
for idx < len(data) {
r, s := utf8.DecodeRuneInString(data[idx:])
idx += s
+ if lines && r == '\n' {
+ offs = append(offs, off+1)
+ }
buf32 = append(buf32, r)
+ off++
+ }
+ if lines {
+ offs = append(offs, off+1)
}
return &supplementalBuffer{
arr: buf32,
- }
+ }, offs
}
diff --git a/vendor/github.com/google/cel-go/common/source.go b/vendor/github.com/google/cel-go/common/source.go
index acf22bdf1..ec79cb545 100644
--- a/vendor/github.com/google/cel-go/common/source.go
+++ b/vendor/github.com/google/cel-go/common/source.go
@@ -15,9 +15,6 @@
package common
import (
- "strings"
- "unicode/utf8"
-
"github.com/google/cel-go/common/runes"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
@@ -80,17 +77,11 @@ func NewTextSource(text string) Source {
// NewStringSource creates a new Source from the given contents and description.
func NewStringSource(contents string, description string) Source {
// Compute line offsets up front as they are referred to frequently.
- lines := strings.Split(contents, "\n")
- offsets := make([]int32, len(lines))
- var offset int32
- for i, line := range lines {
- offset = offset + int32(utf8.RuneCountInString(line)) + 1
- offsets[int32(i)] = offset
- }
+ buf, offs := runes.NewBufferAndLineOffsets(contents)
return &sourceImpl{
- Buffer: runes.NewBuffer(contents),
+ Buffer: buf,
description: description,
- lineOffsets: offsets,
+ lineOffsets: offs,
}
}
@@ -172,9 +163,8 @@ func (s *sourceImpl) findLine(characterOffset int32) (int32, int32) {
for _, lineOffset := range s.lineOffsets {
if lineOffset > characterOffset {
break
- } else {
- line++
}
+ line++
}
if line == 1 {
return line, 0
diff --git a/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel b/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel
index c130a93f6..124dbea81 100644
--- a/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel
@@ -12,7 +12,7 @@ go_library(
],
importpath = "github.com/google/cel-go/common/stdlib",
deps = [
- "//checker/decls:go_default_library",
+ "//common:go_default_library",
"//common/decls:go_default_library",
"//common/functions:go_default_library",
"//common/operators:go_default_library",
@@ -20,6 +20,5 @@ go_library(
"//common/types:go_default_library",
"//common/types/ref:go_default_library",
"//common/types/traits:go_default_library",
- "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
],
)
\ No newline at end of file
diff --git a/vendor/github.com/google/cel-go/common/stdlib/standard.go b/vendor/github.com/google/cel-go/common/stdlib/standard.go
index d02cb64bf..4040a4f5c 100644
--- a/vendor/github.com/google/cel-go/common/stdlib/standard.go
+++ b/vendor/github.com/google/cel-go/common/stdlib/standard.go
@@ -16,6 +16,11 @@
package stdlib
import (
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/google/cel-go/common"
"github.com/google/cel-go/common/decls"
"github.com/google/cel-go/common/functions"
"github.com/google/cel-go/common/operators"
@@ -23,15 +28,12 @@ import (
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/common/types/traits"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
var (
stdFunctions []*decls.FunctionDecl
- stdFnDecls []*exprpb.Decl
stdTypes []*decls.VariableDecl
- stdTypeDecls []*exprpb.Decl
+ utcTZ = types.String("UTC")
)
func init() {
@@ -55,32 +57,53 @@ func init() {
decls.TypeVariable(types.UintType),
}
- stdTypeDecls = make([]*exprpb.Decl, 0, len(stdTypes))
- for _, stdType := range stdTypes {
- typeVar, err := decls.VariableDeclToExprDecl(stdType)
- if err != nil {
- panic(err)
- }
- stdTypeDecls = append(stdTypeDecls, typeVar)
- }
-
stdFunctions = []*decls.FunctionDecl{
// Logical operators. Special-cased within the interpreter.
// Note, the singleton binding prevents extensions from overriding the operator behavior.
function(operators.Conditional,
+ decls.FunctionDocs(
+ `The ternary operator tests a boolean predicate and returns the left-hand side `+
+ `(truthy) expression if true, or the right-hand side (falsy) expression if false`),
decls.Overload(overloads.Conditional, argTypes(types.BoolType, paramA, paramA), paramA,
- decls.OverloadIsNonStrict()),
+ decls.OverloadIsNonStrict(),
+ decls.OverloadExamples(
+ `'hello'.contains('lo') ? 'hi' : 'bye' // 'hi'`,
+ `32 % 3 == 0 ? 'divisible' : 'not divisible' // 'not divisible'`)),
decls.SingletonFunctionBinding(noFunctionOverrides)),
+
function(operators.LogicalAnd,
+ decls.FunctionDocs(
+ `logically AND two boolean values. Errors and unknown values`,
+ `are valid inputs and will not halt evaluation.`),
decls.Overload(overloads.LogicalAnd, argTypes(types.BoolType, types.BoolType), types.BoolType,
- decls.OverloadIsNonStrict()),
+ decls.OverloadIsNonStrict(),
+ decls.OverloadExamples(
+ `true && true // true`,
+ `true && false // false`,
+ `error && true // error`,
+ `error && false // false`)),
decls.SingletonBinaryBinding(noBinaryOverrides)),
+
function(operators.LogicalOr,
+ decls.FunctionDocs(
+ `logically OR two boolean values. Errors and unknown values`,
+ `are valid inputs and will not halt evaluation.`),
decls.Overload(overloads.LogicalOr, argTypes(types.BoolType, types.BoolType), types.BoolType,
- decls.OverloadIsNonStrict()),
+ decls.OverloadIsNonStrict(),
+ decls.OverloadExamples(
+ `true || false // true`,
+ `false || false // false`,
+ `error || true // true`,
+ `error || error // error`)),
decls.SingletonBinaryBinding(noBinaryOverrides)),
+
function(operators.LogicalNot,
- decls.Overload(overloads.LogicalNot, argTypes(types.BoolType), types.BoolType),
+ decls.FunctionDocs(`logically negate a boolean value.`),
+ decls.Overload(overloads.LogicalNot, argTypes(types.BoolType), types.BoolType,
+ decls.OverloadExamples(
+ `!true // false`,
+ `!false // true`,
+ `!error // error`)),
decls.SingletonUnaryBinding(func(val ref.Val) ref.Val {
b, ok := val.(types.Bool)
if !ok {
@@ -103,66 +126,104 @@ func init() {
// Equality / inequality. Special-cased in the interpreter
function(operators.Equals,
- decls.Overload(overloads.Equals, argTypes(paramA, paramA), types.BoolType),
+ decls.FunctionDocs(`compare two values of the same type for equality`),
+ decls.Overload(overloads.Equals, argTypes(paramA, paramA), types.BoolType,
+ decls.OverloadExamples(
+ `1 == 1 // true`,
+ `'hello' == 'world' // false`,
+ `bytes('hello') == b'hello' // true`,
+ `duration('1h') == duration('60m') // true`,
+ `dyn(3.0) == 3 // true`)),
decls.SingletonBinaryBinding(noBinaryOverrides)),
function(operators.NotEquals,
- decls.Overload(overloads.NotEquals, argTypes(paramA, paramA), types.BoolType),
+ decls.FunctionDocs(`compare two values of the same type for inequality`),
+ decls.Overload(overloads.NotEquals, argTypes(paramA, paramA), types.BoolType,
+ decls.OverloadExamples(
+ `1 != 2 // true`,
+ `"a" != "a" // false`,
+ `3.0 != 3.1 // true`)),
decls.SingletonBinaryBinding(noBinaryOverrides)),
// Mathematical operators
function(operators.Add,
+ decls.FunctionDocs(
+ `adds two numeric values or concatenates two strings, bytes,`,
+ `or lists.`),
decls.Overload(overloads.AddBytes,
- argTypes(types.BytesType, types.BytesType), types.BytesType),
+ argTypes(types.BytesType, types.BytesType), types.BytesType,
+ decls.OverloadExamples(`b'hi' + bytes('ya') // b'hiya'`)),
decls.Overload(overloads.AddDouble,
- argTypes(types.DoubleType, types.DoubleType), types.DoubleType),
+ argTypes(types.DoubleType, types.DoubleType), types.DoubleType,
+ decls.OverloadExamples(`3.14 + 1.59 // 4.73`)),
decls.Overload(overloads.AddDurationDuration,
- argTypes(types.DurationType, types.DurationType), types.DurationType),
+ argTypes(types.DurationType, types.DurationType), types.DurationType,
+ decls.OverloadExamples(`duration('1m') + duration('1s') // duration('1m1s')`)),
decls.Overload(overloads.AddDurationTimestamp,
- argTypes(types.DurationType, types.TimestampType), types.TimestampType),
+ argTypes(types.DurationType, types.TimestampType), types.TimestampType,
+ decls.OverloadExamples(`duration('24h') + timestamp('2023-01-01T00:00:00Z') // timestamp('2023-01-02T00:00:00Z')`)),
decls.Overload(overloads.AddTimestampDuration,
- argTypes(types.TimestampType, types.DurationType), types.TimestampType),
+ argTypes(types.TimestampType, types.DurationType), types.TimestampType,
+ decls.OverloadExamples(`timestamp('2023-01-01T00:00:00Z') + duration('24h1m2s') // timestamp('2023-01-02T00:01:02Z')`)),
decls.Overload(overloads.AddInt64,
- argTypes(types.IntType, types.IntType), types.IntType),
+ argTypes(types.IntType, types.IntType), types.IntType,
+ decls.OverloadExamples(`1 + 2 // 3`)),
decls.Overload(overloads.AddList,
- argTypes(listOfA, listOfA), listOfA),
+ argTypes(listOfA, listOfA), listOfA,
+ decls.OverloadExamples(`[1] + [2, 3] // [1, 2, 3]`)),
decls.Overload(overloads.AddString,
- argTypes(types.StringType, types.StringType), types.StringType),
+ argTypes(types.StringType, types.StringType), types.StringType,
+ decls.OverloadExamples(`"Hello, " + "world!" // "Hello, world!"`)),
decls.Overload(overloads.AddUint64,
- argTypes(types.UintType, types.UintType), types.UintType),
+ argTypes(types.UintType, types.UintType), types.UintType,
+ decls.OverloadExamples(`22u + 33u // 55u`)),
decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
return lhs.(traits.Adder).Add(rhs)
}, traits.AdderType)),
function(operators.Divide,
+ decls.FunctionDocs(`divide two numbers`),
decls.Overload(overloads.DivideDouble,
- argTypes(types.DoubleType, types.DoubleType), types.DoubleType),
+ argTypes(types.DoubleType, types.DoubleType), types.DoubleType,
+ decls.OverloadExamples(`7.0 / 2.0 // 3.5`)),
decls.Overload(overloads.DivideInt64,
- argTypes(types.IntType, types.IntType), types.IntType),
+ argTypes(types.IntType, types.IntType), types.IntType,
+ decls.OverloadExamples(`10 / 2 // 5`)),
decls.Overload(overloads.DivideUint64,
- argTypes(types.UintType, types.UintType), types.UintType),
+ argTypes(types.UintType, types.UintType), types.UintType,
+ decls.OverloadExamples(`42u / 2u // 21u`)),
decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
return lhs.(traits.Divider).Divide(rhs)
}, traits.DividerType)),
function(operators.Modulo,
+ decls.FunctionDocs(`compute the modulus of one integer into another`),
decls.Overload(overloads.ModuloInt64,
- argTypes(types.IntType, types.IntType), types.IntType),
+ argTypes(types.IntType, types.IntType), types.IntType,
+ decls.OverloadExamples(`3 % 2 // 1`)),
decls.Overload(overloads.ModuloUint64,
- argTypes(types.UintType, types.UintType), types.UintType),
+ argTypes(types.UintType, types.UintType), types.UintType,
+ decls.OverloadExamples(`6u % 3u // 0u`)),
decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
return lhs.(traits.Modder).Modulo(rhs)
}, traits.ModderType)),
function(operators.Multiply,
+ decls.FunctionDocs(`multiply two numbers`),
decls.Overload(overloads.MultiplyDouble,
- argTypes(types.DoubleType, types.DoubleType), types.DoubleType),
+ argTypes(types.DoubleType, types.DoubleType), types.DoubleType,
+ decls.OverloadExamples(`3.5 * 40.0 // 140.0`)),
decls.Overload(overloads.MultiplyInt64,
- argTypes(types.IntType, types.IntType), types.IntType),
+ argTypes(types.IntType, types.IntType), types.IntType,
+ decls.OverloadExamples(`-2 * 6 // -12`)),
decls.Overload(overloads.MultiplyUint64,
- argTypes(types.UintType, types.UintType), types.UintType),
+ argTypes(types.UintType, types.UintType), types.UintType,
+ decls.OverloadExamples(`13u * 3u // 39u`)),
decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
return lhs.(traits.Multiplier).Multiply(rhs)
}, traits.MultiplierType)),
function(operators.Negate,
- decls.Overload(overloads.NegateDouble, argTypes(types.DoubleType), types.DoubleType),
- decls.Overload(overloads.NegateInt64, argTypes(types.IntType), types.IntType),
+ decls.FunctionDocs(`negate a numeric value`),
+ decls.Overload(overloads.NegateDouble, argTypes(types.DoubleType), types.DoubleType,
+ decls.OverloadExamples(`-(3.14) // -3.14`)),
+ decls.Overload(overloads.NegateInt64, argTypes(types.IntType), types.IntType,
+ decls.OverloadExamples(`-(5) // -5`)),
decls.SingletonUnaryBinding(func(val ref.Val) ref.Val {
if types.IsBool(val) {
return types.MaybeNoSuchOverloadErr(val)
@@ -170,18 +231,32 @@ func init() {
return val.(traits.Negater).Negate()
}, traits.NegatorType)),
function(operators.Subtract,
+ decls.FunctionDocs(`subtract two numbers, or two time-related values`),
decls.Overload(overloads.SubtractDouble,
- argTypes(types.DoubleType, types.DoubleType), types.DoubleType),
+ argTypes(types.DoubleType, types.DoubleType), types.DoubleType,
+ decls.OverloadExamples(`10.5 - 2.0 // 8.5`)),
decls.Overload(overloads.SubtractDurationDuration,
- argTypes(types.DurationType, types.DurationType), types.DurationType),
+ argTypes(types.DurationType, types.DurationType), types.DurationType,
+ decls.OverloadExamples(`duration('1m') - duration('1s') // duration('59s')`)),
decls.Overload(overloads.SubtractInt64,
- argTypes(types.IntType, types.IntType), types.IntType),
+ argTypes(types.IntType, types.IntType), types.IntType,
+ decls.OverloadExamples(`5 - 3 // 2`)),
decls.Overload(overloads.SubtractTimestampDuration,
- argTypes(types.TimestampType, types.DurationType), types.TimestampType),
+ argTypes(types.TimestampType, types.DurationType), types.TimestampType,
+ decls.OverloadExamples(common.MultilineDescription(
+ `timestamp('2023-01-10T12:00:00Z')`,
+ ` - duration('12h') // timestamp('2023-01-10T00:00:00Z')`))),
decls.Overload(overloads.SubtractTimestampTimestamp,
- argTypes(types.TimestampType, types.TimestampType), types.DurationType),
+ argTypes(types.TimestampType, types.TimestampType), types.DurationType,
+ decls.OverloadExamples(common.MultilineDescription(
+ `timestamp('2023-01-10T12:00:00Z')`,
+ ` - timestamp('2023-01-10T00:00:00Z') // duration('12h')`))),
decls.Overload(overloads.SubtractUint64,
- argTypes(types.UintType, types.UintType), types.UintType),
+ argTypes(types.UintType, types.UintType), types.UintType,
+ decls.OverloadExamples(common.MultilineDescription(
+ `// the subtraction result must be non-negative, otherwise an overflow`,
+ `// error is generated.`,
+ `42u - 3u // 39u`))),
decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
return lhs.(traits.Subtractor).Subtract(rhs)
}, traits.SubtractorType)),
@@ -189,34 +264,51 @@ func init() {
// Relations operators
function(operators.Less,
+ decls.FunctionDocs(
+ `compare two values and return true if the first value is`,
+ `less than the second`),
decls.Overload(overloads.LessBool,
- argTypes(types.BoolType, types.BoolType), types.BoolType),
+ argTypes(types.BoolType, types.BoolType), types.BoolType,
+ decls.OverloadExamples(`false < true // true`)),
decls.Overload(overloads.LessInt64,
- argTypes(types.IntType, types.IntType), types.BoolType),
+ argTypes(types.IntType, types.IntType), types.BoolType,
+ decls.OverloadExamples(`-2 < 3 // true`, `1 < 0 // false`)),
decls.Overload(overloads.LessInt64Double,
- argTypes(types.IntType, types.DoubleType), types.BoolType),
+ argTypes(types.IntType, types.DoubleType), types.BoolType,
+ decls.OverloadExamples(`1 < 1.1 // true`)),
decls.Overload(overloads.LessInt64Uint64,
- argTypes(types.IntType, types.UintType), types.BoolType),
+ argTypes(types.IntType, types.UintType), types.BoolType,
+ decls.OverloadExamples(`1 < 2u // true`)),
decls.Overload(overloads.LessUint64,
- argTypes(types.UintType, types.UintType), types.BoolType),
+ argTypes(types.UintType, types.UintType), types.BoolType,
+ decls.OverloadExamples(`1u < 2u // true`)),
decls.Overload(overloads.LessUint64Double,
- argTypes(types.UintType, types.DoubleType), types.BoolType),
+ argTypes(types.UintType, types.DoubleType), types.BoolType,
+ decls.OverloadExamples(`1u < 0.9 // false`)),
decls.Overload(overloads.LessUint64Int64,
- argTypes(types.UintType, types.IntType), types.BoolType),
+ argTypes(types.UintType, types.IntType), types.BoolType,
+ decls.OverloadExamples(`1u < 23 // true`, `1u < -1 // false`)),
decls.Overload(overloads.LessDouble,
- argTypes(types.DoubleType, types.DoubleType), types.BoolType),
+ argTypes(types.DoubleType, types.DoubleType), types.BoolType,
+ decls.OverloadExamples(`2.0 < 2.4 // true`)),
decls.Overload(overloads.LessDoubleInt64,
- argTypes(types.DoubleType, types.IntType), types.BoolType),
+ argTypes(types.DoubleType, types.IntType), types.BoolType,
+ decls.OverloadExamples(`2.1 < 3 // true`)),
decls.Overload(overloads.LessDoubleUint64,
- argTypes(types.DoubleType, types.UintType), types.BoolType),
+ argTypes(types.DoubleType, types.UintType), types.BoolType,
+ decls.OverloadExamples(`2.3 < 2u // false`, `-1.0 < 1u // true`)),
decls.Overload(overloads.LessString,
- argTypes(types.StringType, types.StringType), types.BoolType),
+ argTypes(types.StringType, types.StringType), types.BoolType,
+ decls.OverloadExamples(`'a' < 'b' // true`, `'cat' < 'cab' // false`)),
decls.Overload(overloads.LessBytes,
- argTypes(types.BytesType, types.BytesType), types.BoolType),
+ argTypes(types.BytesType, types.BytesType), types.BoolType,
+ decls.OverloadExamples(`b'hello' < b'world' // true`)),
decls.Overload(overloads.LessTimestamp,
- argTypes(types.TimestampType, types.TimestampType), types.BoolType),
+ argTypes(types.TimestampType, types.TimestampType), types.BoolType,
+ decls.OverloadExamples(`timestamp('2001-01-01T02:03:04Z') < timestamp('2002-02-02T02:03:04Z') // true`)),
decls.Overload(overloads.LessDuration,
- argTypes(types.DurationType, types.DurationType), types.BoolType),
+ argTypes(types.DurationType, types.DurationType), types.BoolType,
+ decls.OverloadExamples(`duration('1ms') < duration('1s') // true`)),
decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
cmp := lhs.(traits.Comparer).Compare(rhs)
if cmp == types.IntNegOne {
@@ -229,34 +321,51 @@ func init() {
}, traits.ComparerType)),
function(operators.LessEquals,
+ decls.FunctionDocs(
+ `compare two values and return true if the first value is`,
+ `less than or equal to the second`),
decls.Overload(overloads.LessEqualsBool,
- argTypes(types.BoolType, types.BoolType), types.BoolType),
+ argTypes(types.BoolType, types.BoolType), types.BoolType,
+ decls.OverloadExamples(`false <= true // true`)),
decls.Overload(overloads.LessEqualsInt64,
- argTypes(types.IntType, types.IntType), types.BoolType),
+ argTypes(types.IntType, types.IntType), types.BoolType,
+ decls.OverloadExamples(`-2 <= 3 // true`)),
decls.Overload(overloads.LessEqualsInt64Double,
- argTypes(types.IntType, types.DoubleType), types.BoolType),
+ argTypes(types.IntType, types.DoubleType), types.BoolType,
+ decls.OverloadExamples(`1 <= 1.1 // true`)),
decls.Overload(overloads.LessEqualsInt64Uint64,
- argTypes(types.IntType, types.UintType), types.BoolType),
+ argTypes(types.IntType, types.UintType), types.BoolType,
+ decls.OverloadExamples(`1 <= 2u // true`, `-1 <= 0u // true`)),
decls.Overload(overloads.LessEqualsUint64,
- argTypes(types.UintType, types.UintType), types.BoolType),
+ argTypes(types.UintType, types.UintType), types.BoolType,
+ decls.OverloadExamples(`1u <= 2u // true`)),
decls.Overload(overloads.LessEqualsUint64Double,
- argTypes(types.UintType, types.DoubleType), types.BoolType),
+ argTypes(types.UintType, types.DoubleType), types.BoolType,
+ decls.OverloadExamples(`1u <= 1.0 // true`, `1u <= 1.1 // true`)),
decls.Overload(overloads.LessEqualsUint64Int64,
- argTypes(types.UintType, types.IntType), types.BoolType),
+ argTypes(types.UintType, types.IntType), types.BoolType,
+ decls.OverloadExamples(`1u <= 23 // true`)),
decls.Overload(overloads.LessEqualsDouble,
- argTypes(types.DoubleType, types.DoubleType), types.BoolType),
+ argTypes(types.DoubleType, types.DoubleType), types.BoolType,
+ decls.OverloadExamples(`2.0 <= 2.4 // true`)),
decls.Overload(overloads.LessEqualsDoubleInt64,
- argTypes(types.DoubleType, types.IntType), types.BoolType),
+ argTypes(types.DoubleType, types.IntType), types.BoolType,
+ decls.OverloadExamples(`2.1 <= 3 // true`)),
decls.Overload(overloads.LessEqualsDoubleUint64,
- argTypes(types.DoubleType, types.UintType), types.BoolType),
+ argTypes(types.DoubleType, types.UintType), types.BoolType,
+ decls.OverloadExamples(`2.0 <= 2u // true`, `-1.0 <= 1u // true`)),
decls.Overload(overloads.LessEqualsString,
- argTypes(types.StringType, types.StringType), types.BoolType),
+ argTypes(types.StringType, types.StringType), types.BoolType,
+ decls.OverloadExamples(`'a' <= 'b' // true`, `'a' <= 'a' // true`, `'cat' <= 'cab' // false`)),
decls.Overload(overloads.LessEqualsBytes,
- argTypes(types.BytesType, types.BytesType), types.BoolType),
+ argTypes(types.BytesType, types.BytesType), types.BoolType,
+ decls.OverloadExamples(`b'hello' <= b'world' // true`)),
decls.Overload(overloads.LessEqualsTimestamp,
- argTypes(types.TimestampType, types.TimestampType), types.BoolType),
+ argTypes(types.TimestampType, types.TimestampType), types.BoolType,
+ decls.OverloadExamples(`timestamp('2001-01-01T02:03:04Z') <= timestamp('2002-02-02T02:03:04Z') // true`)),
decls.Overload(overloads.LessEqualsDuration,
- argTypes(types.DurationType, types.DurationType), types.BoolType),
+ argTypes(types.DurationType, types.DurationType), types.BoolType,
+ decls.OverloadExamples(`duration('1ms') <= duration('1s') // true`)),
decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
cmp := lhs.(traits.Comparer).Compare(rhs)
if cmp == types.IntNegOne || cmp == types.IntZero {
@@ -269,34 +378,51 @@ func init() {
}, traits.ComparerType)),
function(operators.Greater,
+ decls.FunctionDocs(
+ `compare two values and return true if the first value is`,
+ `greater than the second`),
decls.Overload(overloads.GreaterBool,
- argTypes(types.BoolType, types.BoolType), types.BoolType),
+ argTypes(types.BoolType, types.BoolType), types.BoolType,
+ decls.OverloadExamples(`true > false // true`)),
decls.Overload(overloads.GreaterInt64,
- argTypes(types.IntType, types.IntType), types.BoolType),
+ argTypes(types.IntType, types.IntType), types.BoolType,
+ decls.OverloadExamples(`3 > -2 // true`)),
decls.Overload(overloads.GreaterInt64Double,
- argTypes(types.IntType, types.DoubleType), types.BoolType),
+ argTypes(types.IntType, types.DoubleType), types.BoolType,
+ decls.OverloadExamples(`2 > 1.1 // true`)),
decls.Overload(overloads.GreaterInt64Uint64,
- argTypes(types.IntType, types.UintType), types.BoolType),
+ argTypes(types.IntType, types.UintType), types.BoolType,
+ decls.OverloadExamples(`3 > 2u // true`)),
decls.Overload(overloads.GreaterUint64,
- argTypes(types.UintType, types.UintType), types.BoolType),
+ argTypes(types.UintType, types.UintType), types.BoolType,
+ decls.OverloadExamples(`2u > 1u // true`)),
decls.Overload(overloads.GreaterUint64Double,
- argTypes(types.UintType, types.DoubleType), types.BoolType),
+ argTypes(types.UintType, types.DoubleType), types.BoolType,
+ decls.OverloadExamples(`2u > 1.9 // true`)),
decls.Overload(overloads.GreaterUint64Int64,
- argTypes(types.UintType, types.IntType), types.BoolType),
+ argTypes(types.UintType, types.IntType), types.BoolType,
+ decls.OverloadExamples(`23u > 1 // true`, `0u > -1 // true`)),
decls.Overload(overloads.GreaterDouble,
- argTypes(types.DoubleType, types.DoubleType), types.BoolType),
+ argTypes(types.DoubleType, types.DoubleType), types.BoolType,
+ decls.OverloadExamples(`2.4 > 2.0 // true`)),
decls.Overload(overloads.GreaterDoubleInt64,
- argTypes(types.DoubleType, types.IntType), types.BoolType),
+ argTypes(types.DoubleType, types.IntType), types.BoolType,
+ decls.OverloadExamples(`3.1 > 3 // true`, `3.0 > 3 // false`)),
decls.Overload(overloads.GreaterDoubleUint64,
- argTypes(types.DoubleType, types.UintType), types.BoolType),
+ argTypes(types.DoubleType, types.UintType), types.BoolType,
+ decls.OverloadExamples(`2.3 > 2u // true`)),
decls.Overload(overloads.GreaterString,
- argTypes(types.StringType, types.StringType), types.BoolType),
+ argTypes(types.StringType, types.StringType), types.BoolType,
+ decls.OverloadExamples(`'b' > 'a' // true`)),
decls.Overload(overloads.GreaterBytes,
- argTypes(types.BytesType, types.BytesType), types.BoolType),
+ argTypes(types.BytesType, types.BytesType), types.BoolType,
+ decls.OverloadExamples(`b'world' > b'hello' // true`)),
decls.Overload(overloads.GreaterTimestamp,
- argTypes(types.TimestampType, types.TimestampType), types.BoolType),
+ argTypes(types.TimestampType, types.TimestampType), types.BoolType,
+ decls.OverloadExamples(`timestamp('2002-02-02T02:03:04Z') > timestamp('2001-01-01T02:03:04Z') // true`)),
decls.Overload(overloads.GreaterDuration,
- argTypes(types.DurationType, types.DurationType), types.BoolType),
+ argTypes(types.DurationType, types.DurationType), types.BoolType,
+ decls.OverloadExamples(`duration('1ms') > duration('1us') // true`)),
decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
cmp := lhs.(traits.Comparer).Compare(rhs)
if cmp == types.IntOne {
@@ -309,34 +435,51 @@ func init() {
}, traits.ComparerType)),
function(operators.GreaterEquals,
+ decls.FunctionDocs(
+ `compare two values and return true if the first value is`,
+ `greater than or equal to the second`),
decls.Overload(overloads.GreaterEqualsBool,
- argTypes(types.BoolType, types.BoolType), types.BoolType),
+ argTypes(types.BoolType, types.BoolType), types.BoolType,
+ decls.OverloadExamples(`true >= false // true`)),
decls.Overload(overloads.GreaterEqualsInt64,
- argTypes(types.IntType, types.IntType), types.BoolType),
+ argTypes(types.IntType, types.IntType), types.BoolType,
+ decls.OverloadExamples(`3 >= -2 // true`)),
decls.Overload(overloads.GreaterEqualsInt64Double,
- argTypes(types.IntType, types.DoubleType), types.BoolType),
+ argTypes(types.IntType, types.DoubleType), types.BoolType,
+ decls.OverloadExamples(`2 >= 1.1 // true`, `1 >= 1.0 // true`)),
decls.Overload(overloads.GreaterEqualsInt64Uint64,
- argTypes(types.IntType, types.UintType), types.BoolType),
+ argTypes(types.IntType, types.UintType), types.BoolType,
+ decls.OverloadExamples(`3 >= 2u // true`)),
decls.Overload(overloads.GreaterEqualsUint64,
- argTypes(types.UintType, types.UintType), types.BoolType),
+ argTypes(types.UintType, types.UintType), types.BoolType,
+ decls.OverloadExamples(`2u >= 1u // true`)),
decls.Overload(overloads.GreaterEqualsUint64Double,
- argTypes(types.UintType, types.DoubleType), types.BoolType),
+ argTypes(types.UintType, types.DoubleType), types.BoolType,
+ decls.OverloadExamples(`2u >= 1.9 // true`)),
decls.Overload(overloads.GreaterEqualsUint64Int64,
- argTypes(types.UintType, types.IntType), types.BoolType),
+ argTypes(types.UintType, types.IntType), types.BoolType,
+ decls.OverloadExamples(`23u >= 1 // true`, `1u >= 1 // true`)),
decls.Overload(overloads.GreaterEqualsDouble,
- argTypes(types.DoubleType, types.DoubleType), types.BoolType),
+ argTypes(types.DoubleType, types.DoubleType), types.BoolType,
+ decls.OverloadExamples(`2.4 >= 2.0 // true`)),
decls.Overload(overloads.GreaterEqualsDoubleInt64,
- argTypes(types.DoubleType, types.IntType), types.BoolType),
+ argTypes(types.DoubleType, types.IntType), types.BoolType,
+ decls.OverloadExamples(`3.1 >= 3 // true`)),
decls.Overload(overloads.GreaterEqualsDoubleUint64,
- argTypes(types.DoubleType, types.UintType), types.BoolType),
+ argTypes(types.DoubleType, types.UintType), types.BoolType,
+ decls.OverloadExamples(`2.3 >= 2u // true`)),
decls.Overload(overloads.GreaterEqualsString,
- argTypes(types.StringType, types.StringType), types.BoolType),
+ argTypes(types.StringType, types.StringType), types.BoolType,
+ decls.OverloadExamples(`'b' >= 'a' // true`)),
decls.Overload(overloads.GreaterEqualsBytes,
- argTypes(types.BytesType, types.BytesType), types.BoolType),
+ argTypes(types.BytesType, types.BytesType), types.BoolType,
+ decls.OverloadExamples(`b'world' >= b'hello' // true`)),
decls.Overload(overloads.GreaterEqualsTimestamp,
- argTypes(types.TimestampType, types.TimestampType), types.BoolType),
+ argTypes(types.TimestampType, types.TimestampType), types.BoolType,
+ decls.OverloadExamples(`timestamp('2001-01-01T02:03:04Z') >= timestamp('2001-01-01T02:03:04Z') // true`)),
decls.Overload(overloads.GreaterEqualsDuration,
- argTypes(types.DurationType, types.DurationType), types.BoolType),
+ argTypes(types.DurationType, types.DurationType), types.BoolType,
+ decls.OverloadExamples(`duration('60s') >= duration('1m') // true`)),
decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
cmp := lhs.(traits.Comparer).Compare(rhs)
if cmp == types.IntOne || cmp == types.IntZero {
@@ -350,16 +493,28 @@ func init() {
// Indexing
function(operators.Index,
- decls.Overload(overloads.IndexList, argTypes(listOfA, types.IntType), paramA),
- decls.Overload(overloads.IndexMap, argTypes(mapOfAB, paramA), paramB),
+ decls.FunctionDocs(`select a value from a list by index, or value from a map by key`),
+ decls.Overload(overloads.IndexList, argTypes(listOfA, types.IntType), paramA,
+ decls.OverloadExamples(`[1, 2, 3][1] // 2`)),
+ decls.Overload(overloads.IndexMap, argTypes(mapOfAB, paramA), paramB,
+ decls.OverloadExamples(
+ `{'key': 'value'}['key'] // 'value'`,
+ `{'key': 'value'}['missing'] // error`)),
decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
return lhs.(traits.Indexer).Get(rhs)
}, traits.IndexerType)),
// Collections operators
function(operators.In,
- decls.Overload(overloads.InList, argTypes(paramA, listOfA), types.BoolType),
- decls.Overload(overloads.InMap, argTypes(paramA, mapOfAB), types.BoolType),
+ decls.FunctionDocs(`test whether a value exists in a list, or a key exists in a map`),
+ decls.Overload(overloads.InList, argTypes(paramA, listOfA), types.BoolType,
+ decls.OverloadExamples(
+ `2 in [1, 2, 3] // true`,
+ `"a" in ["b", "c"] // false`)),
+ decls.Overload(overloads.InMap, argTypes(paramA, mapOfAB), types.BoolType,
+ decls.OverloadExamples(
+ `'key1' in {'key1': 'value1', 'key2': 'value2'} // true`,
+ `3 in {1: "one", 2: "two"} // false`)),
decls.SingletonBinaryBinding(inAggregate)),
function(operators.OldIn,
decls.DisableDeclaration(true), // safe deprecation
@@ -372,221 +527,364 @@ func init() {
decls.Overload(overloads.InMap, argTypes(paramA, mapOfAB), types.BoolType),
decls.SingletonBinaryBinding(inAggregate)),
function(overloads.Size,
- decls.Overload(overloads.SizeBytes, argTypes(types.BytesType), types.IntType),
- decls.MemberOverload(overloads.SizeBytesInst, argTypes(types.BytesType), types.IntType),
- decls.Overload(overloads.SizeList, argTypes(listOfA), types.IntType),
- decls.MemberOverload(overloads.SizeListInst, argTypes(listOfA), types.IntType),
- decls.Overload(overloads.SizeMap, argTypes(mapOfAB), types.IntType),
- decls.MemberOverload(overloads.SizeMapInst, argTypes(mapOfAB), types.IntType),
- decls.Overload(overloads.SizeString, argTypes(types.StringType), types.IntType),
- decls.MemberOverload(overloads.SizeStringInst, argTypes(types.StringType), types.IntType),
+ decls.FunctionDocs(
+ `compute the size of a list or map, the number of characters in a string,`,
+ `or the number of bytes in a sequence`),
+ decls.Overload(overloads.SizeBytes, argTypes(types.BytesType), types.IntType,
+ decls.OverloadExamples(`size(b'123') // 3`)),
+ decls.MemberOverload(overloads.SizeBytesInst, argTypes(types.BytesType), types.IntType,
+ decls.OverloadExamples(`b'123'.size() // 3`)),
+ decls.Overload(overloads.SizeList, argTypes(listOfA), types.IntType,
+ decls.OverloadExamples(`size([1, 2, 3]) // 3`)),
+ decls.MemberOverload(overloads.SizeListInst, argTypes(listOfA), types.IntType,
+ decls.OverloadExamples(`[1, 2, 3].size() // 3`)),
+ decls.Overload(overloads.SizeMap, argTypes(mapOfAB), types.IntType,
+ decls.OverloadExamples(`size({'a': 1, 'b': 2}) // 2`)),
+ decls.MemberOverload(overloads.SizeMapInst, argTypes(mapOfAB), types.IntType,
+ decls.OverloadExamples(`{'a': 1, 'b': 2}.size() // 2`)),
+ decls.Overload(overloads.SizeString, argTypes(types.StringType), types.IntType,
+ decls.OverloadExamples(`size('hello') // 5`)),
+ decls.MemberOverload(overloads.SizeStringInst, argTypes(types.StringType), types.IntType,
+ decls.OverloadExamples(`'hello'.size() // 5`)),
decls.SingletonUnaryBinding(func(val ref.Val) ref.Val {
return val.(traits.Sizer).Size()
}, traits.SizerType)),
// Type conversions
function(overloads.TypeConvertType,
- decls.Overload(overloads.TypeConvertType, argTypes(paramA), types.NewTypeTypeWithParam(paramA)),
+ decls.FunctionDocs(`convert a value to its type identifier`),
+ decls.Overload(overloads.TypeConvertType, argTypes(paramA), types.NewTypeTypeWithParam(paramA),
+ decls.OverloadExamples(
+ `type(1) // int`,
+ `type('hello') // string`,
+ `type(int) // type`,
+ `type(type) // type`)),
decls.SingletonUnaryBinding(convertToType(types.TypeType))),
// Bool conversions
function(overloads.TypeConvertBool,
+ decls.FunctionDocs(`convert a value to a boolean`),
decls.Overload(overloads.BoolToBool, argTypes(types.BoolType), types.BoolType,
+ decls.OverloadExamples(`bool(true) // true`),
decls.UnaryBinding(identity)),
decls.Overload(overloads.StringToBool, argTypes(types.StringType), types.BoolType,
+ decls.OverloadExamples(`bool('true') // true`, `bool('false') // false`),
decls.UnaryBinding(convertToType(types.BoolType)))),
// Bytes conversions
function(overloads.TypeConvertBytes,
+ decls.FunctionDocs(`convert a value to bytes`),
decls.Overload(overloads.BytesToBytes, argTypes(types.BytesType), types.BytesType,
+ decls.OverloadExamples(`bytes(b'abc') // b'abc'`),
decls.UnaryBinding(identity)),
decls.Overload(overloads.StringToBytes, argTypes(types.StringType), types.BytesType,
+ decls.OverloadExamples(`bytes('hello') // b'hello'`),
decls.UnaryBinding(convertToType(types.BytesType)))),
// Double conversions
function(overloads.TypeConvertDouble,
+ decls.FunctionDocs(`convert a value to a double`),
decls.Overload(overloads.DoubleToDouble, argTypes(types.DoubleType), types.DoubleType,
+ decls.OverloadExamples(`double(1.23) // 1.23`),
decls.UnaryBinding(identity)),
decls.Overload(overloads.IntToDouble, argTypes(types.IntType), types.DoubleType,
+ decls.OverloadExamples(`double(123) // 123.0`),
decls.UnaryBinding(convertToType(types.DoubleType))),
decls.Overload(overloads.StringToDouble, argTypes(types.StringType), types.DoubleType,
+ decls.OverloadExamples(`double('1.23') // 1.23`),
decls.UnaryBinding(convertToType(types.DoubleType))),
decls.Overload(overloads.UintToDouble, argTypes(types.UintType), types.DoubleType,
+ decls.OverloadExamples(`double(123u) // 123.0`),
decls.UnaryBinding(convertToType(types.DoubleType)))),
// Duration conversions
function(overloads.TypeConvertDuration,
+ decls.FunctionDocs(`convert a value to a google.protobuf.Duration`),
decls.Overload(overloads.DurationToDuration, argTypes(types.DurationType), types.DurationType,
+ decls.OverloadExamples(`duration(duration('1s')) // duration('1s')`),
decls.UnaryBinding(identity)),
decls.Overload(overloads.IntToDuration, argTypes(types.IntType), types.DurationType,
decls.UnaryBinding(convertToType(types.DurationType))),
decls.Overload(overloads.StringToDuration, argTypes(types.StringType), types.DurationType,
+ decls.OverloadExamples(`duration('1h2m3s') // duration('3723s')`),
decls.UnaryBinding(convertToType(types.DurationType)))),
// Dyn conversions
function(overloads.TypeConvertDyn,
- decls.Overload(overloads.ToDyn, argTypes(paramA), types.DynType),
+ decls.FunctionDocs(`indicate that the type is dynamic for type-checking purposes`),
+ decls.Overload(overloads.ToDyn, argTypes(paramA), types.DynType,
+ decls.OverloadExamples(`dyn(1) // 1`)),
decls.SingletonUnaryBinding(identity)),
// Int conversions
function(overloads.TypeConvertInt,
+ decls.FunctionDocs(`convert a value to an int`),
decls.Overload(overloads.IntToInt, argTypes(types.IntType), types.IntType,
+ decls.OverloadExamples(`int(123) // 123`),
decls.UnaryBinding(identity)),
decls.Overload(overloads.DoubleToInt, argTypes(types.DoubleType), types.IntType,
+ decls.OverloadExamples(`int(123.45) // 123`),
decls.UnaryBinding(convertToType(types.IntType))),
decls.Overload(overloads.DurationToInt, argTypes(types.DurationType), types.IntType,
- decls.UnaryBinding(convertToType(types.IntType))),
+ decls.OverloadExamples(`int(duration('1s')) // 1000000000`),
+ decls.UnaryBinding(convertToType(types.IntType))), // Duration to nanoseconds
decls.Overload(overloads.StringToInt, argTypes(types.StringType), types.IntType,
+ decls.OverloadExamples(`int('123') // 123`, `int('-456') // -456`),
decls.UnaryBinding(convertToType(types.IntType))),
decls.Overload(overloads.TimestampToInt, argTypes(types.TimestampType), types.IntType,
- decls.UnaryBinding(convertToType(types.IntType))),
+ decls.OverloadExamples(`int(timestamp('1970-01-01T00:00:01Z')) // 1`),
+ decls.UnaryBinding(convertToType(types.IntType))), // Timestamp to epoch seconds
decls.Overload(overloads.UintToInt, argTypes(types.UintType), types.IntType,
- decls.UnaryBinding(convertToType(types.IntType))),
- ),
+ decls.OverloadExamples(`int(123u) // 123`),
+ decls.UnaryBinding(convertToType(types.IntType)))),
// String conversions
function(overloads.TypeConvertString,
+ decls.FunctionDocs(`convert a value to a string`),
decls.Overload(overloads.StringToString, argTypes(types.StringType), types.StringType,
+ decls.OverloadExamples(`string('hello') // 'hello'`),
decls.UnaryBinding(identity)),
decls.Overload(overloads.BoolToString, argTypes(types.BoolType), types.StringType,
+ decls.OverloadExamples(`string(true) // 'true'`),
decls.UnaryBinding(convertToType(types.StringType))),
decls.Overload(overloads.BytesToString, argTypes(types.BytesType), types.StringType,
+ decls.OverloadExamples(`string(b'hello') // 'hello'`),
decls.UnaryBinding(convertToType(types.StringType))),
decls.Overload(overloads.DoubleToString, argTypes(types.DoubleType), types.StringType,
- decls.UnaryBinding(convertToType(types.StringType))),
+ decls.OverloadExamples(`string(-1.23e4) // '-12300'`),
+ decls.UnaryBinding(convertToType(types.StringType))),
decls.Overload(overloads.DurationToString, argTypes(types.DurationType), types.StringType,
+ decls.OverloadExamples(`string(duration('1h30m')) // '5400s'`),
decls.UnaryBinding(convertToType(types.StringType))),
decls.Overload(overloads.IntToString, argTypes(types.IntType), types.StringType,
+ decls.OverloadExamples(`string(-123) // '-123'`),
decls.UnaryBinding(convertToType(types.StringType))),
decls.Overload(overloads.TimestampToString, argTypes(types.TimestampType), types.StringType,
+ decls.OverloadExamples(`string(timestamp('1970-01-01T00:00:00Z')) // '1970-01-01T00:00:00Z'`),
decls.UnaryBinding(convertToType(types.StringType))),
decls.Overload(overloads.UintToString, argTypes(types.UintType), types.StringType,
+ decls.OverloadExamples(`string(123u) // '123'`),
decls.UnaryBinding(convertToType(types.StringType)))),
// Timestamp conversions
function(overloads.TypeConvertTimestamp,
+ decls.FunctionDocs(`convert a value to a google.protobuf.Timestamp`),
decls.Overload(overloads.TimestampToTimestamp, argTypes(types.TimestampType), types.TimestampType,
+ decls.OverloadExamples(`timestamp(timestamp('2023-01-01T00:00:00Z')) // timestamp('2023-01-01T00:00:00Z')`),
decls.UnaryBinding(identity)),
decls.Overload(overloads.IntToTimestamp, argTypes(types.IntType), types.TimestampType,
+ decls.OverloadExamples(`timestamp(1) // timestamp('1970-01-01T00:00:01Z')`), // Epoch seconds to Timestamp
decls.UnaryBinding(convertToType(types.TimestampType))),
decls.Overload(overloads.StringToTimestamp, argTypes(types.StringType), types.TimestampType,
+ decls.OverloadExamples(`timestamp('2025-01-01T12:34:56Z') // timestamp('2025-01-01T12:34:56Z')`),
decls.UnaryBinding(convertToType(types.TimestampType)))),
// Uint conversions
function(overloads.TypeConvertUint,
+ decls.FunctionDocs(`convert a value to a uint`),
decls.Overload(overloads.UintToUint, argTypes(types.UintType), types.UintType,
+ decls.OverloadExamples(`uint(123u) // 123u`),
decls.UnaryBinding(identity)),
decls.Overload(overloads.DoubleToUint, argTypes(types.DoubleType), types.UintType,
+ decls.OverloadExamples(`uint(123.45) // 123u`),
decls.UnaryBinding(convertToType(types.UintType))),
decls.Overload(overloads.IntToUint, argTypes(types.IntType), types.UintType,
+ decls.OverloadExamples(`uint(123) // 123u`),
decls.UnaryBinding(convertToType(types.UintType))),
decls.Overload(overloads.StringToUint, argTypes(types.StringType), types.UintType,
+ decls.OverloadExamples(`uint('123') // 123u`),
decls.UnaryBinding(convertToType(types.UintType)))),
// String functions
function(overloads.Contains,
+ decls.FunctionDocs(`test whether a string contains a substring`),
decls.MemberOverload(overloads.ContainsString,
argTypes(types.StringType, types.StringType), types.BoolType,
+ decls.OverloadExamples(
+ `'hello world'.contains('o w') // true`,
+ `'hello world'.contains('goodbye') // false`),
decls.BinaryBinding(types.StringContains)),
decls.DisableTypeGuards(true)),
function(overloads.EndsWith,
+ decls.FunctionDocs(`test whether a string ends with a substring suffix`),
decls.MemberOverload(overloads.EndsWithString,
argTypes(types.StringType, types.StringType), types.BoolType,
+ decls.OverloadExamples(
+ `'hello world'.endsWith('world') // true`,
+ `'hello world'.endsWith('hello') // false`),
decls.BinaryBinding(types.StringEndsWith)),
decls.DisableTypeGuards(true)),
function(overloads.StartsWith,
+ decls.FunctionDocs(`test whether a string starts with a substring prefix`),
decls.MemberOverload(overloads.StartsWithString,
argTypes(types.StringType, types.StringType), types.BoolType,
+ decls.OverloadExamples(
+ `'hello world'.startsWith('hello') // true`,
+ `'hello world'.startsWith('world') // false`),
decls.BinaryBinding(types.StringStartsWith)),
decls.DisableTypeGuards(true)),
function(overloads.Matches,
- decls.Overload(overloads.Matches, argTypes(types.StringType, types.StringType), types.BoolType),
+ decls.FunctionDocs(`test whether a string matches an RE2 regular expression`),
+ decls.Overload(overloads.Matches, argTypes(types.StringType, types.StringType), types.BoolType,
+ decls.OverloadExamples(
+ `matches('123-456', '^[0-9]+(-[0-9]+)?$') // true`,
+ `matches('hello', '^h.*o$') // true`)),
decls.MemberOverload(overloads.MatchesString,
- argTypes(types.StringType, types.StringType), types.BoolType),
+ argTypes(types.StringType, types.StringType), types.BoolType,
+ decls.OverloadExamples(
+ `'123-456'.matches('^[0-9]+(-[0-9]+)?$') // true`,
+ `'hello'.matches('^h.*o$') // true`)),
decls.SingletonBinaryBinding(func(str, pat ref.Val) ref.Val {
return str.(traits.Matcher).Match(pat)
}, traits.MatcherType)),
// Timestamp / duration functions
function(overloads.TimeGetFullYear,
+ decls.FunctionDocs(`get the full year from a timestamp, UTC unless an IANA timezone is specified.`),
decls.MemberOverload(overloads.TimestampToYear,
- argTypes(types.TimestampType), types.IntType),
+ argTypes(types.TimestampType), types.IntType,
+ decls.OverloadExamples(`timestamp('2023-07-14T10:30:45.123Z').getFullYear() // 2023`),
+ decls.UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetFullYear(ts, utcTZ)
+ })),
decls.MemberOverload(overloads.TimestampToYearWithTz,
- argTypes(types.TimestampType, types.StringType), types.IntType)),
+ argTypes(types.TimestampType, types.StringType), types.IntType,
+ decls.OverloadExamples(`timestamp('2023-01-01T05:30:00Z').getFullYear('-08:00') // 2022`),
+ decls.BinaryBinding(timestampGetFullYear))),
function(overloads.TimeGetMonth,
+ decls.FunctionDocs(`get the 0-based month from a timestamp, UTC unless an IANA timezone is specified.`),
decls.MemberOverload(overloads.TimestampToMonth,
- argTypes(types.TimestampType), types.IntType),
+ argTypes(types.TimestampType), types.IntType,
+ decls.OverloadExamples(`timestamp('2023-07-14T10:30:45.123Z').getMonth() // 6`), // July is month 6
+ decls.UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetMonth(ts, utcTZ)
+ })),
decls.MemberOverload(overloads.TimestampToMonthWithTz,
- argTypes(types.TimestampType, types.StringType), types.IntType)),
+ argTypes(types.TimestampType, types.StringType), types.IntType,
+ decls.OverloadExamples(`timestamp('2023-01-01T05:30:00Z').getMonth('America/Los_Angeles') // 11`), // December is month 11
+ decls.BinaryBinding(timestampGetMonth))),
function(overloads.TimeGetDayOfYear,
+ decls.FunctionDocs(`get the 0-based day of the year from a timestamp, UTC unless an IANA timezone is specified.`),
decls.MemberOverload(overloads.TimestampToDayOfYear,
- argTypes(types.TimestampType), types.IntType),
+ argTypes(types.TimestampType), types.IntType,
+ decls.OverloadExamples(`timestamp('2023-01-02T00:00:00Z').getDayOfYear() // 1`),
+ decls.UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetDayOfYear(ts, utcTZ)
+ })),
decls.MemberOverload(overloads.TimestampToDayOfYearWithTz,
- argTypes(types.TimestampType, types.StringType), types.IntType)),
+ argTypes(types.TimestampType, types.StringType), types.IntType,
+ decls.OverloadExamples(`timestamp('2023-01-01T05:00:00Z').getDayOfYear('America/Los_Angeles') // 364`),
+ decls.BinaryBinding(timestampGetDayOfYear))),
function(overloads.TimeGetDayOfMonth,
+ decls.FunctionDocs(`get the 0-based day of the month from a timestamp, UTC unless an IANA timezone is specified.`),
decls.MemberOverload(overloads.TimestampToDayOfMonthZeroBased,
- argTypes(types.TimestampType), types.IntType),
+ argTypes(types.TimestampType), types.IntType,
+ decls.OverloadExamples(`timestamp('2023-07-14T10:30:45.123Z').getDayOfMonth() // 13`),
+ decls.UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetDayOfMonthZeroBased(ts, utcTZ)
+ })),
decls.MemberOverload(overloads.TimestampToDayOfMonthZeroBasedWithTz,
- argTypes(types.TimestampType, types.StringType), types.IntType)),
+ argTypes(types.TimestampType, types.StringType), types.IntType,
+ decls.OverloadExamples(`timestamp('2023-07-01T05:00:00Z').getDayOfMonth('America/Los_Angeles') // 29`),
+ decls.BinaryBinding(timestampGetDayOfMonthZeroBased))),
function(overloads.TimeGetDate,
+ decls.FunctionDocs(`get the 1-based day of the month from a timestamp, UTC unless an IANA timezone is specified.`),
decls.MemberOverload(overloads.TimestampToDayOfMonthOneBased,
- argTypes(types.TimestampType), types.IntType),
+ argTypes(types.TimestampType), types.IntType,
+ decls.OverloadExamples(`timestamp('2023-07-14T10:30:45.123Z').getDate() // 14`),
+ decls.UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetDayOfMonthOneBased(ts, utcTZ)
+ })),
decls.MemberOverload(overloads.TimestampToDayOfMonthOneBasedWithTz,
- argTypes(types.TimestampType, types.StringType), types.IntType)),
+ argTypes(types.TimestampType, types.StringType), types.IntType,
+ decls.OverloadExamples(`timestamp('2023-07-01T05:00:00Z').getDate('America/Los_Angeles') // 30`),
+ decls.BinaryBinding(timestampGetDayOfMonthOneBased))),
function(overloads.TimeGetDayOfWeek,
+ decls.FunctionDocs(`get the 0-based day of the week from a timestamp, UTC unless an IANA timezone is specified.`),
decls.MemberOverload(overloads.TimestampToDayOfWeek,
- argTypes(types.TimestampType), types.IntType),
+ argTypes(types.TimestampType), types.IntType,
+ decls.OverloadExamples(`timestamp('2023-07-14T10:30:45.123Z').getDayOfWeek() // 5`), // Friday is day 5
+ decls.UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetDayOfWeek(ts, utcTZ)
+ })),
decls.MemberOverload(overloads.TimestampToDayOfWeekWithTz,
- argTypes(types.TimestampType, types.StringType), types.IntType)),
+ argTypes(types.TimestampType, types.StringType), types.IntType,
+ decls.OverloadExamples(`timestamp('2023-07-16T05:00:00Z').getDayOfWeek('America/Los_Angeles') // 6`), // Saturday is day 6
+ decls.BinaryBinding(timestampGetDayOfWeek))),
function(overloads.TimeGetHours,
+ decls.FunctionDocs(`get the hours portion from a timestamp, or convert a duration to hours`),
decls.MemberOverload(overloads.TimestampToHours,
- argTypes(types.TimestampType), types.IntType),
+ argTypes(types.TimestampType), types.IntType,
+ decls.OverloadExamples(`timestamp('2023-07-14T10:30:45.123Z').getHours() // 10`),
+ decls.UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetHours(ts, utcTZ)
+ })),
decls.MemberOverload(overloads.TimestampToHoursWithTz,
- argTypes(types.TimestampType, types.StringType), types.IntType),
+ argTypes(types.TimestampType, types.StringType), types.IntType,
+ decls.OverloadExamples(`timestamp('2023-07-14T10:30:45.123Z').getHours('America/Los_Angeles') // 3`),
+ decls.BinaryBinding(timestampGetHours)),
decls.MemberOverload(overloads.DurationToHours,
- argTypes(types.DurationType), types.IntType)),
+ argTypes(types.DurationType), types.IntType,
+ decls.OverloadExamples(`duration('3723s').getHours() // 1`),
+ decls.UnaryBinding(types.DurationGetHours))),
function(overloads.TimeGetMinutes,
+ decls.FunctionDocs(`get the minutes portion from a timestamp, or convert a duration to minutes`),
decls.MemberOverload(overloads.TimestampToMinutes,
- argTypes(types.TimestampType), types.IntType),
+ argTypes(types.TimestampType), types.IntType,
+ decls.OverloadExamples(`timestamp('2023-07-14T10:30:45.123Z').getMinutes() // 30`),
+ decls.UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetMinutes(ts, utcTZ)
+ })),
decls.MemberOverload(overloads.TimestampToMinutesWithTz,
- argTypes(types.TimestampType, types.StringType), types.IntType),
+ argTypes(types.TimestampType, types.StringType), types.IntType,
+ decls.OverloadExamples(`timestamp('2023-07-14T10:30:45.123Z').getMinutes('America/Los_Angeles') // 30`),
+ decls.BinaryBinding(timestampGetMinutes)),
decls.MemberOverload(overloads.DurationToMinutes,
- argTypes(types.DurationType), types.IntType)),
+ argTypes(types.DurationType), types.IntType,
+ decls.OverloadExamples(`duration('3723s').getMinutes() // 62`),
+ decls.UnaryBinding(types.DurationGetMinutes))),
function(overloads.TimeGetSeconds,
+ decls.FunctionDocs(`get the seconds portion from a timestamp, or convert a duration to seconds`),
decls.MemberOverload(overloads.TimestampToSeconds,
- argTypes(types.TimestampType), types.IntType),
+ argTypes(types.TimestampType), types.IntType,
+ decls.OverloadExamples(`timestamp('2023-07-14T10:30:45.123Z').getSeconds() // 45`),
+ decls.UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetSeconds(ts, utcTZ)
+ })),
decls.MemberOverload(overloads.TimestampToSecondsWithTz,
- argTypes(types.TimestampType, types.StringType), types.IntType),
+ argTypes(types.TimestampType, types.StringType), types.IntType,
+ decls.OverloadExamples(`timestamp('2023-07-14T10:30:45.123Z').getSeconds('America/Los_Angeles') // 45`),
+ decls.BinaryBinding(timestampGetSeconds)),
decls.MemberOverload(overloads.DurationToSeconds,
- argTypes(types.DurationType), types.IntType)),
+ argTypes(types.DurationType), types.IntType,
+ decls.OverloadExamples(`duration('3723.456s').getSeconds() // 3723`),
+ decls.UnaryBinding(types.DurationGetSeconds))),
function(overloads.TimeGetMilliseconds,
+ decls.FunctionDocs(`get the milliseconds portion from a timestamp`),
decls.MemberOverload(overloads.TimestampToMilliseconds,
- argTypes(types.TimestampType), types.IntType),
+ argTypes(types.TimestampType), types.IntType,
+ decls.OverloadExamples(`timestamp('2023-07-14T10:30:45.123Z').getMilliseconds() // 123`),
+ decls.UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetMilliseconds(ts, utcTZ)
+ })),
decls.MemberOverload(overloads.TimestampToMillisecondsWithTz,
- argTypes(types.TimestampType, types.StringType), types.IntType),
+ argTypes(types.TimestampType, types.StringType), types.IntType,
+ decls.OverloadExamples(`timestamp('2023-07-14T10:30:45.123Z').getMilliseconds('America/Los_Angeles') // 123`),
+ decls.BinaryBinding(timestampGetMilliseconds)),
decls.MemberOverload(overloads.DurationToMilliseconds,
- argTypes(types.DurationType), types.IntType)),
- }
-
- stdFnDecls = make([]*exprpb.Decl, 0, len(stdFunctions))
- for _, fn := range stdFunctions {
- if fn.IsDeclarationDisabled() {
- continue
- }
- ed, err := decls.FunctionDeclToExprDecl(fn)
- if err != nil {
- panic(err)
- }
- stdFnDecls = append(stdFnDecls, ed)
+ argTypes(types.DurationType), types.IntType,
+ decls.UnaryBinding(types.DurationGetMilliseconds))),
}
}
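
Also new above: the timestamp accessors now carry their bindings directly in the declarations, with the one-argument forms defaulting to utcTZ by wrapping the two-argument implementations in closures. A sketch of that currying pattern (defaultUTC and the inline getHours stand-in are hypothetical):

package main

import (
    "fmt"
    "time"

    "github.com/google/cel-go/common/types"
    "github.com/google/cel-go/common/types/ref"
)

// defaultUTC turns a two-argument (timestamp, timezone) binding into the
// one-argument form, mirroring the closures in the declarations above.
func defaultUTC(binary func(ts, tz ref.Val) ref.Val) func(ref.Val) ref.Val {
    utc := types.String("UTC")
    return func(ts ref.Val) ref.Val { return binary(ts, utc) }
}

func main() {
    getHours := func(ts, tz ref.Val) ref.Val {
        // Stand-in for the vendored timestampGetHours; assumes ts is UTC.
        t := ts.(types.Timestamp)
        return types.Int(t.Hour())
    }
    ts := types.Timestamp{Time: time.Date(2023, 7, 14, 10, 30, 45, 0, time.UTC)}
    fmt.Println(defaultUTC(getHours)(ts)) // 10
}
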
@@ -595,27 +893,11 @@ func Functions() []*decls.FunctionDecl {
return stdFunctions
}
-// FunctionExprDecls returns the legacy style protobuf-typed declarations for all functions and overloads
-// in the CEL standard environment.
-//
-// Deprecated: use Functions
-func FunctionExprDecls() []*exprpb.Decl {
- return stdFnDecls
-}
-
// Types returns the set of standard library types for CEL.
func Types() []*decls.VariableDecl {
return stdTypes
}
-// TypeExprDecls returns the legacy style protobuf-typed declarations for all types in the CEL
-// standard environment.
-//
-// Deprecated: use Types
-func TypeExprDecls() []*exprpb.Decl {
- return stdTypeDecls
-}
-
func notStrictlyFalse(value ref.Val) ref.Val {
if types.IsBool(value) {
return value
@@ -659,3 +941,118 @@ func convertToType(t ref.Type) functions.UnaryOp {
return val.ConvertToType(t)
}
}
+
+func timestampGetFullYear(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErrFromString(err.Error())
+ }
+ return types.Int(t.Year())
+}
+
+func timestampGetMonth(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErrFromString(err.Error())
+ }
+ // CEL spec indicates that the month should be 0-based, but the Time value
+ // for Month() is 1-based.
+ return types.Int(t.Month() - 1)
+}
+
+func timestampGetDayOfYear(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErrFromString(err.Error())
+ }
+ return types.Int(t.YearDay() - 1)
+}
+
+func timestampGetDayOfMonthZeroBased(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErrFromString(err.Error())
+ }
+ return types.Int(t.Day() - 1)
+}
+
+func timestampGetDayOfMonthOneBased(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErrFromString(err.Error())
+ }
+ return types.Int(t.Day())
+}
+
+func timestampGetDayOfWeek(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErrFromString(err.Error())
+ }
+ return types.Int(t.Weekday())
+}
+
+func timestampGetHours(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErrFromString(err.Error())
+ }
+ return types.Int(t.Hour())
+}
+
+func timestampGetMinutes(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErrFromString(err.Error())
+ }
+ return types.Int(t.Minute())
+}
+
+func timestampGetSeconds(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErrFromString(err.Error())
+ }
+ return types.Int(t.Second())
+}
+
+func timestampGetMilliseconds(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErrFromString(err.Error())
+ }
+ return types.Int(t.Nanosecond() / 1000000)
+}
+
+func inTimeZone(ts, tz ref.Val) (time.Time, error) {
+ t := ts.(types.Timestamp)
+ val := string(tz.(types.String))
+ ind := strings.Index(val, ":")
+ if ind == -1 {
+ loc, err := time.LoadLocation(val)
+ if err != nil {
+ return time.Time{}, err
+ }
+ return t.In(loc), nil
+ }
+
+ // If the input is not the name of a timezone (for example, 'US/Central'), it should be a numerical offset from UTC
+ // in the format ^(+|-)(0[0-9]|1[0-4]):[0-5][0-9]$. The numerical input is parsed in terms of hours and minutes.
+ hr, err := strconv.Atoi(string(val[0:ind]))
+ if err != nil {
+ return time.Time{}, err
+ }
+ min, err := strconv.Atoi(string(val[ind+1:]))
+ if err != nil {
+ return time.Time{}, err
+ }
+ var offset int
+ if string(val[0]) == "-" {
+ offset = hr*60 - min
+ } else {
+ offset = hr*60 + min
+ }
+ secondsEastOfUTC := int((time.Duration(offset) * time.Minute).Seconds())
+ timezone := time.FixedZone("", secondsEastOfUTC)
+ return t.In(timezone), nil
+}
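
inTimeZone accepts either an IANA zone name or a (+|-)HH:MM offset; because strconv.Atoi keeps the sign on the hours field, the minutes must be subtracted when the offset is negative. A standalone sketch of that arithmetic (fixedZone is a hypothetical helper, stdlib only):

package main

import (
    "fmt"
    "strconv"
    "strings"
    "time"
)

// fixedZone parses "(+|-)HH:MM" the same way inTimeZone does above.
func fixedZone(val string) (*time.Location, error) {
    ind := strings.Index(val, ":")
    hr, err := strconv.Atoi(val[:ind])
    if err != nil {
        return nil, err
    }
    min, err := strconv.Atoi(val[ind+1:])
    if err != nil {
        return nil, err
    }
    offset := hr*60 + min
    if strings.HasPrefix(val, "-") {
        offset = hr*60 - min // hr is already negative; subtract the minutes too
    }
    return time.FixedZone("", offset*60), nil
}

func main() {
    loc, _ := fixedZone("-08:00")
    t := time.Date(2023, 1, 1, 5, 30, 0, 0, time.UTC).In(loc)
    fmt.Println(t.Year()) // 2022, matching the getFullYear('-08:00') example
}
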
diff --git a/vendor/github.com/google/cel-go/common/types/BUILD.bazel b/vendor/github.com/google/cel-go/common/types/BUILD.bazel
index b5e44ffbf..7082bc755 100644
--- a/vendor/github.com/google/cel-go/common/types/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/common/types/BUILD.bazel
@@ -18,6 +18,7 @@ go_library(
"int.go",
"iterator.go",
"json_value.go",
+ "format.go",
"list.go",
"map.go",
"null.go",
@@ -40,10 +41,12 @@ go_library(
"//common/types/ref:go_default_library",
"//common/types/traits:go_default_library",
"@com_github_stoewer_go_strcase//:go_default_library",
+ "@dev_cel_expr//:expr",
"@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
"@org_golang_google_protobuf//encoding/protojson:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
+ "@org_golang_google_protobuf//types/dynamicpb:go_default_library",
"@org_golang_google_protobuf//types/known/anypb:go_default_library",
"@org_golang_google_protobuf//types/known/durationpb:go_default_library",
"@org_golang_google_protobuf//types/known/structpb:go_default_library",
diff --git a/vendor/github.com/google/cel-go/common/types/bool.go b/vendor/github.com/google/cel-go/common/types/bool.go
index 565734f3f..1f9e10739 100644
--- a/vendor/github.com/google/cel-go/common/types/bool.go
+++ b/vendor/github.com/google/cel-go/common/types/bool.go
@@ -18,6 +18,7 @@ import (
"fmt"
"reflect"
"strconv"
+ "strings"
"github.com/google/cel-go/common/types/ref"
@@ -128,6 +129,14 @@ func (b Bool) Value() any {
return bool(b)
}
+func (b Bool) format(sb *strings.Builder) {
+ if b {
+ sb.WriteString("true")
+ } else {
+ sb.WriteString("false")
+ }
+}
+
// IsBool returns whether the input ref.Val or ref.Type is equal to BoolType.
func IsBool(elem ref.Val) bool {
switch v := elem.(type) {
diff --git a/vendor/github.com/google/cel-go/common/types/bytes.go b/vendor/github.com/google/cel-go/common/types/bytes.go
index 5838755f8..b59e1fc20 100644
--- a/vendor/github.com/google/cel-go/common/types/bytes.go
+++ b/vendor/github.com/google/cel-go/common/types/bytes.go
@@ -19,6 +19,7 @@ import (
"encoding/base64"
"fmt"
"reflect"
+ "strings"
"unicode/utf8"
"github.com/google/cel-go/common/types/ref"
@@ -58,7 +59,17 @@ func (b Bytes) Compare(other ref.Val) ref.Val {
// ConvertToNative implements the ref.Val interface method.
func (b Bytes) ConvertToNative(typeDesc reflect.Type) (any, error) {
switch typeDesc.Kind() {
- case reflect.Array, reflect.Slice:
+ case reflect.Array:
+ if len(b) != typeDesc.Len() {
+ return nil, fmt.Errorf("[%d]byte not assignable to [%d]byte array", len(b), typeDesc.Len())
+ }
+ refArrPtr := reflect.New(reflect.ArrayOf(len(b), typeDesc.Elem()))
+ refArr := refArrPtr.Elem()
+ for i, byt := range b {
+ refArr.Index(i).Set(reflect.ValueOf(byt).Convert(typeDesc.Elem()))
+ }
+ return refArr.Interface(), nil
+ case reflect.Slice:
return reflect.ValueOf(b).Convert(typeDesc).Interface(), nil
case reflect.Ptr:
switch typeDesc {
@@ -128,3 +139,17 @@ func (b Bytes) Type() ref.Type {
func (b Bytes) Value() any {
return []byte(b)
}
+
+func (b Bytes) format(sb *strings.Builder) {
+ fmt.Fprintf(sb, "b\"%s\"", bytesToOctets([]byte(b)))
+}
+
+// bytesToOctets converts byte sequences to a string using a three digit octal encoded value
+// per byte.
+func bytesToOctets(byteVal []byte) string {
+ var b strings.Builder
+ for _, c := range byteVal {
+ fmt.Fprintf(&b, "\\%03o", c)
+ }
+ return b.String()
+}
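
The new reflect.Array branch lets CEL bytes convert to fixed-size Go byte arrays when the lengths match, instead of failing the conversion outright. A short usage sketch against the vendored API:

package main

import (
    "fmt"
    "reflect"

    "github.com/google/cel-go/common/types"
)

func main() {
    b := types.Bytes("hi")
    arr, err := b.ConvertToNative(reflect.TypeOf([2]byte{}))
    fmt.Println(arr, err) // [104 105] <nil>
    _, err = b.ConvertToNative(reflect.TypeOf([3]byte{}))
    fmt.Println(err) // length-mismatch error, per the new guard
}
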
diff --git a/vendor/github.com/google/cel-go/common/types/double.go b/vendor/github.com/google/cel-go/common/types/double.go
index 027e78978..1e7de9d6e 100644
--- a/vendor/github.com/google/cel-go/common/types/double.go
+++ b/vendor/github.com/google/cel-go/common/types/double.go
@@ -18,6 +18,8 @@ import (
"fmt"
"math"
"reflect"
+ "strconv"
+ "strings"
"github.com/google/cel-go/common/types/ref"
@@ -209,3 +211,23 @@ func (d Double) Type() ref.Type {
func (d Double) Value() any {
return float64(d)
}
+
+func (d Double) format(sb *strings.Builder) {
+ if math.IsNaN(float64(d)) {
+ sb.WriteString(`double("NaN")`)
+ return
+ }
+ if math.IsInf(float64(d), -1) {
+ sb.WriteString(`double("-Infinity")`)
+ return
+ }
+ if math.IsInf(float64(d), 1) {
+ sb.WriteString(`double("Infinity")`)
+ return
+ }
+ s := strconv.FormatFloat(float64(d), 'f', -1, 64)
+ sb.WriteString(s)
+ if !strings.ContainsRune(s, '.') {
+ sb.WriteString(".0")
+ }
+}
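
These unexported format methods feed the exported types.Format helper added in format.go below; doubles always render with a decimal point, and non-finite values render as quoted double(...) literals. A hedged usage sketch (output shape per the rules above; not guaranteed stable):

package main

import (
    "fmt"
    "math"

    "github.com/google/cel-go/common/types"
)

func main() {
    fmt.Println(types.Format(types.Double(3.0)))        // 3.0
    fmt.Println(types.Format(types.Double(math.NaN()))) // double("NaN")
    fmt.Println(types.Format(types.Bool(true)))         // true, via bool.go above
}
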
diff --git a/vendor/github.com/google/cel-go/common/types/duration.go b/vendor/github.com/google/cel-go/common/types/duration.go
index 596e56d6b..be58d567e 100644
--- a/vendor/github.com/google/cel-go/common/types/duration.go
+++ b/vendor/github.com/google/cel-go/common/types/duration.go
@@ -18,6 +18,7 @@ import (
"fmt"
"reflect"
"strconv"
+ "strings"
"time"
"github.com/google/cel-go/common/overloads"
@@ -185,6 +186,10 @@ func (d Duration) Value() any {
return d.Duration
}
+func (d Duration) format(sb *strings.Builder) {
+ fmt.Fprintf(sb, `duration("%ss")`, strconv.FormatFloat(d.Seconds(), 'f', -1, 64))
+}
+
// DurationGetHours returns the duration in hours.
func DurationGetHours(val ref.Val) ref.Val {
dur, ok := val.(Duration)
diff --git a/vendor/github.com/google/cel-go/common/types/err.go b/vendor/github.com/google/cel-go/common/types/err.go
index aa8f94b4f..17ab1a95e 100644
--- a/vendor/github.com/google/cel-go/common/types/err.go
+++ b/vendor/github.com/google/cel-go/common/types/err.go
@@ -31,6 +31,7 @@ type Error interface {
// Err type which extends the built-in go error and implements ref.Val.
type Err struct {
error
+ id int64
}
var (
@@ -58,7 +59,30 @@ var (
// NewErr creates a new Err described by the format string and args.
// TODO: Audit the use of this function and standardize the error messages and codes.
func NewErr(format string, args ...any) ref.Val {
- return &Err{fmt.Errorf(format, args...)}
+ return &Err{error: fmt.Errorf(format, args...)}
+}
+
+// NewErrFromString creates a new Err with the provided message.
+// TODO: Audit the use of this function and standardize the error messages and codes.
+func NewErrFromString(message string) ref.Val {
+ return &Err{error: errors.New(message)}
+}
+
+// NewErrWithNodeID creates a new Err described by the format string and args.
+// TODO: Audit the use of this function and standardize the error messages and codes.
+func NewErrWithNodeID(id int64, format string, args ...any) ref.Val {
+ return &Err{error: fmt.Errorf(format, args...), id: id}
+}
+
+// LabelErrNode returns val unaltered if it is not an Err, or if the error already has a
+// non-zero AST node ID. Otherwise the id is added to the error for
+// recovery with the Err.NodeID method.
+func LabelErrNode(id int64, val ref.Val) ref.Val {
+ if err, ok := val.(*Err); ok && err.id == 0 {
+ err.id = id
+ return err
+ }
+ return val
}
// NoSuchOverloadErr returns a new types.Err instance with a no such overload message.
@@ -124,6 +148,11 @@ func (e *Err) Value() any {
return e.error
}
+// NodeID returns the AST node ID of the expression that returned the error.
+func (e *Err) NodeID() int64 {
+ return e.id
+}
+
// Is implements errors.Is.
func (e *Err) Is(target error) bool {
return e.error.Error() == target.Error()
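
A hedged sketch of how the new node-ID plumbing can be exercised; the values are illustrative, not taken from this change:

    package main

    import (
        "fmt"

        "github.com/google/cel-go/common/types"
    )

    func main() {
        // Attach the AST node ID 7 to a freshly created error value,
        // then recover it via the new NodeID accessor.
        val := types.LabelErrNode(7, types.NewErrFromString("division by zero"))
        if e, ok := val.(*types.Err); ok {
            fmt.Println(e.NodeID()) // prints: 7
        }
    }
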
diff --git a/vendor/github.com/google/cel-go/common/types/format.go b/vendor/github.com/google/cel-go/common/types/format.go
new file mode 100644
index 000000000..174a2bd04
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/format.go
@@ -0,0 +1,42 @@
+package types
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+type formattable interface {
+ format(*strings.Builder)
+}
+
+// Format formats the value as a string. The result is only intended for human consumption and ignores errors.
+// Do not depend on the output being stable. It may change at any time.
+func Format(val ref.Val) string {
+ var sb strings.Builder
+ formatTo(&sb, val)
+ return sb.String()
+}
+
+func formatTo(sb *strings.Builder, val ref.Val) {
+ if fmtable, ok := val.(formattable); ok {
+ fmtable.format(sb)
+ return
+ }
+ // All of the builtins implement formattable. Try to deal with traits.
+ if l, ok := val.(traits.Lister); ok {
+ formatList(l, sb)
+ return
+ }
+ if m, ok := val.(traits.Mapper); ok {
+ formatMap(m, sb)
+ return
+ }
+ // This could be an error, unknown, opaque or object.
+ // Unfortunately we have no consistent way of inspecting
+ // opaque and object. So we just fall back to fmt.Stringer
+ // and hope it is relevant.
+ fmt.Fprintf(sb, "%s", val)
+}
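
Usage sketch for the new `Format` entry point; per the doc comment, the exact output shape is unstable, so the printed form below is only indicative:

    package main

    import (
        "fmt"

        "github.com/google/cel-go/common/types"
    )

    func main() {
        v := types.DefaultTypeAdapter.NativeToValue(map[string]any{"b": "hi", "a": []any{1, 2}})
        // Map entries are formatted and then sorted by key, so output is deterministic.
        fmt.Println(types.Format(v)) // e.g. {"a": [1, 2], "b": "hi"}
    }
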
diff --git a/vendor/github.com/google/cel-go/common/types/int.go b/vendor/github.com/google/cel-go/common/types/int.go
index 940772aed..0ac1997b7 100644
--- a/vendor/github.com/google/cel-go/common/types/int.go
+++ b/vendor/github.com/google/cel-go/common/types/int.go
@@ -19,6 +19,7 @@ import (
"math"
"reflect"
"strconv"
+ "strings"
"time"
"github.com/google/cel-go/common/types/ref"
@@ -90,6 +91,18 @@ func (i Int) ConvertToNative(typeDesc reflect.Type) (any, error) {
return nil, err
}
return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil
+ case reflect.Int8:
+ v, err := int64ToInt8Checked(int64(i))
+ if err != nil {
+ return nil, err
+ }
+ return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil
+ case reflect.Int16:
+ v, err := int64ToInt16Checked(int64(i))
+ if err != nil {
+ return nil, err
+ }
+ return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil
case reflect.Int64:
return reflect.ValueOf(i).Convert(typeDesc).Interface(), nil
case reflect.Ptr:
@@ -278,6 +291,10 @@ func (i Int) Value() any {
return int64(i)
}
+func (i Int) format(sb *strings.Builder) {
+ sb.WriteString(strconv.FormatInt(int64(i), 10))
+}
+
// isJSONSafe indicates whether the int is safely representable as a floating point value in JSON.
func (i Int) isJSONSafe() bool {
return i >= minIntJSON && i <= maxIntJSON
diff --git a/vendor/github.com/google/cel-go/common/types/list.go b/vendor/github.com/google/cel-go/common/types/list.go
index d4932b4a9..8c023f891 100644
--- a/vendor/github.com/google/cel-go/common/types/list.go
+++ b/vendor/github.com/google/cel-go/common/types/list.go
@@ -190,7 +190,13 @@ func (l *baseList) ConvertToNative(typeDesc reflect.Type) (any, error) {
// Allow the element ConvertToNative() function to determine whether conversion is possible.
otherElemType := typeDesc.Elem()
elemCount := l.size
- nativeList := reflect.MakeSlice(typeDesc, elemCount, elemCount)
+ var nativeList reflect.Value
+ if typeDesc.Kind() == reflect.Array {
+ nativeList = reflect.New(reflect.ArrayOf(elemCount, typeDesc)).Elem().Index(0)
+ } else {
+ nativeList = reflect.MakeSlice(typeDesc, elemCount, elemCount)
+
+ }
for i := 0; i < elemCount; i++ {
elem := l.NativeToValue(l.get(i))
nativeElemVal, err := elem.ConvertToNative(otherElemType)
@@ -237,7 +243,7 @@ func (l *baseList) Equal(other ref.Val) ref.Val {
func (l *baseList) Get(index ref.Val) ref.Val {
ind, err := IndexOrError(index)
if err != nil {
- return ValOrErr(index, err.Error())
+ return ValOrErr(index, "%v", err)
}
if ind < 0 || ind >= l.size {
return NewErr("index '%d' out of range in list size '%d'", ind, l.Size())
@@ -250,6 +256,15 @@ func (l *baseList) IsZeroValue() bool {
return l.size == 0
}
+// Fold calls the FoldEntry method for each (index, value) pair in the list.
+func (l *baseList) Fold(f traits.Folder) {
+ for i := 0; i < l.size; i++ {
+ if !f.FoldEntry(i, l.get(i)) {
+ break
+ }
+ }
+}
+
// Iterator implements the traits.Iterable interface method.
func (l *baseList) Iterator() traits.Iterator {
return newListIterator(l)
@@ -284,6 +299,22 @@ func (l *baseList) String() string {
return sb.String()
}
+func formatList(l traits.Lister, sb *strings.Builder) {
+ sb.WriteString("[")
+ n, _ := l.Size().(Int)
+ for i := 0; i < int(n); i++ {
+ formatTo(sb, l.Get(Int(i)))
+ if i != int(n)-1 {
+ sb.WriteString(", ")
+ }
+ }
+ sb.WriteString("]")
+}
+
+func (l *baseList) format(sb *strings.Builder) {
+ formatList(l, sb)
+}
+
// mutableList aggregates values into its internal storage. For use with internal CEL variables only.
type mutableList struct {
*baseList
@@ -412,7 +443,7 @@ func (l *concatList) Equal(other ref.Val) ref.Val {
func (l *concatList) Get(index ref.Val) ref.Val {
ind, err := IndexOrError(index)
if err != nil {
- return ValOrErr(index, err.Error())
+ return ValOrErr(index, "%v", err)
}
i := Int(ind)
if i < l.prevList.Size().(Int) {
@@ -427,6 +458,15 @@ func (l *concatList) IsZeroValue() bool {
return l.Size().(Int) == 0
}
+// Fold calls the FoldEntry method for each (index, value) pair in the list.
+func (l *concatList) Fold(f traits.Folder) {
+ for i := Int(0); i < l.Size().(Int); i++ {
+ if !f.FoldEntry(i, l.Get(i)) {
+ break
+ }
+ }
+}
+
// Iterator implements the traits.Iterable interface method.
func (l *concatList) Iterator() traits.Iterator {
return newListIterator(l)
@@ -521,3 +561,30 @@ func IndexOrError(index ref.Val) (int, error) {
return -1, fmt.Errorf("unsupported index type '%s' in list", index.Type())
}
}
+
+// ToFoldableList will create a Foldable version of a list suitable for key-value pair iteration.
+//
+// For values which are already Foldable, this call is a no-op. For all other values, the fold is
+// driven via the Size() and Get() calls which means that the folding will function, but take a
+// performance hit.
+func ToFoldableList(l traits.Lister) traits.Foldable {
+ if f, ok := l.(traits.Foldable); ok {
+ return f
+ }
+ return interopFoldableList{Lister: l}
+}
+
+type interopFoldableList struct {
+ traits.Lister
+}
+
+// Fold implements the traits.Foldable interface method and performs an iteration over the
+// range of elements of the list.
+func (l interopFoldableList) Fold(f traits.Folder) {
+ sz := l.Size().(Int)
+ for i := Int(0); i < sz; i++ {
+ if !f.FoldEntry(i, l.Get(i)) {
+ break
+ }
+ }
+}
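
A sketch of driving the new fold hooks with a custom `traits.Folder`; the `firstN` collector is illustrative only:

    package main

    import (
        "fmt"

        "github.com/google/cel-go/common/types"
        "github.com/google/cel-go/common/types/traits"
    )

    // firstN gathers at most max values; returning false stops the fold early.
    type firstN struct {
        max  int
        vals []any
    }

    func (f *firstN) FoldEntry(key, val any) bool {
        f.vals = append(f.vals, val)
        return len(f.vals) < f.max
    }

    func main() {
        l := types.DefaultTypeAdapter.NativeToValue([]int{1, 2, 3, 4}).(traits.Lister)
        c := &firstN{max: 2}
        types.ToFoldableList(l).Fold(c)
        fmt.Println(c.vals) // prints: [1 2]
    }
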
diff --git a/vendor/github.com/google/cel-go/common/types/map.go b/vendor/github.com/google/cel-go/common/types/map.go
index 739b7aab0..b33096197 100644
--- a/vendor/github.com/google/cel-go/common/types/map.go
+++ b/vendor/github.com/google/cel-go/common/types/map.go
@@ -17,6 +17,7 @@ package types
import (
"fmt"
"reflect"
+ "sort"
"strings"
"github.com/stoewer/go-strcase"
@@ -94,6 +95,24 @@ func NewProtoMap(adapter Adapter, value *pb.Map) traits.Mapper {
}
}
+// NewMutableMap constructs a mutable map from an adapter and a set of map values.
+func NewMutableMap(adapter Adapter, mutableValues map[ref.Val]ref.Val) traits.MutableMapper {
+ mutableCopy := make(map[ref.Val]ref.Val, len(mutableValues))
+ for k, v := range mutableValues {
+ mutableCopy[k] = v
+ }
+ m := &mutableMap{
+ baseMap: &baseMap{
+ Adapter: adapter,
+ mapAccessor: newRefValMapAccessor(mutableCopy),
+ value: mutableCopy,
+ size: len(mutableCopy),
+ },
+ mutableValues: mutableCopy,
+ }
+ return m
+}
+
// mapAccessor is a private interface for finding values within a map and iterating over the keys.
// This interface implements portions of the API surface area required by the traits.Mapper
// interface.
@@ -105,6 +124,9 @@ type mapAccessor interface {
// Iterator returns an Iterator over the map key set.
Iterator() traits.Iterator
+
+ // Fold calls the FoldEntry method for each (key, value) pair in the map.
+ Fold(traits.Folder)
}
// baseMap is a reflection based map implementation designed to handle a variety of map-like types.
@@ -297,6 +319,41 @@ func (m *baseMap) String() string {
return sb.String()
}
+type baseMapEntry struct {
+ key string
+ val string
+}
+
+func formatMap(m traits.Mapper, sb *strings.Builder) {
+ it := m.Iterator()
+ var ents []baseMapEntry
+ if s, ok := m.Size().(Int); ok {
+ ents = make([]baseMapEntry, 0, int(s))
+ }
+ for it.HasNext() == True {
+ k := it.Next()
+ v, _ := m.Find(k)
+ ents = append(ents, baseMapEntry{Format(k), Format(v)})
+ }
+ sort.SliceStable(ents, func(i, j int) bool {
+ return ents[i].key < ents[j].key
+ })
+ sb.WriteString("{")
+ for i, ent := range ents {
+ if i > 0 {
+ sb.WriteString(", ")
+ }
+ sb.WriteString(ent.key)
+ sb.WriteString(": ")
+ sb.WriteString(ent.val)
+ }
+ sb.WriteString("}")
+}
+
+func (m *baseMap) format(sb *strings.Builder) {
+ formatMap(m, sb)
+}
+
// Type implements the ref.Val interface method.
func (m *baseMap) Type() ref.Type {
return MapType
@@ -307,6 +364,28 @@ func (m *baseMap) Value() any {
return m.value
}
+// mutableMap holds onto a set of mutable values which are used for intermediate computations.
+type mutableMap struct {
+ *baseMap
+ mutableValues map[ref.Val]ref.Val
+}
+
+// Insert implements the traits.MutableMapper interface method, returning true if the key insertion
+// succeeds.
+func (m *mutableMap) Insert(k, v ref.Val) ref.Val {
+ if _, found := m.Find(k); found {
+ return NewErr("insert failed: key %v already exists", k)
+ }
+ m.mutableValues[k] = v
+ return m
+}
+
+// ToImmutableMap implements the traits.MutableMapper interface method, converting a mutable map
+// to an immutable map implementation.
+func (m *mutableMap) ToImmutableMap() traits.Mapper {
+ return NewRefValMap(m.Adapter, m.mutableValues)
+}
+
func newJSONStructAccessor(adapter Adapter, st map[string]*structpb.Value) mapAccessor {
return &jsonStructAccessor{
Adapter: adapter,
@@ -350,6 +429,15 @@ func (a *jsonStructAccessor) Iterator() traits.Iterator {
}
}
+// Fold calls the FoldEntry method for each (key, value) pair in the map.
+func (a *jsonStructAccessor) Fold(f traits.Folder) {
+ for k, v := range a.st {
+ if !f.FoldEntry(k, v) {
+ break
+ }
+ }
+}
+
func newReflectMapAccessor(adapter Adapter, value reflect.Value) mapAccessor {
keyType := value.Type().Key()
return &reflectMapAccessor{
@@ -424,6 +512,16 @@ func (m *reflectMapAccessor) Iterator() traits.Iterator {
}
}
+// Fold calls the FoldEntry method for each (key, value) pair in the map.
+func (m *reflectMapAccessor) Fold(f traits.Folder) {
+ mapRange := m.refValue.MapRange()
+ for mapRange.Next() {
+ if !f.FoldEntry(mapRange.Key().Interface(), mapRange.Value().Interface()) {
+ break
+ }
+ }
+}
+
func newRefValMapAccessor(mapVal map[ref.Val]ref.Val) mapAccessor {
return &refValMapAccessor{mapVal: mapVal}
}
@@ -477,6 +575,15 @@ func (a *refValMapAccessor) Iterator() traits.Iterator {
}
}
+// Fold calls the FoldEntry method for each (key, value) pair in the map.
+func (a *refValMapAccessor) Fold(f traits.Folder) {
+ for k, v := range a.mapVal {
+ if !f.FoldEntry(k, v) {
+ break
+ }
+ }
+}
+
func newStringMapAccessor(strMap map[string]string) mapAccessor {
return &stringMapAccessor{mapVal: strMap}
}
@@ -515,6 +622,15 @@ func (a *stringMapAccessor) Iterator() traits.Iterator {
}
}
+// Fold calls the FoldEntry method for each (key, value) pair in the map.
+func (a *stringMapAccessor) Fold(f traits.Folder) {
+ for k, v := range a.mapVal {
+ if !f.FoldEntry(k, v) {
+ break
+ }
+ }
+}
+
func newStringIfaceMapAccessor(adapter Adapter, mapVal map[string]any) mapAccessor {
return &stringIfaceMapAccessor{
Adapter: adapter,
@@ -557,6 +673,15 @@ func (a *stringIfaceMapAccessor) Iterator() traits.Iterator {
}
}
+// Fold calls the FoldEntry method for each (key, value) pair in the map.
+func (a *stringIfaceMapAccessor) Fold(f traits.Folder) {
+ for k, v := range a.mapVal {
+ if !f.FoldEntry(k, v) {
+ break
+ }
+ }
+}
+
// protoMap is a specialized, separate implementation of the traits.Mapper interfaces tailored to
// accessing protoreflect.Map values.
type protoMap struct {
@@ -769,6 +894,13 @@ func (m *protoMap) Iterator() traits.Iterator {
}
}
+// Fold calls the FoldEntry method for each (key, value) pair in the map.
+func (m *protoMap) Fold(f traits.Folder) {
+ m.value.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
+ return f.FoldEntry(k.Interface(), v.Interface())
+ })
+}
+
// Size returns the number of entries in the protoreflect.Map.
func (m *protoMap) Size() ref.Val {
return Int(m.value.Len())
@@ -852,3 +984,55 @@ func (it *stringKeyIterator) Next() ref.Val {
}
return nil
}
+
+// ToFoldableMap will create a Foldable version of a map suitable for key-value pair iteration.
+//
+// For values which are already Foldable, this call is a no-op. For all other values, the fold
+// is driven via the Iterator HasNext() and Next() calls as well as the map's Get() method
+// which means that the folding will function, but take a performance hit.
+func ToFoldableMap(m traits.Mapper) traits.Foldable {
+ if f, ok := m.(traits.Foldable); ok {
+ return f
+ }
+ return interopFoldableMap{Mapper: m}
+}
+
+type interopFoldableMap struct {
+ traits.Mapper
+}
+
+func (m interopFoldableMap) Fold(f traits.Folder) {
+ it := m.Iterator()
+ for it.HasNext() == True {
+ k := it.Next()
+ if !f.FoldEntry(k, m.Get(k)) {
+ break
+ }
+ }
+}
+
+// InsertMapKeyValue inserts a key, value pair into the target map if the target map does not
+// already contain the given key.
+//
+// If the map is mutable, it is modified in-place per the MutableMapper contract.
+// If the map is not mutable, a copy containing the new key, value pair is made.
+func InsertMapKeyValue(m traits.Mapper, k, v ref.Val) ref.Val {
+ if mutable, ok := m.(traits.MutableMapper); ok {
+ return mutable.Insert(k, v)
+ }
+
+ // Otherwise perform the slow version of the insertion which makes a copy of the incoming map.
+ if _, found := m.Find(k); !found {
+ size := m.Size().(Int)
+ copy := make(map[ref.Val]ref.Val, size+1)
+ copy[k] = v
+ it := m.Iterator()
+ for it.HasNext() == True {
+ nextK := it.Next()
+ nextV := m.Get(nextK)
+ copy[nextK] = nextV
+ }
+ return DefaultTypeAdapter.NativeToValue(copy)
+ }
+ return NewErr("insert failed: key %v already exists", k)
+}
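
A short sketch of the mutable-map flow introduced above (the `MutableMapper` contract, described later in this diff, reserves this for comprehension internals):

    package main

    import (
        "fmt"

        "github.com/google/cel-go/common/types"
        "github.com/google/cel-go/common/types/ref"
    )

    func main() {
        m := types.NewMutableMap(types.DefaultTypeAdapter, map[ref.Val]ref.Val{})
        m.Insert(types.String("a"), types.Int(1))        // succeeds, returns the map
        dup := m.Insert(types.String("a"), types.Int(2)) // fails: key already exists
        fmt.Println(types.IsError(dup))                  // prints: true
        fmt.Println(types.Format(m.ToImmutableMap()))    // prints: {"a": 1}
    }
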
diff --git a/vendor/github.com/google/cel-go/common/types/null.go b/vendor/github.com/google/cel-go/common/types/null.go
index 926ca3dc9..2c0297fe6 100644
--- a/vendor/github.com/google/cel-go/common/types/null.go
+++ b/vendor/github.com/google/cel-go/common/types/null.go
@@ -17,6 +17,7 @@ package types
import (
"fmt"
"reflect"
+ "strings"
"google.golang.org/protobuf/proto"
@@ -35,6 +36,8 @@ var (
// golang reflect type for Null values.
nullReflectType = reflect.TypeOf(NullValue)
+
+ protoIfaceType = reflect.TypeOf((*proto.Message)(nil)).Elem()
)
// ConvertToNative implements ref.Val.ConvertToNative.
@@ -61,8 +64,14 @@ func (n Null) ConvertToNative(typeDesc reflect.Type) (any, error) {
return structpb.NewNullValue(), nil
case boolWrapperType, byteWrapperType, doubleWrapperType, floatWrapperType,
int32WrapperType, int64WrapperType, stringWrapperType, uint32WrapperType,
- uint64WrapperType:
+ uint64WrapperType, durationValueType, timestampValueType, protoIfaceType:
return nil, nil
+ case jsonListValueType, jsonStructType:
+ // skip handling
+ default:
+ if typeDesc.Implements(protoIfaceType) {
+ return nil, nil
+ }
}
case reflect.Interface:
nv := n.Value()
@@ -109,3 +118,7 @@ func (n Null) Type() ref.Type {
func (n Null) Value() any {
return structpb.NullValue_NULL_VALUE
}
+
+func (n Null) format(sb *strings.Builder) {
+ sb.WriteString("null")
+}
diff --git a/vendor/github.com/google/cel-go/common/types/object.go b/vendor/github.com/google/cel-go/common/types/object.go
index 8ba0af9fb..776f6954a 100644
--- a/vendor/github.com/google/cel-go/common/types/object.go
+++ b/vendor/github.com/google/cel-go/common/types/object.go
@@ -17,9 +17,12 @@ package types
import (
"fmt"
"reflect"
+ "sort"
+ "strings"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
"github.com/google/cel-go/common/types/pb"
"github.com/google/cel-go/common/types/ref"
@@ -151,7 +154,7 @@ func (o *protoObj) Get(index ref.Val) ref.Val {
}
fv, err := fd.GetFrom(o.value)
if err != nil {
- return NewErr(err.Error())
+ return NewErrFromString(err.Error())
}
return o.NativeToValue(fv)
}
@@ -163,3 +166,29 @@ func (o *protoObj) Type() ref.Type {
func (o *protoObj) Value() any {
return o.value
}
+
+type protoObjField struct {
+ fd protoreflect.FieldDescriptor
+ v protoreflect.Value
+}
+
+func (o *protoObj) format(sb *strings.Builder) {
+ var fields []protoreflect.FieldDescriptor
+ o.value.ProtoReflect().Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ fields = append(fields, fd)
+ return true
+ })
+ sort.SliceStable(fields, func(i, j int) bool {
+ return fields[i].Number() < fields[j].Number()
+ })
+ sb.WriteString(o.Type().TypeName())
+ sb.WriteString("{")
+ for i, field := range fields {
+ if i > 0 {
+ sb.WriteString(", ")
+ }
+ sb.WriteString(fmt.Sprintf("%s: ", field.Name()))
+ formatTo(sb, o.Get(String(field.Name())))
+ }
+ sb.WriteString("}")
+}
diff --git a/vendor/github.com/google/cel-go/common/types/optional.go b/vendor/github.com/google/cel-go/common/types/optional.go
index a9f30aed0..b8685ebf5 100644
--- a/vendor/github.com/google/cel-go/common/types/optional.go
+++ b/vendor/github.com/google/cel-go/common/types/optional.go
@@ -18,13 +18,14 @@ import (
"errors"
"fmt"
"reflect"
+ "strings"
"github.com/google/cel-go/common/types/ref"
)
var (
// OptionalType indicates the runtime type of an optional value.
- OptionalType = NewOpaqueType("optional")
+ OptionalType = NewOpaqueType("optional_type")
// OptionalNone is a sentinel value which is used to indicate an empty optional value.
OptionalNone = &Optional{}
@@ -94,6 +95,16 @@ func (o *Optional) String() string {
return "optional.none()"
}
+func (o *Optional) format(sb *strings.Builder) {
+ if o.HasValue() {
+ sb.WriteString(`optional.of(`)
+ formatTo(sb, o.GetValue())
+ sb.WriteString(`)`)
+ } else {
+ sb.WriteString("optional.none()")
+ }
+}
+
// Type implements the ref.Val interface method.
func (o *Optional) Type() ref.Type {
return OptionalType
diff --git a/vendor/github.com/google/cel-go/common/types/overflow.go b/vendor/github.com/google/cel-go/common/types/overflow.go
index c68a92182..dcb66ef59 100644
--- a/vendor/github.com/google/cel-go/common/types/overflow.go
+++ b/vendor/github.com/google/cel-go/common/types/overflow.go
@@ -326,6 +326,26 @@ func int64ToUint64Checked(v int64) (uint64, error) {
return uint64(v), nil
}
+// int64ToInt8Checked converts an int64 to an int8 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func int64ToInt8Checked(v int64) (int8, error) {
+ if v < math.MinInt8 || v > math.MaxInt8 {
+ return 0, errIntOverflow
+ }
+ return int8(v), nil
+}
+
+// int64ToInt16Checked converts an int64 to an int16 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func int64ToInt16Checked(v int64) (int16, error) {
+ if v < math.MinInt16 || v > math.MaxInt16 {
+ return 0, errIntOverflow
+ }
+ return int16(v), nil
+}
+
// int64ToInt32Checked converts an int64 to an int32 value.
//
// If the conversion fails due to overflow the error return value will be non-nil.
@@ -336,6 +356,26 @@ func int64ToInt32Checked(v int64) (int32, error) {
return int32(v), nil
}
+// uint64ToUint8Checked converts a uint64 to a uint8 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func uint64ToUint8Checked(v uint64) (uint8, error) {
+ if v > math.MaxUint8 {
+ return 0, errUintOverflow
+ }
+ return uint8(v), nil
+}
+
+// uint64ToUint16Checked converts a uint64 to a uint16 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func uint64ToUint16Checked(v uint64) (uint16, error) {
+ if v > math.MaxUint16 {
+ return 0, errUintOverflow
+ }
+ return uint16(v), nil
+}
+
// uint64ToUint32Checked converts a uint64 to a uint32 value.
//
// If the conversion fails due to overflow the error return value will be non-nil.
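
These checked helpers surface through `ConvertToNative` for the narrow integer kinds (see the int.go and uint.go hunks in this diff); a behavior sketch:

    package main

    import (
        "fmt"
        "reflect"

        "github.com/google/cel-go/common/types"
    )

    func main() {
        // In range: converts cleanly to int8.
        v, err := types.Int(100).ConvertToNative(reflect.TypeOf(int8(0)))
        fmt.Println(v, err) // prints: 100 <nil>

        // Out of range: the checked conversion reports an overflow.
        _, err = types.Int(200).ConvertToNative(reflect.TypeOf(int8(0)))
        fmt.Println(err) // prints: integer overflow
    }
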
diff --git a/vendor/github.com/google/cel-go/common/types/pb/type.go b/vendor/github.com/google/cel-go/common/types/pb/type.go
index 6cc95c276..bdd474c95 100644
--- a/vendor/github.com/google/cel-go/common/types/pb/type.go
+++ b/vendor/github.com/google/cel-go/common/types/pb/type.go
@@ -427,22 +427,49 @@ func unwrap(desc description, msg proto.Message) (any, bool, error) {
return structpb.NullValue_NULL_VALUE, true, nil
}
case *wrapperspb.BoolValue:
+ if v == nil {
+ return nil, true, nil
+ }
return v.GetValue(), true, nil
case *wrapperspb.BytesValue:
+ if v == nil {
+ return nil, true, nil
+ }
return v.GetValue(), true, nil
case *wrapperspb.DoubleValue:
+ if v == nil {
+ return nil, true, nil
+ }
return v.GetValue(), true, nil
case *wrapperspb.FloatValue:
+ if v == nil {
+ return nil, true, nil
+ }
return float64(v.GetValue()), true, nil
case *wrapperspb.Int32Value:
+ if v == nil {
+ return nil, true, nil
+ }
return int64(v.GetValue()), true, nil
case *wrapperspb.Int64Value:
+ if v == nil {
+ return nil, true, nil
+ }
return v.GetValue(), true, nil
case *wrapperspb.StringValue:
+ if v == nil {
+ return nil, true, nil
+ }
return v.GetValue(), true, nil
case *wrapperspb.UInt32Value:
+ if v == nil {
+ return nil, true, nil
+ }
return uint64(v.GetValue()), true, nil
case *wrapperspb.UInt64Value:
+ if v == nil {
+ return nil, true, nil
+ }
return v.GetValue(), true, nil
}
return msg, false, nil
diff --git a/vendor/github.com/google/cel-go/common/types/provider.go b/vendor/github.com/google/cel-go/common/types/provider.go
index e80b4622e..936a4e28b 100644
--- a/vendor/github.com/google/cel-go/common/types/provider.go
+++ b/vendor/github.com/google/cel-go/common/types/provider.go
@@ -54,6 +54,10 @@ type Provider interface {
// Returns false if not found.
FindStructType(structType string) (*Type, bool)
+ // FindStructFieldNames returns the field names associated with the type, if the type
+ // is found.
+ FindStructFieldNames(structType string) ([]string, bool)
+
// FieldStructFieldType returns the field type for a checked type value. Returns
// false if the field could not be found.
FindStructFieldType(structType, fieldName string) (*FieldType, bool)
@@ -154,7 +158,7 @@ func (p *Registry) EnumValue(enumName string) ref.Val {
return Int(enumVal.Value())
}
-// FieldFieldType returns the field type for a checked type value. Returns false if
+// FindFieldType returns the field type for a checked type value. Returns false if
// the field could not be found.
//
// Deprecated: use FindStructFieldType
@@ -173,7 +177,24 @@ func (p *Registry) FindFieldType(structType, fieldName string) (*ref.FieldType,
GetFrom: field.GetFrom}, true
}
-// FieldStructFieldType returns the field type for a checked type value. Returns
+// FindStructFieldNames returns the set of field names for the given struct type,
+// if the type exists in the registry.
+func (p *Registry) FindStructFieldNames(structType string) ([]string, bool) {
+ msgType, found := p.pbdb.DescribeType(structType)
+ if !found {
+ return []string{}, false
+ }
+ fieldMap := msgType.FieldMap()
+ fields := make([]string, len(fieldMap))
+ idx := 0
+ for f := range fieldMap {
+ fields[idx] = f
+ idx++
+ }
+ return fields, true
+}
+
+// FindStructFieldType returns the field type for a checked type value. Returns
// false if the field could not be found.
func (p *Registry) FindStructFieldType(structType, fieldName string) (*FieldType, bool) {
msgType, found := p.pbdb.DescribeType(structType)
@@ -255,7 +276,7 @@ func (p *Registry) NewValue(structType string, fields map[string]ref.Val) ref.Va
}
err := msgSetField(msg, field, value)
if err != nil {
- return &Err{err}
+ return &Err{error: err}
}
}
return p.NativeToValue(msg.Interface())
@@ -564,17 +585,46 @@ func nativeToValue(a Adapter, value any) (ref.Val, bool) {
refKind := refValue.Kind()
switch refKind {
case reflect.Array, reflect.Slice:
+ if refValue.Type().Elem() == reflect.TypeOf(byte(0)) {
+ if refValue.CanAddr() {
+ return Bytes(refValue.Bytes()), true
+ }
+ tmp := reflect.New(refValue.Type())
+ tmp.Elem().Set(refValue)
+ return Bytes(tmp.Elem().Bytes()), true
+ }
return NewDynamicList(a, v), true
case reflect.Map:
return NewDynamicMap(a, v), true
// type aliases of primitive types cannot be asserted as that type, but rather need
// to be downcast to int32 before being converted to a CEL representation.
+ case reflect.Bool:
+ boolType := reflect.TypeOf(false)
+ return Bool(refValue.Convert(boolType).Interface().(bool)), true
+ case reflect.Int:
+ intType := reflect.TypeOf(int(0))
+ return Int(refValue.Convert(intType).Interface().(int)), true
+ case reflect.Int8:
+ intType := reflect.TypeOf(int8(0))
+ return Int(refValue.Convert(intType).Interface().(int8)), true
+ case reflect.Int16:
+ intType := reflect.TypeOf(int16(0))
+ return Int(refValue.Convert(intType).Interface().(int16)), true
case reflect.Int32:
intType := reflect.TypeOf(int32(0))
return Int(refValue.Convert(intType).Interface().(int32)), true
case reflect.Int64:
intType := reflect.TypeOf(int64(0))
return Int(refValue.Convert(intType).Interface().(int64)), true
+ case reflect.Uint:
+ uintType := reflect.TypeOf(uint(0))
+ return Uint(refValue.Convert(uintType).Interface().(uint)), true
+ case reflect.Uint8:
+ uintType := reflect.TypeOf(uint8(0))
+ return Uint(refValue.Convert(uintType).Interface().(uint8)), true
+ case reflect.Uint16:
+ uintType := reflect.TypeOf(uint16(0))
+ return Uint(refValue.Convert(uintType).Interface().(uint16)), true
case reflect.Uint32:
uintType := reflect.TypeOf(uint32(0))
return Uint(refValue.Convert(uintType).Interface().(uint32)), true
@@ -587,6 +637,9 @@ func nativeToValue(a Adapter, value any) (ref.Val, bool) {
case reflect.Float64:
doubleType := reflect.TypeOf(float64(0))
return Double(refValue.Convert(doubleType).Interface().(float64)), true
+ case reflect.String:
+ stringType := reflect.TypeOf("")
+ return String(refValue.Convert(stringType).Interface().(string)), true
}
}
return nil, false
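
With the widened reflection switch above, named aliases of narrow primitives and fixed-size byte arrays now adapt without custom glue; a sketch (`level` is an illustrative alias, not from this change):

    package main

    import (
        "fmt"

        "github.com/google/cel-go/common/types"
    )

    type level uint8 // a named alias of a narrow primitive

    func main() {
        v := types.DefaultTypeAdapter.NativeToValue(level(3))
        fmt.Println(types.Format(v)) // prints: 3u

        // Fixed-size byte arrays take the new Bytes fast path.
        b := types.DefaultTypeAdapter.NativeToValue([2]byte{'h', 'i'})
        fmt.Println(types.Format(b)) // prints: b"\150\151"
    }
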
diff --git a/vendor/github.com/google/cel-go/common/types/string.go b/vendor/github.com/google/cel-go/common/types/string.go
index 028e6824d..8aad4701c 100644
--- a/vendor/github.com/google/cel-go/common/types/string.go
+++ b/vendor/github.com/google/cel-go/common/types/string.go
@@ -66,10 +66,7 @@ func (s String) Compare(other ref.Val) ref.Val {
func (s String) ConvertToNative(typeDesc reflect.Type) (any, error) {
switch typeDesc.Kind() {
case reflect.String:
- if reflect.TypeOf(s).AssignableTo(typeDesc) {
- return s, nil
- }
- return s.Value(), nil
+ return reflect.ValueOf(s).Convert(typeDesc).Interface(), nil
case reflect.Ptr:
switch typeDesc {
case anyValueType:
@@ -158,7 +155,7 @@ func (s String) Match(pattern ref.Val) ref.Val {
}
matched, err := regexp.MatchString(pat.Value().(string), s.Value().(string))
if err != nil {
- return &Err{err}
+ return &Err{error: err}
}
return Bool(matched)
}
@@ -189,6 +186,10 @@ func (s String) Value() any {
return string(s)
}
+func (s String) format(sb *strings.Builder) {
+ sb.WriteString(strconv.Quote(string(s)))
+}
+
// StringContains returns whether the string contains a substring.
func StringContains(s, sub ref.Val) ref.Val {
str, ok := s.(String)
diff --git a/vendor/github.com/google/cel-go/common/types/timestamp.go b/vendor/github.com/google/cel-go/common/types/timestamp.go
index 33acdea8e..f7be58591 100644
--- a/vendor/github.com/google/cel-go/common/types/timestamp.go
+++ b/vendor/github.com/google/cel-go/common/types/timestamp.go
@@ -179,6 +179,10 @@ func (t Timestamp) Value() any {
return t.Time
}
+func (t Timestamp) format(sb *strings.Builder) {
+ fmt.Fprintf(sb, `timestamp("%s")`, t.Time.UTC().Format(time.RFC3339Nano))
+}
+
var (
timestampValueType = reflect.TypeOf(&tpb.Timestamp{})
diff --git a/vendor/github.com/google/cel-go/common/types/traits/iterator.go b/vendor/github.com/google/cel-go/common/types/traits/iterator.go
index 42dd371aa..91c10f08f 100644
--- a/vendor/github.com/google/cel-go/common/types/traits/iterator.go
+++ b/vendor/github.com/google/cel-go/common/types/traits/iterator.go
@@ -34,3 +34,16 @@ type Iterator interface {
// Next returns the next element.
Next() ref.Val
}
+
+// Foldable aggregate types support iteration over (key, value) or (index, value) pairs.
+type Foldable interface {
+ // Fold invokes the Folder.FoldEntry for all entries in the type
+ Fold(Folder)
+}
+
+// Folder performs a fold on a given entry and indicates whether to continue folding.
+type Folder interface {
+ // FoldEntry indicates the key, value pair associated with the entry.
+ // If the output is true, continue folding. Otherwise, terminate the fold.
+ FoldEntry(key, val any) bool
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/lister.go b/vendor/github.com/google/cel-go/common/types/traits/lister.go
index 5cf2593f3..e54781a60 100644
--- a/vendor/github.com/google/cel-go/common/types/traits/lister.go
+++ b/vendor/github.com/google/cel-go/common/types/traits/lister.go
@@ -27,6 +27,9 @@ type Lister interface {
}
// MutableLister interface which emits an immutable result after an intermediate computation.
+//
+// Note, this interface is intended only to be used within Comprehensions where the mutable
+// value is not directly observable within the user-authored CEL expression.
type MutableLister interface {
Lister
ToImmutableList() Lister
diff --git a/vendor/github.com/google/cel-go/common/types/traits/mapper.go b/vendor/github.com/google/cel-go/common/types/traits/mapper.go
index 2f7c919a8..d13333f3f 100644
--- a/vendor/github.com/google/cel-go/common/types/traits/mapper.go
+++ b/vendor/github.com/google/cel-go/common/types/traits/mapper.go
@@ -31,3 +31,18 @@ type Mapper interface {
// (Unknown|Err, false).
Find(key ref.Val) (ref.Val, bool)
}
+
+// MutableMapper interface which emits an immutable result after an intermediate computation.
+//
+// Note, this interface is intended only to be used within Comprehensions where the mutable
+// value is not directly observable within the user-authored CEL expression.
+type MutableMapper interface {
+ Mapper
+
+ // Insert a key, value pair into the map, returning the map if the insert is successful
+ // and an error if key already exists in the mutable map.
+ Insert(k, v ref.Val) ref.Val
+
+ // ToImmutableMap converts a mutable map into an immutable map.
+ ToImmutableMap() Mapper
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/traits.go b/vendor/github.com/google/cel-go/common/types/traits/traits.go
index 6da3e6a3e..51a09df56 100644
--- a/vendor/github.com/google/cel-go/common/types/traits/traits.go
+++ b/vendor/github.com/google/cel-go/common/types/traits/traits.go
@@ -59,6 +59,21 @@ const (
// SizerType types support the size() method.
SizerType
- // SubtractorType type support '-' operations.
+ // SubtractorType types support '-' operations.
SubtractorType
+
+ // FoldableType types support comprehensions v2 macros which iterate over (key, value) pairs.
+ FoldableType
+)
+
+const (
+ // ListerType supports a set of traits necessary for list operations.
+ //
+ // The ListerType is syntactic sugar and not intended to be a perfect reflection of all List operators.
+ ListerType = AdderType | ContainerType | IndexerType | IterableType | SizerType
+
+ // MapperType supports a set of traits necessary for map operations.
+ //
+ // The MapperType is syntactic sugar and not intended to be a perfect reflection of all Map operators.
+ MapperType = ContainerType | IndexerType | IterableType | SizerType
)
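
The new trait bundles are meant to pair with the `Type.WithTraits` helper added in the types.go hunk below; a hedged sketch for a container-like opaque type:

    package main

    import (
        "fmt"

        "github.com/google/cel-go/common/types"
        "github.com/google/cel-go/common/types/traits"
    )

    func main() {
        // Declare an opaque "vector" type that advertises list-style traits so the
        // runtime treats it as addable, indexable, iterable, and sizable.
        vectorType := types.NewOpaqueType("vector", types.IntType).WithTraits(traits.ListerType)
        fmt.Println(vectorType.HasTrait(traits.IndexerType)) // prints: true
    }
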
diff --git a/vendor/github.com/google/cel-go/common/types/types.go b/vendor/github.com/google/cel-go/common/types/types.go
index 76624eefd..78c77a9b5 100644
--- a/vendor/github.com/google/cel-go/common/types/types.go
+++ b/vendor/github.com/google/cel-go/common/types/types.go
@@ -19,10 +19,13 @@ import (
"reflect"
"strings"
+ "google.golang.org/protobuf/proto"
+
chkdecls "github.com/google/cel-go/checker/decls"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/common/types/traits"
+ celpb "cel.dev/expr"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
@@ -161,9 +164,9 @@ var (
traits.SubtractorType,
}
// ListType represents the runtime list type.
- ListType = NewListType(nil)
+ ListType = NewListType(DynType)
// MapType represents the runtime map type.
- MapType = NewMapType(nil, nil)
+ MapType = NewMapType(DynType, DynType)
// NullType represents the type of a null value.
NullType = &Type{
kind: NullTypeKind,
@@ -373,8 +376,32 @@ func (t *Type) TypeName() string {
return t.runtimeTypeName
}
+func (t *Type) format(sb *strings.Builder) {
+ sb.WriteString(t.TypeName())
+}
+
+// WithTraits creates a copy of the current Type and sets the trait mask to the traits parameter.
+//
+// This method should be used with Opaque types where the type acts like a container, e.g. vector.
+func (t *Type) WithTraits(traits int) *Type {
+ if t == nil {
+ return nil
+ }
+ return &Type{
+ kind: t.kind,
+ parameters: t.parameters,
+ runtimeTypeName: t.runtimeTypeName,
+ isAssignableType: t.isAssignableType,
+ isAssignableRuntimeType: t.isAssignableRuntimeType,
+ traitMask: traits,
+ }
+}
+
// String returns a human-readable definition of the type name.
func (t *Type) String() string {
+ if t.Kind() == TypeParamKind {
+ return fmt.Sprintf("<%s>", t.DeclaredTypeName())
+ }
if len(t.Parameters()) == 0 {
return t.DeclaredTypeName()
}
@@ -496,7 +523,7 @@ func NewNullableType(wrapped *Type) *Type {
// NewOptionalType creates an abstract parameterized type instance corresponding to CEL's notion of optional.
func NewOptionalType(param *Type) *Type {
- return NewOpaqueType("optional", param)
+ return NewOpaqueType("optional_type", param)
}
// NewOpaqueType creates an abstract parameterized type with a given name.
@@ -649,91 +676,118 @@ func TypeToExprType(t *Type) (*exprpb.Type, error) {
// ExprTypeToType converts a protobuf CEL type representation to a CEL-native type representation.
func ExprTypeToType(t *exprpb.Type) (*Type, error) {
+ return AlphaProtoAsType(t)
+}
+
+// AlphaProtoAsType converts a CEL v1alpha1.Type protobuf type to a CEL-native type representation.
+func AlphaProtoAsType(t *exprpb.Type) (*Type, error) {
+ canonical := &celpb.Type{}
+ if err := convertProto(t, canonical); err != nil {
+ return nil, err
+ }
+ return ProtoAsType(canonical)
+}
+
+// ProtoAsType converts a canonical CEL celpb.Type protobuf type to a CEL-native type representation.
+func ProtoAsType(t *celpb.Type) (*Type, error) {
switch t.GetTypeKind().(type) {
- case *exprpb.Type_Dyn:
+ case *celpb.Type_Dyn:
return DynType, nil
- case *exprpb.Type_AbstractType_:
+ case *celpb.Type_AbstractType_:
paramTypes := make([]*Type, len(t.GetAbstractType().GetParameterTypes()))
for i, p := range t.GetAbstractType().GetParameterTypes() {
- pt, err := ExprTypeToType(p)
+ pt, err := ProtoAsType(p)
if err != nil {
return nil, err
}
paramTypes[i] = pt
}
return NewOpaqueType(t.GetAbstractType().GetName(), paramTypes...), nil
- case *exprpb.Type_ListType_:
- et, err := ExprTypeToType(t.GetListType().GetElemType())
+ case *celpb.Type_ListType_:
+ et, err := ProtoAsType(t.GetListType().GetElemType())
if err != nil {
return nil, err
}
return NewListType(et), nil
- case *exprpb.Type_MapType_:
- kt, err := ExprTypeToType(t.GetMapType().GetKeyType())
+ case *celpb.Type_MapType_:
+ kt, err := ProtoAsType(t.GetMapType().GetKeyType())
if err != nil {
return nil, err
}
- vt, err := ExprTypeToType(t.GetMapType().GetValueType())
+ vt, err := ProtoAsType(t.GetMapType().GetValueType())
if err != nil {
return nil, err
}
return NewMapType(kt, vt), nil
- case *exprpb.Type_MessageType:
+ case *celpb.Type_MessageType:
return NewObjectType(t.GetMessageType()), nil
- case *exprpb.Type_Null:
+ case *celpb.Type_Null:
return NullType, nil
- case *exprpb.Type_Primitive:
+ case *celpb.Type_Primitive:
switch t.GetPrimitive() {
- case exprpb.Type_BOOL:
+ case celpb.Type_BOOL:
return BoolType, nil
- case exprpb.Type_BYTES:
+ case celpb.Type_BYTES:
return BytesType, nil
- case exprpb.Type_DOUBLE:
+ case celpb.Type_DOUBLE:
return DoubleType, nil
- case exprpb.Type_INT64:
+ case celpb.Type_INT64:
return IntType, nil
- case exprpb.Type_STRING:
+ case celpb.Type_STRING:
return StringType, nil
- case exprpb.Type_UINT64:
+ case celpb.Type_UINT64:
return UintType, nil
default:
return nil, fmt.Errorf("unsupported primitive type: %v", t)
}
- case *exprpb.Type_TypeParam:
+ case *celpb.Type_TypeParam:
return NewTypeParamType(t.GetTypeParam()), nil
- case *exprpb.Type_Type:
+ case *celpb.Type_Type:
if t.GetType().GetTypeKind() != nil {
- p, err := ExprTypeToType(t.GetType())
+ p, err := ProtoAsType(t.GetType())
if err != nil {
return nil, err
}
return NewTypeTypeWithParam(p), nil
}
return TypeType, nil
- case *exprpb.Type_WellKnown:
+ case *celpb.Type_WellKnown:
switch t.GetWellKnown() {
- case exprpb.Type_ANY:
+ case celpb.Type_ANY:
return AnyType, nil
- case exprpb.Type_DURATION:
+ case celpb.Type_DURATION:
return DurationType, nil
- case exprpb.Type_TIMESTAMP:
+ case celpb.Type_TIMESTAMP:
return TimestampType, nil
default:
return nil, fmt.Errorf("unsupported well-known type: %v", t)
}
- case *exprpb.Type_Wrapper:
- t, err := ExprTypeToType(&exprpb.Type{TypeKind: &exprpb.Type_Primitive{Primitive: t.GetWrapper()}})
+ case *celpb.Type_Wrapper:
+ t, err := ProtoAsType(&celpb.Type{TypeKind: &celpb.Type_Primitive{Primitive: t.GetWrapper()}})
if err != nil {
return nil, err
}
return NewNullableType(t), nil
- case *exprpb.Type_Error:
+ case *celpb.Type_Error:
return ErrorType, nil
default:
return nil, fmt.Errorf("unsupported type: %v", t)
}
}
+// TypeToProto converts from a CEL-native type representation to canonical CEL celpb.Type protobuf type.
+func TypeToProto(t *Type) (*celpb.Type, error) {
+ exprType, err := TypeToExprType(t)
+ if err != nil {
+ return nil, err
+ }
+ var pbtype celpb.Type
+ if err = convertProto(exprType, &pbtype); err != nil {
+ return nil, err
+ }
+ return &pbtype, nil
+}
+
func maybeWrapper(t *Type, pbType *exprpb.Type) *exprpb.Type {
if t.IsAssignableType(NullType) {
return chkdecls.NewWrapperType(pbType)
@@ -759,6 +813,23 @@ func maybeForeignType(t ref.Type) *Type {
return NewObjectType(t.TypeName(), traitMask)
}
+func convertProto(src, dst proto.Message) error {
+ pb, err := proto.Marshal(src)
+ if err != nil {
+ return err
+ }
+ err = proto.Unmarshal(pb, dst)
+ return err
+}
+
+func primitiveType(primitive celpb.Type_PrimitiveType) *celpb.Type {
+ return &celpb.Type{
+ TypeKind: &celpb.Type_Primitive{
+ Primitive: primitive,
+ },
+ }
+}
+
var (
checkedWellKnowns = map[string]*Type{
// Wrapper types.
@@ -803,4 +874,11 @@ var (
}
structTypeTraitMask = traits.FieldTesterType | traits.IndexerType
+
+ boolType = primitiveType(celpb.Type_BOOL)
+ bytesType = primitiveType(celpb.Type_BYTES)
+ doubleType = primitiveType(celpb.Type_DOUBLE)
+ intType = primitiveType(celpb.Type_INT64)
+ stringType = primitiveType(celpb.Type_STRING)
+ uintType = primitiveType(celpb.Type_UINT64)
)
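
A round-trip sketch through the new canonical-proto conversion helpers:

    package main

    import (
        "fmt"

        "github.com/google/cel-go/common/types"
    )

    func main() {
        // CEL-native type -> canonical celpb.Type -> CEL-native type.
        pbType, err := types.TypeToProto(types.NewListType(types.StringType))
        if err != nil {
            panic(err)
        }
        t, err := types.ProtoAsType(pbType)
        if err != nil {
            panic(err)
        }
        fmt.Println(t) // prints: list(string)
    }
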
diff --git a/vendor/github.com/google/cel-go/common/types/uint.go b/vendor/github.com/google/cel-go/common/types/uint.go
index 3257f9ade..a93405a13 100644
--- a/vendor/github.com/google/cel-go/common/types/uint.go
+++ b/vendor/github.com/google/cel-go/common/types/uint.go
@@ -19,6 +19,7 @@ import (
"math"
"reflect"
"strconv"
+ "strings"
"github.com/google/cel-go/common/types/ref"
@@ -80,6 +81,18 @@ func (i Uint) ConvertToNative(typeDesc reflect.Type) (any, error) {
return 0, err
}
return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil
+ case reflect.Uint8:
+ v, err := uint64ToUint8Checked(uint64(i))
+ if err != nil {
+ return 0, err
+ }
+ return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil
+ case reflect.Uint16:
+ v, err := uint64ToUint16Checked(uint64(i))
+ if err != nil {
+ return 0, err
+ }
+ return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil
case reflect.Uint64:
return reflect.ValueOf(i).Convert(typeDesc).Interface(), nil
case reflect.Ptr:
@@ -238,6 +251,11 @@ func (i Uint) Value() any {
return uint64(i)
}
+func (i Uint) format(sb *strings.Builder) {
+ sb.WriteString(strconv.FormatUint(uint64(i), 10))
+ sb.WriteString("u")
+}
+
// isJSONSafe indicates whether the uint is safely representable as a floating point value in JSON.
func (i Uint) isJSONSafe() bool {
return i <= maxIntJSON
diff --git a/vendor/github.com/google/cel-go/ext/BUILD.bazel b/vendor/github.com/google/cel-go/ext/BUILD.bazel
index 6fdcc60c6..ef4f4ec3d 100644
--- a/vendor/github.com/google/cel-go/ext/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/ext/BUILD.bazel
@@ -7,12 +7,18 @@ package(
go_library(
name = "go_default_library",
srcs = [
+ "bindings.go",
+ "comprehensions.go",
"encoders.go",
+ "extension_option_factory.go",
+ "formatting.go",
+ "formatting_v2.go",
"guards.go",
"lists.go",
"math.go",
"native.go",
"protos.go",
+ "regex.go",
"sets.go",
"strings.go",
],
@@ -21,14 +27,18 @@ go_library(
deps = [
"//cel:go_default_library",
"//checker:go_default_library",
- "//checker/decls:go_default_library",
+ "//common:go_default_library",
+ "//common/ast:go_default_library",
+ "//common/decls:go_default_library",
+ "//common/env:go_default_library",
+ "//common/operators:go_default_library",
"//common/overloads:go_default_library",
"//common/types:go_default_library",
"//common/types/pb:go_default_library",
"//common/types/ref:go_default_library",
"//common/types/traits:go_default_library",
"//interpreter:go_default_library",
- "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "//parser:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
"@org_golang_google_protobuf//types/known/structpb",
@@ -41,11 +51,17 @@ go_test(
name = "go_default_test",
size = "small",
srcs = [
+ "bindings_test.go",
+ "comprehensions_test.go",
"encoders_test.go",
+ "extension_option_factory_test.go",
+ "formatting_test.go",
+ "formatting_v2_test.go",
"lists_test.go",
"math_test.go",
"native_test.go",
"protos_test.go",
+ "regex_test.go",
"sets_test.go",
"strings_test.go",
],
@@ -55,15 +71,16 @@ go_test(
deps = [
"//cel:go_default_library",
"//checker:go_default_library",
+ "//common:go_default_library",
+ "//common/env:go_default_library",
"//common/types:go_default_library",
"//common/types/ref:go_default_library",
"//common/types/traits:go_default_library",
"//test:go_default_library",
- "//test/proto2pb:go_default_library",
- "//test/proto3pb:go_default_library",
- "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "//test/proto2pb:go_default_library",
+ "//test/proto3pb:go_default_library",
+ "@org_golang_google_protobuf//encoding/protojson:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_google_protobuf//types/known/wrapperspb:go_default_library",
- "@org_golang_google_protobuf//encoding/protojson:go_default_library",
],
)
diff --git a/vendor/github.com/google/cel-go/ext/README.md b/vendor/github.com/google/cel-go/ext/README.md
index 6f621ac4a..41ae6a314 100644
--- a/vendor/github.com/google/cel-go/ext/README.md
+++ b/vendor/github.com/google/cel-go/ext/README.md
@@ -3,15 +3,15 @@
CEL extensions are a related set of constants, functions, macros, or other
features which may not be covered by the core CEL spec.
-## Bindings
+## Bindings
Returns a cel.EnvOption to configure support for local variable bindings
in expressions.
-# Cel.Bind
+### Cel.Bind
Binds a simple identifier to an initialization expression which may be used
-in a subsequenct result expression. Bindings may also be nested within each
+in a subsequent result expression. Bindings may also be nested within each
other.
cel.bind(<varName>, <initExpr>, <resultExpr>)
@@ -19,17 +19,17 @@ other.
Examples:
cel.bind(a, 'hello',
- cel.bind(b, 'world', a + b + b + a)) // "helloworldworldhello"
+ cel.bind(b, 'world', a + b + b + a)) // "helloworldworldhello"
// Avoid a list allocation within the exists comprehension.
cel.bind(valid_values, [a, b, c],
- [d, e, f].exists(elem, elem in valid_values))
+ [d, e, f].exists(elem, elem in valid_values))
Local bindings are not guaranteed to be evaluated before use.
## Encoders
-Encoding utilies for marshalling data into standardized representations.
+Encoding utilities for marshalling data into standardized representations.
### Base64.Decode
@@ -100,7 +100,8 @@ argument. Simple numeric and list literals are supported as valid argument
types; however, other literals will be flagged as errors during macro
expansion. If the argument expression does not resolve to a numeric or
list(numeric) type during type-checking, or during runtime then an error
-will be produced. If a list argument is empty, this too will produce an error.
+will be produced. If a list argument is empty, this too will produce an
+error.
math.least(<arg>, ...) -> <double|int|uint>
@@ -117,6 +118,261 @@ Examples:
math.least(a, b) // check-time error if a or b is non-numeric
math.least(dyn('string')) // runtime error
+### Math.BitOr
+
+Introduced at version: 1
+
+Performs a bitwise-OR operation over two int or uint values.
+
+ math.bitOr(<int>, <int>) -> <int>
+ math.bitOr(<uint>, <uint>) -> <uint>
+
+Examples:
+
+ math.bitOr(1u, 2u) // returns 3u
+ math.bitOr(-2, -4) // returns -2
+
+### Math.BitAnd
+
+Introduced at version: 1
+
+Performs a bitwise-AND operation over two int or uint values.
+
+ math.bitAnd(<int>, <int>) -> <int>
+ math.bitAnd(<uint>, <uint>) -> <uint>
+
+Examples:
+
+ math.bitAnd(3u, 2u) // returns 2u
+ math.bitAnd(3, 5) // returns 3
+ math.bitAnd(-3, -5) // returns -7
+
+### Math.BitXor
+
+Introduced at version: 1
+
+Performs a bitwise-XOR operation over two int or uint values.
+
+ math.bitXor(<int>, <int>) -> <int>
+ math.bitXor(<uint>, <uint>) -> <uint>
+
+Examples:
+
+ math.bitXor(3u, 5u) // returns 6u
+ math.bitXor(1, 3) // returns 2
+
+### Math.BitNot
+
+Introduced at version: 1
+
+Function which accepts a single int or uint and performs a bitwise-NOT
+ones-complement of the given binary value.
+
+ math.bitNot(<int>) -> <int>
+ math.bitNot(<uint>) -> <uint>
+
+Examples:
+
+ math.bitNot(1) // returns -1
+ math.bitNot(-1) // returns 0
+ math.bitNot(0u) // returns 18446744073709551615u
+
+### Math.BitShiftLeft
+
+Introduced at version: 1
+
+Perform a left shift of bits on the first parameter, by the amount of bits
+specified in the second parameter. The first parameter is either a uint or
+an int. The second parameter must be an int.
+
+When the second parameter is 64 or greater, 0 will always be returned
+since the number of bits shifted is greater than or equal to the total bit
+length of the number being shifted. Negative valued bit shifts will result
+in a runtime error.
+
+ math.bitShiftLeft(<int>, <int>) -> <int>
+ math.bitShiftLeft(<uint>, <int>) -> <uint>
+
+Examples:
+
+ math.bitShiftLeft(1, 2) // returns 4
+ math.bitShiftLeft(-1, 2) // returns -4
+ math.bitShiftLeft(1u, 2) // returns 4u
+ math.bitShiftLeft(1u, 200) // returns 0u
+
+### Math.BitShiftRight
+
+Introduced at version: 1
+
+Perform a right shift of bits on the first parameter, by the amount of bits
+specified in the second parameter. The first parameter is either a uint or
+an int. The second parameter must be an int.
+
+When the second parameter is 64 or greater, 0 will always be returned since
+the number of bits shifted is greater than or equal to the total bit length
+of the number being shifted. Negative valued bit shifts will result in a
+runtime error.
+
+The sign bit extension will not be preserved for this operation: vacant bits
+on the left are filled with 0.
+
+ math.bitShiftRight(<int>, <int>) -> <int>
+ math.bitShiftRight(<uint>, <int>) -> <uint>
+
+Examples:
+
+ math.bitShiftRight(1024, 2) // returns 256
+ math.bitShiftRight(1024u, 2) // returns 256u
+ math.bitShiftRight(1024u, 64) // returns 0u
+
+### Math.Ceil
+
+Introduced at version: 1
+
+Compute the ceiling of a double value.
+
+ math.ceil(<double>) -> <double>
+
+Examples:
+
+ math.ceil(1.2) // returns 2.0
+ math.ceil(-1.2) // returns -1.0
+
+### Math.Floor
+
+Introduced at version: 1
+
+Compute the floor of a double value.
+
+ math.floor(<double>) -> <double>
+
+Examples:
+
+ math.floor(1.2) // returns 1.0
+ math.floor(-1.2) // returns -2.0
+
+### Math.Round
+
+Introduced at version: 1
+
+Rounds the double value to the nearest whole number with ties rounding away
+from zero, e.g. 1.5 -> 2.0, -1.5 -> -2.0.
+
+ math.round(<double>) -> <double>
+
+Examples:
+
+ math.round(1.2) // returns 1.0
+ math.round(1.5) // returns 2.0
+ math.round(-1.5) // returns -2.0
+
+### Math.Trunc
+
+Introduced at version: 1
+
+Truncates the fractional portion of the double value.
+
+ math.trunc(<double>) -> <double>
+
+Examples:
+
+ math.trunc(-1.3) // returns -1.0
+ math.trunc(1.3) // returns 1.0
+
+### Math.Abs
+
+Introduced at version: 1
+
+Returns the absolute value of the numeric type provided as input. If the
+value is NaN, the output is NaN. If the input is int64 min, the function
+will result in an overflow error.
+
+ math.abs(<double>) -> <double>
+ math.abs(<int>) -> <int>
+ math.abs(<uint>) -> <uint>
+
+Examples:
+
+ math.abs(-1) // returns 1
+ math.abs(1) // returns 1
+ math.abs(-9223372036854775808) // overflow error
+
+### Math.Sign
+
+Introduced at version: 1
+
+Returns the sign of the numeric type, either -1, 0, 1 as an int, double, or
+uint depending on the overload. For floating point values, if NaN is
+provided as input, the output is also NaN. The implementation does not
+differentiate between positive and negative zero.
+
+ math.sign(<double>) -> <double>
+ math.sign(<int>) -> <int>
+ math.sign(<uint>) -> <uint>
+
+Examples:
+
+ math.sign(-42) // returns -1
+ math.sign(0) // returns 0
+ math.sign(42) // returns 1
+
+### Math.IsInf
+
+Introduced at version: 1
+
+Returns true if the input double value is -Inf or +Inf.
+
+ math.isInf(<double>) -> <bool>
+
+Examples:
+
+ math.isInf(1.0/0.0) // returns true
+ math.isInf(1.2) // returns false
+
+### Math.IsNaN
+
+Introduced at version: 1
+
+Returns true if the input double value is NaN, false otherwise.
+
+ math.isNaN(<double>) -> <bool>
+
+Examples:
+
+ math.isNaN(0.0/0.0) // returns true
+ math.isNaN(1.2) // returns false
+
+### Math.IsFinite
+
+Introduced at version: 1
+
+Returns true if the value is a finite number. Equivalent in behavior to:
+!math.isNaN(double) && !math.isInf(double)
+
+ math.isFinite(<double>) -> <bool>
+
+Examples:
+
+ math.isFinite(0.0/0.0) // returns false
+ math.isFinite(1.2) // returns true
+
+### Math.Sqrt
+
+Introduced at version: 2
+
+Returns the square root of the given input as a double. Non-numeric inputs
+produce an error, while negative inputs yield NaN, as the examples show.
+
+ math.sqrt(<double>) -> <double>
+ math.sqrt(<int>) -> <double>
+ math.sqrt(<uint>) -> <double>
+
+Examples:
+
+ math.sqrt(81) // returns 9.0
+ math.sqrt(985.25) // returns 31.388692231439016
+ math.sqrt(-15) // returns NaN
+
## Protos
Protos configure extended macros and functions for proto manipulation.
@@ -154,8 +410,68 @@ Example:
Extended functions for list manipulation. As a general note, all indices are
zero-based.
+### Distinct
+
+**Introduced in version 2 (cost support in version 3)**
+
+Returns the distinct elements of a list.
+
+ <list(T)>.distinct() -> <list(T)>
+
+Examples:
+
+ [1, 2, 2, 3, 3, 3].distinct() // return [1, 2, 3]
+ ["b", "b", "c", "a", "c"].distinct() // return ["b", "c", "a"]
+ [1, "b", 2, "b"].distinct() // return [1, "b", 2]
+
+### Flatten
+
+**Introduced in version 1 (cost support in version 3)**
+
+Flattens a list recursively.
+If an optional depth is provided, the list is flattened to the specified level.
+A negative depth value will result in an error.
+
+ <list>.flatten() -> <list>
+ <list>.flatten(<int>) -> <list>
+
+Examples:
+
+ [1,[2,3],[4]].flatten() // return [1, 2, 3, 4]
+ [1,[2,[3,4]]].flatten() // return [1, 2, [3, 4]]
+ [1,2,[],[],[3,4]].flatten() // return [1, 2, 3, 4]
+ [1,[2,[3,[4]]]].flatten(2) // return [1, 2, 3, [4]]
+ [1,[2,[3,[4]]]].flatten(-1) // error
+
+### Range
+
+**Introduced in version 2 (cost support in version 3)**
+
+Returns a list of integers from 0 to n-1.
+
+ lists.range(<int>) -> <list(int)>
+
+Examples:
+
+ lists.range(5) -> [0, 1, 2, 3, 4]
+
+
+### Reverse
+
+**Introduced in version 2 (cost support in version 3)**
+
+Returns the elements of a list in reverse order.
+
+ <list(T)>.reverse() -> <list(T)>
+
+Examples:
+
+ [5, 3, 1, 2].reverse() // return [2, 1, 3, 5]
+
+
### Slice
+**Introduced in version 0 (cost support in version 3)**
Returns a new sub-list using the indexes provided.
@@ -164,7 +480,73 @@ Returns a new sub-list using the indexes provided.
Examples:
[1,2,3,4].slice(1, 3) // return [2, 3]
- [1,2,3,4].slice(2, 4) // return [3 ,4]
+ [1,2,3,4].slice(2, 4) // return [3, 4]
+
+### Sort
+
+**Introduced in version 2 (cost support in version 3)**
+
+Sorts a list with comparable elements. If the element type is not comparable
+or the element types are not the same, the function will produce an error.
+
+ <list(T)>.sort() -> <list(T)>
+ T in {int, uint, double, bool, duration, timestamp, string, bytes}
+
+Examples:
+
+ [3, 2, 1].sort() // return [1, 2, 3]
+ ["b", "c", "a"].sort() // return ["a", "b", "c"]
+ [1, "b"].sort() // error
+ [[1, 2, 3]].sort() // error
+
+### SortBy
+
+**Introduced in version 2 (cost support in version 3)**
+
+Sorts a list by a key value, i.e., the order is determined by the result of
+an expression applied to each element of the list.
+
+ <list(T)>.sortBy(<keyVar>, <keyExpr>) -> <list(T)>
+ keyExpr returns a value in {int, uint, double, bool, duration, timestamp, string, bytes}
+
+Examples:
+
+ [
+ Player { name: "foo", score: 0 },
+ Player { name: "bar", score: -10 },
+ Player { name: "baz", score: 1000 },
+ ].sortBy(e, e.score).map(e, e.name)
+ == ["bar", "foo", "baz"]
+
+### Last
+
+**Introduced in the OptionalTypes library version 2**
+
+Returns an optional with the last value from the list or `optional.None` if the
+list is empty.
+
+ <list(T)>.last() -> <Optional(T)>
+
+Examples:
+
+ [1, 2, 3].last().value() == 3
+ [].last().orValue('test') == 'test'
+
+This is syntactic sugar for list[list.size()-1].
+
+### First
+
+**Introduced in the OptionalTypes library version 2**
+
+Returns an optional with the first value from the list or `optional.None` if the
+list is empty.
+
+ <list(T)>.first() -> <Optional(T)>
+
+Examples:
+
+ [1, 2, 3].first().value() == 1
+ [].first().orValue('test') == 'test'
## Sets
@@ -259,7 +641,8 @@ Examples:
'hello mellow'.indexOf('jello') // returns -1
'hello mellow'.indexOf('', 2) // returns 2
'hello mellow'.indexOf('ello', 2) // returns 7
- 'hello mellow'.indexOf('ello', 20) // error
+ 'hello mellow'.indexOf('ello', 20) // returns -1
+ 'hello mellow'.indexOf('ello', -1) // error
### Join
@@ -273,10 +656,10 @@ elements in the resulting string.
Examples:
- ['hello', 'mellow'].join() // returns 'hellomellow'
- ['hello', 'mellow'].join(' ') // returns 'hello mellow'
- [].join() // returns ''
- [].join('/') // returns ''
+ ['hello', 'mellow'].join() // returns 'hellomellow'
+ ['hello', 'mellow'].join(' ') // returns 'hello mellow'
+ [].join() // returns ''
+ [].join('/') // returns ''
### LastIndexOf
@@ -297,6 +680,7 @@ Examples:
'hello mellow'.lastIndexOf('ello') // returns 7
'hello mellow'.lastIndexOf('jello') // returns -1
'hello mellow'.lastIndexOf('ello', 6) // returns 1
+ 'hello mellow'.lastIndexOf('ello', 20) // returns -1
'hello mellow'.lastIndexOf('ello', -1) // error
### LowerAscii
@@ -414,3 +798,150 @@ Examples:
'TacoCat'.upperAscii() // returns 'TACOCAT'
'TacoCÆt Xii'.upperAscii() // returns 'TACOCÆT XII'
+
+### Reverse
+
+Returns a new string whose characters are the same as the target string, only formatted in
+reverse order.
+This function relies on converting strings to rune arrays in order to reverse them.
+It was introduced in version 3 of the strings extension.
+
+ <string>.reverse() -> <string>
+
+Examples:
+
+ 'gums'.reverse() // returns 'smug'
+ 'John Smith'.reverse() // returns 'htimS nhoJ'
+
+## TwoVarComprehensions
+
+TwoVarComprehensions introduces support for two-variable comprehensions.
+
+The two-variable form of comprehensions looks similar to the one-variable
+counterparts. Where possible, the same macro names were used and additional
+macro signatures added. The notable distinction for two-variable comprehensions
+is the introduction of `transformList`, `transformMap`, and `transformMapEntry`
+support for list and map types rather than the more traditional `map` and
+`filter` macros.
+
+### All
+
+Comprehension which tests whether all elements in the list or map satisfy a
+given predicate. The `all` macro evaluates in a manner consistent with logical
+AND and will short-circuit when encountering a `false` value.
+
+ <list>.all(indexVar, valueVar, <predicate>) -> bool
+